Diffstat (limited to 'arch/arm64/include/asm')
34 files changed, 897 insertions, 287 deletions
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 70f7b9e04..10b017c4b 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -1,5 +1,5 @@
 /*
- * Based on arch/arm/include/asm/assembler.h
+ * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
@@ -23,22 +23,13 @@
 #ifndef __ASM_ASSEMBLER_H
 #define __ASM_ASSEMBLER_H

+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+#include <asm/pgtable-hwdef.h>
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>

 /*
- * Stack pushing/popping (register pairs only). Equivalent to store decrement
- * before, load increment after.
- */
- .macro push, xreg1, xreg2
- stp \xreg1, \xreg2, [sp, #-16]!
- .endm
-
- .macro pop, xreg1, xreg2
- ldp \xreg1, \xreg2, [sp], #16
- .endm
-
-/*
 * Enable and disable interrupts.
 */
 .macro disable_irq
@@ -212,6 +203,102 @@ lr .req x30 // link register
 .endm

 /*
+ * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
+ */
+ .macro vma_vm_mm, rd, rn
+ ldr \rd, [\rn, #VMA_VM_MM]
+ .endm
+
+/*
+ * mmid - get context id from mm pointer (mm->context.id)
+ */
+ .macro mmid, rd, rn
+ ldr \rd, [\rn, #MM_CONTEXT_ID]
+ .endm
+
+/*
+ * dcache_line_size - get the minimum D-cache line size from the CTR register.
+ */
+ .macro dcache_line_size, reg, tmp
+ mrs \tmp, ctr_el0 // read CTR
+ ubfm \tmp, \tmp, #16, #19 // cache line size encoding
+ mov \reg, #4 // bytes per word
+ lsl \reg, \reg, \tmp // actual cache line size
+ .endm
+
+/*
+ * icache_line_size - get the minimum I-cache line size from the CTR register.
+ */
+ .macro icache_line_size, reg, tmp
+ mrs \tmp, ctr_el0 // read CTR
+ and \tmp, \tmp, #0xf // cache line size encoding
+ mov \reg, #4 // bytes per word
+ lsl \reg, \reg, \tmp // actual cache line size
+ .endm
+
+/*
+ * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
+ */
+ .macro tcr_set_idmap_t0sz, valreg, tmpreg
+#ifndef CONFIG_ARM64_VA_BITS_48
+ ldr_l \tmpreg, idmap_t0sz
+ bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
+#endif
+ .endm
+
+/*
+ * Macro to perform a data cache maintenance for the interval
+ * [kaddr, kaddr + size)
+ *
+ * op: operation passed to dc instruction
+ * domain: domain used in dsb instruction
+ * kaddr: starting virtual address of the region
+ * size: size of the region
+ * Corrupts: kaddr, size, tmp1, tmp2
+ */
+ .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
+ dcache_line_size \tmp1, \tmp2
+ add \size, \kaddr, \size
+ sub \tmp2, \tmp1, #1
+ bic \kaddr, \kaddr, \tmp2
+9998: dc \op, \kaddr
+ add \kaddr, \kaddr, \tmp1
+ cmp \kaddr, \size
+ b.lo 9998b
+ dsb \domain
+ .endm
+
+/*
+ * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
+ */
+ .macro reset_pmuserenr_el0, tmpreg
+ mrs \tmpreg, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
+ sbfx \tmpreg, \tmpreg, #8, #4
+ cmp \tmpreg, #1 // Skip if no PMU present
+ b.lt 9000f
+ msr pmuserenr_el0, xzr // Disable PMU access from EL0
+9000:
+ .endm
+
+/*
+ * copy_page - copy src to dest using temp registers t1-t8
+ */
+ .macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
+9998: ldp \t1, \t2, [\src]
+ ldp \t3, \t4, [\src, #16]
+ ldp \t5, \t6, [\src, #32]
+ ldp \t7, \t8, [\src, #48]
+ add \src, \src, #64
+ stnp \t1, \t2, [\dest]
+ stnp \t3, \t4, [\dest, #16]
+ stnp \t5, \t6, [\dest, #32]
+ stnp \t7, \t8, [\dest, #48]
+ add \dest, \dest, #64
+ tst \src, #(PAGE_SIZE - 1)
+ b.ne 9998b
+ .endm
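A note on the dcache_line_size and dcache_by_line_op macros above: CTR_EL0's DminLine field (bits [19:16], extracted by the ubfm) holds log2 of the line size in 4-byte words. A minimal C sketch of the same computation and loop, assuming hypothetical read_ctr_el0() and dc_op() helpers in place of the mrs and dc instructions:

#include <stdint.h>
#include <stddef.h>

extern uint64_t read_ctr_el0(void);     /* hypothetical: mrs ctr_el0 */
extern void dc_op(uintptr_t line_addr); /* hypothetical: one dc instruction */

static uint64_t dcache_line_size_bytes(void)
{
	uint64_t dminline = (read_ctr_el0() >> 16) & 0xf; /* ubfm #16, #19 */
	return 4ULL << dminline;                          /* words -> bytes */
}

static void dcache_by_line_sketch(uintptr_t kaddr, size_t size)
{
	uint64_t line = dcache_line_size_bytes();
	uintptr_t end = kaddr + size;

	/* Align down to a line boundary, then visit every line in range;
	 * the real macro finishes with a dsb \domain barrier. */
	for (kaddr &= ~(line - 1); kaddr < end; kaddr += line)
		dc_op(kaddr);
}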
+
+/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
@@ -233,4 +320,24 @@ lr .req x30 // link register
 .long \sym\()_hi32
 .endm

+/*
+ * mov_q - move an immediate constant into a 64-bit register using
+ *         between 2 and 4 movz/movk instructions (depending on the
+ *         magnitude and sign of the operand)
+ */
+ .macro mov_q, reg, val
+ .if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
+ movz \reg, :abs_g1_s:\val
+ .else
+ .if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
+ movz \reg, :abs_g2_s:\val
+ .else
+ movz \reg, :abs_g3:\val
+ movk \reg, :abs_g2_nc:\val
+ .endif
+ movk \reg, :abs_g1_nc:\val
+ .endif
+ movk \reg, :abs_g0_nc:\val
+ .endm
+
 #endif /* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index b9b649422..224efe730 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -35,8 +35,9 @@
 #define ARM64_ALT_PAN_NOT_UAO 10
 #define ARM64_HAS_VIRT_HOST_EXTN 11
 #define ARM64_WORKAROUND_CAVIUM_27456 12
+#define ARM64_HAS_32BIT_EL0 13

-#define ARM64_NCAPS 13
+#define ARM64_NCAPS 14

 #ifndef __ASSEMBLY__

@@ -77,10 +78,17 @@ struct arm64_ftr_reg {
 struct arm64_ftr_bits *ftr_bits;
 };

+/* scope of capability check */
+enum {
+ SCOPE_SYSTEM,
+ SCOPE_LOCAL_CPU,
+};
+
 struct arm64_cpu_capabilities {
 const char *desc;
 u16 capability;
- bool (*matches)(const struct arm64_cpu_capabilities *);
+ int def_scope; /* default scope */
+ bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
 void (*enable)(void *); /* Called on all active CPUs */
 union {
 struct { /* To be used for erratum handling only */
@@ -101,6 +109,8 @@ struct arm64_cpu_capabilities {

 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

+bool this_cpu_has_cap(unsigned int cap);
+
 static inline bool cpu_have_feature(unsigned int num)
 {
 return elf_hwcap & (1UL << num);
@@ -170,12 +180,20 @@ static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
 cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
 }

+static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
+{
+ u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);
+
+ return val == ID_AA64PFR0_EL0_32BIT_64BIT;
+}
+
 void __init setup_cpu_features(void);
 void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 const char *info);
 void check_local_cpu_errata(void);
+void verify_local_cpu_errata(void);
 void verify_local_cpu_capabilities(void);

 u64 read_system_reg(u32 id);
@@ -185,6 +203,11 @@ static inline bool cpu_supports_mixed_endian_el0(void)
 return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
 }

+static inline bool system_supports_32bit_el0(void)
+{
+ return cpus_have_cap(ARM64_HAS_32BIT_EL0);
+}
+
 static inline bool system_supports_mixed_endian_el0(void)
 {
 return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
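The new id_aa64pfr0_32bit_el0() helper reads a standard 4-bit unsigned ID register field. A self-contained sketch of the extraction it relies on (extract_field() is a stand-in for the kernel's cpuid_feature_extract_unsigned_field; the 0x2 encoding is the one defined in the sysreg.h hunk later in this series):

#include <stdint.h>
#include <stdbool.h>

#define ID_AA64PFR0_EL0_SHIFT		0
#define ID_AA64PFR0_EL0_32BIT_64BIT	0x2

/* ID register fields are 4 bits wide. */
static inline unsigned int extract_field(uint64_t reg, unsigned int shift)
{
	return (reg >> shift) & 0xf;
}

static inline bool pfr0_has_32bit_el0(uint64_t pfr0)
{
	return extract_field(pfr0, ID_AA64PFR0_EL0_SHIFT) ==
	       ID_AA64PFR0_EL0_32BIT_64BIT;
}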
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 87e1985f3..9d9fd4b9a 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -80,12 +80,14 @@
 #define APM_CPU_PART_POTENZA 0x000

 #define CAVIUM_CPU_PART_THUNDERX 0x0A1
+#define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2

 #define BRCM_CPU_PART_VULCAN 0x516

 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
 #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)

 #ifndef __ASSEMBLY__

diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index ba437f090..7dbea6c07 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -48,7 +48,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 }

 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- struct iommu_ops *iommu, bool coherent);
+ const struct iommu_ops *iommu, bool coherent);
 #define arch_setup_dma_ops arch_setup_dma_ops

 #ifdef CONFIG_IOMMU_DMA
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 8e88a696c..622db3c64 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -4,6 +4,7 @@
 #include <asm/io.h>
 #include <asm/mmu_context.h>
 #include <asm/neon.h>
+#include <asm/ptrace.h>
 #include <asm/tlbflush.h>

 #ifdef CONFIG_EFI
@@ -14,32 +15,29 @@ extern void efi_init(void);

 int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);

-#define efi_call_virt(f, ...) \
+#define efi_set_mapping_permissions efi_create_mapping
+
+#define arch_efi_call_virt_setup() \
 ({ \
- efi_##f##_t *__f; \
- efi_status_t __s; \
- \
 kernel_neon_begin(); \
 efi_virtmap_load(); \
- __f = efi.systab->runtime->f; \
- __s = __f(__VA_ARGS__); \
- efi_virtmap_unload(); \
- kernel_neon_end(); \
- __s; \
 })

-#define __efi_call_virt(f, ...) \
+#define arch_efi_call_virt(f, args...) \
 ({ \
 efi_##f##_t *__f; \
- \
- kernel_neon_begin(); \
- efi_virtmap_load(); \
 __f = efi.systab->runtime->f; \
- __f(__VA_ARGS__); \
+ __f(args); \
+})
+
+#define arch_efi_call_virt_teardown() \
+({ \
 efi_virtmap_unload(); \
 kernel_neon_end(); \
 })

+#define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
+
 /* arch specific definitions used by the stub code */

 /*
@@ -50,7 +48,16 @@ int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
 #define EFI_FDT_ALIGN SZ_2M /* used by allocate_new_fdt_and_exit_boot() */
 #define MAX_FDT_OFFSET SZ_512M

-#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
+#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
+#define __efi_call_early(f, ...) f(__VA_ARGS__)
+#define efi_is_64bit() (true)
+
+#define alloc_screen_info(x...) &screen_info
+#define free_screen_info(x...)
+
+static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
+{
+}

 #define EFI_ALLOC_ALIGN SZ_64K
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 83d48a599..579b6e654 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -177,7 +177,8 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];

 /* AArch32 EABI. */
 #define EF_ARM_EABI_MASK 0xff000000
-#define compat_elf_check_arch(x) (((x)->e_machine == EM_ARM) && \
+#define compat_elf_check_arch(x) (system_supports_32bit_el0() && \
+ ((x)->e_machine == EM_ARM) && \
 ((x)->e_flags & EF_ARM_EABI_MASK))

 #define compat_start_thread compat_start_thread
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 5c6375d85..7e51d1b57 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -19,6 +19,7 @@
 #ifndef __ASM_KERNEL_PGTABLE_H
 #define __ASM_KERNEL_PGTABLE_H

+#include <asm/sparsemem.h>

 /*
 * The linear mapping and the start of memory are both 2M aligned (per
@@ -86,10 +87,24 @@
 * (64k granule), or a multiple that can be mapped using contiguous bits
 * in the page tables: 32 * PMD_SIZE (16k granule)
 */
-#ifdef CONFIG_ARM64_64K_PAGES
-#define ARM64_MEMSTART_ALIGN SZ_512M
+#if defined(CONFIG_ARM64_4K_PAGES)
+#define ARM64_MEMSTART_SHIFT PUD_SHIFT
+#elif defined(CONFIG_ARM64_16K_PAGES)
+#define ARM64_MEMSTART_SHIFT (PMD_SHIFT + 5)
 #else
-#define ARM64_MEMSTART_ALIGN SZ_1G
+#define ARM64_MEMSTART_SHIFT PMD_SHIFT
+#endif
+
+/*
+ * sparsemem vmemmap imposes an additional requirement on the alignment of
+ * memstart_addr, due to the fact that the base of the vmemmap region
+ * has a direct correspondence, and needs to appear sufficiently aligned
+ * in the virtual address space.
+ */
+#if defined(CONFIG_SPARSEMEM_VMEMMAP) && ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
+#define ARM64_MEMSTART_ALIGN (1UL << SECTION_SIZE_BITS)
+#else
+#define ARM64_MEMSTART_ALIGN (1UL << ARM64_MEMSTART_SHIFT)
 #endif

 #endif /* __ASM_KERNEL_PGTABLE_H */
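The ARM64_MEMSTART_ALIGN rework above is easy to check by hand. Assuming the usual shift values (4K: PUD_SHIFT = 30; 16K: PMD_SHIFT = 25, plus 5; 64K: PMD_SHIFT = 29) and SECTION_SIZE_BITS = 30, only the 64K case falls below the section size and is promoted by the vmemmap clause. A sketch of that one case, with the constants spelled out as assumptions:

/* Assumed values: SECTION_SIZE_BITS = 30, 64K-granule PMD_SHIFT = 29. */
#define SECTION_SIZE_BITS	30
#define MEMSTART_SHIFT_64K	29

#if MEMSTART_SHIFT_64K < SECTION_SIZE_BITS
#define MEMSTART_ALIGN_64K	(1UL << SECTION_SIZE_BITS)	/* 1 GiB, was 512 MiB */
#else
#define MEMSTART_ALIGN_64K	(1UL << MEMSTART_SHIFT_64K)
#endif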
diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h
index f69f69c81..da8464552 100644
--- a/arch/arm64/include/asm/kgdb.h
+++ b/arch/arm64/include/asm/kgdb.h
@@ -38,25 +38,54 @@ extern int kgdb_fault_expected;
 #endif /* !__ASSEMBLY__ */

 /*
- * gdb is expecting the following registers layout.
+ * gdb remote protocol (well most versions of it) expects the following
+ * register layout.
 *
 * General purpose regs:
 *     r0-r30: 64 bit
 *     sp,pc : 64 bit
- *     pstate : 64 bit
- *     Total: 34
+ *     pstate : 32 bit
+ *     Total: 33 + 1
 * FPU regs:
 *     f0-f31: 128 bit
- *     Total: 32
- * Extra regs
 *     fpsr & fpcr: 32 bit
- *     Total: 2
+ *     Total: 32 + 2
 *
+ * To expand a little on the "most versions of it"... when the gdb remote
+ * protocol for AArch64 was developed it depended on a statement in the
+ * Architecture Reference Manual that claimed "SPSR_ELx is a 32-bit register"
+ * and, as a result, allocated only 32-bits for the PSTATE in the remote
+ * protocol. In fact this statement is still present in ARM DDI 0487A.i.
+ *
+ * Unfortunately "is a 32-bit register" has a very special meaning for
+ * system registers. It means that "the upper bits, bits[63:32], are
+ * RES0.". RES0 is heavily used in the ARM architecture documents as a
+ * way to leave space for future architecture changes. So to translate a
+ * little for people who don't spend their spare time reading ARM architecture
+ * manuals, what "is a 32-bit register" actually means in this context is
+ * "is a 64-bit register but one with no meaning allocated to any of the
+ * upper 32-bits... *yet*".
+ *
+ * Perhaps then we should not be surprised that this has led to some
+ * confusion. Specifically a patch, influenced by the above translation,
+ * that extended PSTATE to 64-bit was accepted into gdb-7.7 but the patch
+ * was reverted in gdb-7.8.1 and all later releases, when this was
+ * discovered to be an undocumented protocol change.
+ *
+ * So... it is *not* wrong for us to only allocate 32-bits to PSTATE
+ * here even though the kernel itself allocates 64-bits for the same
+ * state. That is because this bit of code tells the kernel how the gdb
+ * remote protocol (well most versions of it) describes the register state.
+ *
+ * Note that if you are using one of the versions of gdb that supports
+ * the gdb-7.7 version of the protocol you cannot use kgdb directly
+ * without providing a custom register description (gdb can load new
+ * protocol descriptions at runtime).
 */
-#define _GP_REGS 34
+#define _GP_REGS 33
 #define _FP_REGS 32
-#define _EXTRA_REGS 2
+#define _EXTRA_REGS 3
 /*
 * general purpose registers size in bytes.
 * pstate is only 4 bytes. subtract 4 bytes
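The constants above feed the remote-protocol buffer sizing that the cut-off comment refers to: 33 registers of 8 bytes each plus a 4-byte pstate. A hedged restatement of that arithmetic (GP_REG_BUF_BYTES is a hypothetical name, not the header's own macro):

#define NUM_GP_REGS	33	/* x0-x30, sp, pc: 8 bytes each */
#define PSTATE_BYTES	4	/* only 32 bits on the wire */

#define GP_REG_BUF_BYTES	(NUM_GP_REGS * 8 + PSTATE_BYTES)	/* 268 */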
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 3f2988799..2cdb6b551 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -84,44 +84,38 @@
 #define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO)
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)

-/* Hyp System Control Register (SCTLR_EL2) bits */
-#define SCTLR_EL2_EE (1 << 25)
-#define SCTLR_EL2_WXN (1 << 19)
-#define SCTLR_EL2_I (1 << 12)
-#define SCTLR_EL2_SA (1 << 3)
-#define SCTLR_EL2_C (1 << 2)
-#define SCTLR_EL2_A (1 << 1)
-#define SCTLR_EL2_M 1
-#define SCTLR_EL2_FLAGS (SCTLR_EL2_M | SCTLR_EL2_A | SCTLR_EL2_C | \
- SCTLR_EL2_SA | SCTLR_EL2_I)
-
 /* TCR_EL2 Registers bits */
-#define TCR_EL2_RES1 ((1 << 31) | (1 << 23))
-#define TCR_EL2_TBI (1 << 20)
-#define TCR_EL2_PS (7 << 16)
-#define TCR_EL2_PS_40B (2 << 16)
-#define TCR_EL2_TG0 (1 << 14)
-#define TCR_EL2_SH0 (3 << 12)
-#define TCR_EL2_ORGN0 (3 << 10)
-#define TCR_EL2_IRGN0 (3 << 8)
-#define TCR_EL2_T0SZ 0x3f
-#define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \
- TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
+#define TCR_EL2_RES1 ((1 << 31) | (1 << 23))
+#define TCR_EL2_TBI (1 << 20)
+#define TCR_EL2_PS_SHIFT 16
+#define TCR_EL2_PS_MASK (7 << TCR_EL2_PS_SHIFT)
+#define TCR_EL2_PS_40B (2 << TCR_EL2_PS_SHIFT)
+#define TCR_EL2_TG0_MASK TCR_TG0_MASK
+#define TCR_EL2_SH0_MASK TCR_SH0_MASK
+#define TCR_EL2_ORGN0_MASK TCR_ORGN0_MASK
+#define TCR_EL2_IRGN0_MASK TCR_IRGN0_MASK
+#define TCR_EL2_T0SZ_MASK 0x3f
+#define TCR_EL2_MASK (TCR_EL2_TG0_MASK | TCR_EL2_SH0_MASK | \
+ TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)

 /* VTCR_EL2 Registers bits */
 #define VTCR_EL2_RES1 (1 << 31)
-#define VTCR_EL2_PS_MASK (7 << 16)
-#define VTCR_EL2_TG0_MASK (1 << 14)
-#define VTCR_EL2_TG0_4K (0 << 14)
-#define VTCR_EL2_TG0_64K (1 << 14)
-#define VTCR_EL2_SH0_MASK (3 << 12)
-#define VTCR_EL2_SH0_INNER (3 << 12)
-#define VTCR_EL2_ORGN0_MASK (3 << 10)
-#define VTCR_EL2_ORGN0_WBWA (1 << 10)
-#define VTCR_EL2_IRGN0_MASK (3 << 8)
-#define VTCR_EL2_IRGN0_WBWA (1 << 8)
-#define VTCR_EL2_SL0_MASK (3 << 6)
-#define VTCR_EL2_SL0_LVL1 (1 << 6)
+#define VTCR_EL2_HD (1 << 22)
+#define VTCR_EL2_HA (1 << 21)
+#define VTCR_EL2_PS_MASK TCR_EL2_PS_MASK
+#define VTCR_EL2_TG0_MASK TCR_TG0_MASK
+#define VTCR_EL2_TG0_4K TCR_TG0_4K
+#define VTCR_EL2_TG0_16K TCR_TG0_16K
+#define VTCR_EL2_TG0_64K TCR_TG0_64K
+#define VTCR_EL2_SH0_MASK TCR_SH0_MASK
+#define VTCR_EL2_SH0_INNER TCR_SH0_INNER
+#define VTCR_EL2_ORGN0_MASK TCR_ORGN0_MASK
+#define VTCR_EL2_ORGN0_WBWA TCR_ORGN0_WBWA
+#define VTCR_EL2_IRGN0_MASK TCR_IRGN0_MASK
+#define VTCR_EL2_IRGN0_WBWA TCR_IRGN0_WBWA
+#define VTCR_EL2_SL0_SHIFT 6
+#define VTCR_EL2_SL0_MASK (3 << VTCR_EL2_SL0_SHIFT)
+#define VTCR_EL2_SL0_LVL1 (1 << VTCR_EL2_SL0_SHIFT)
 #define VTCR_EL2_T0SZ_MASK 0x3f
 #define VTCR_EL2_T0SZ_40B 24
 #define VTCR_EL2_VS_SHIFT 19
@@ -137,35 +131,45 @@
 * (see hyp-init.S).
 *
 * Note that when using 4K pages, we concatenate two first level page tables
- * together.
+ * together. With 16K pages, we concatenate 16 first level page tables.
 *
 * The magic numbers used for VTTBR_X in this patch can be found in Tables
 * D4-23 and D4-25 in ARM DDI 0487A.b.
 */
+
+#define VTCR_EL2_T0SZ_IPA VTCR_EL2_T0SZ_40B
+#define VTCR_EL2_COMMON_BITS (VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
+ VTCR_EL2_IRGN0_WBWA | VTCR_EL2_RES1)
+
 #ifdef CONFIG_ARM64_64K_PAGES
 /*
 * Stage2 translation configuration:
- * 40bits input (T0SZ = 24)
 * 64kB pages (TG0 = 1)
 * 2 level page tables (SL = 1)
 */
-#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \
- VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
- VTCR_EL2_SL0_LVL1 | VTCR_EL2_RES1)
-#define VTTBR_X (38 - VTCR_EL2_T0SZ_40B)
-#else
+#define VTCR_EL2_TGRAN_FLAGS (VTCR_EL2_TG0_64K | VTCR_EL2_SL0_LVL1)
+#define VTTBR_X_TGRAN_MAGIC 38
+#elif defined(CONFIG_ARM64_16K_PAGES)
+/*
+ * Stage2 translation configuration:
+ * 16kB pages (TG0 = 2)
+ * 2 level page tables (SL = 1)
+ */
+#define VTCR_EL2_TGRAN_FLAGS (VTCR_EL2_TG0_16K | VTCR_EL2_SL0_LVL1)
+#define VTTBR_X_TGRAN_MAGIC 42
+#else /* 4K */
 /*
 * Stage2 translation configuration:
- * 40bits input (T0SZ = 24)
 * 4kB pages (TG0 = 0)
 * 3 level page tables (SL = 1)
 */
-#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \
- VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
- VTCR_EL2_SL0_LVL1 | VTCR_EL2_RES1)
-#define VTTBR_X (37 - VTCR_EL2_T0SZ_40B)
+#define VTCR_EL2_TGRAN_FLAGS (VTCR_EL2_TG0_4K | VTCR_EL2_SL0_LVL1)
+#define VTTBR_X_TGRAN_MAGIC 37
 #endif

+#define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS)
+#define VTTBR_X (VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA)
+
 #define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
 #define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
 #define VTTBR_VMID_SHIFT (UL(48))
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 40a0a24e6..7561f63f1 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -22,6 +22,8 @@
 #define ARM_EXCEPTION_IRQ 0
 #define ARM_EXCEPTION_TRAP 1
+/* The hyp-stub will return this for any kvm_call_hyp() call */
+#define ARM_EXCEPTION_HYP_GONE 2

 #define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
 #define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
@@ -40,6 +42,7 @@ struct kvm_vcpu;

 extern char __kvm_hyp_init[];
 extern char __kvm_hyp_init_end[];
+extern char __kvm_hyp_reset[];

 extern char __kvm_hyp_vector[];
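The VTTBR_X_TGRAN_MAGIC values above resolve into the stage-2 pgd alignment that VTTBR_EL2 demands. With the fixed 40-bit IPA (T0SZ = 24), the arithmetic works out as in this sketch (the magic numbers are the ones the comment quotes from ARM DDI 0487A.b):

#define T0SZ_IPA	24	/* 40-bit guest physical address space */

#define VTTBR_X_4K	(37 - T0SZ_IPA)	/* 13 -> 8 KiB: two concatenated 4 KiB level-1 tables */
#define VTTBR_X_16K	(42 - T0SZ_IPA)	/* 18 -> 256 KiB: sixteen concatenated 16 KiB tables */
#define VTTBR_X_64K	(38 - T0SZ_IPA)	/* 14 -> a single 16 KiB level-1 table */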
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f5c6bd254..49095fc4b 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -43,9 +43,13 @@

 #define KVM_VCPU_MAX_FEATURES 4

+#define KVM_REQ_VCPU_EXIT 8
+
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 int kvm_arch_dev_ioctl_check_extension(long ext);
+unsigned long kvm_hyp_reset_entry(void);
+void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);

 struct kvm_arch {
 /* The VMID generation used for the virt. memory system */
@@ -293,6 +297,7 @@ struct kvm_vm_stat {
 struct kvm_vcpu_stat {
 u32 halt_successful_poll;
 u32 halt_attempted_poll;
+ u32 halt_poll_invalid;
 u32 halt_wakeup;
 u32 hvc_exit_stat;
 u64 wfe_exit_stat;
@@ -324,6 +329,10 @@ static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);

+void kvm_arm_halt_guest(struct kvm *kvm);
+void kvm_arm_resume_guest(struct kvm *kvm);
+void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu);
+void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu);

 u64 __kvm_call_hyp(void *hypfn, ...);
 #define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
@@ -352,11 +361,22 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
 hyp_stack_ptr, vector_ptr);
 }

-static inline void kvm_arch_hardware_disable(void) {}
+static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
+ phys_addr_t phys_idmap_start)
+{
+ /*
+ * Call reset code, and switch back to stub hyp vectors.
+ * Uses __kvm_call_hyp() to avoid kaslr's kvm_ksym_ref() translation.
+ */
+ __kvm_call_hyp((void *)kvm_hyp_reset_entry(),
+ boot_pgd_ptr, phys_idmap_start);
+}
+
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

 void kvm_arm_init_debug(void);
 void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h
index fe612a962..75ea42079 100644
--- a/arch/arm64/include/asm/kvm_mmio.h
+++ b/arch/arm64/include/asm/kvm_mmio.h
@@ -30,6 +30,9 @@ struct kvm_decode {
 bool sign_extend;
 };

+void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
+unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
+
 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 phys_addr_t fault_ipa);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 22732a5e3..f05ac27d0 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -45,18 +45,6 @@
 */
 #define TRAMPOLINE_VA (HYP_PAGE_OFFSET_MASK & PAGE_MASK)

-/*
- * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
- * levels in addition to the PGD and potentially the PUD which are
- * pre-allocated (we pre-allocate the fake PGD and the PUD when the Stage-2
- * tables use one level of tables less than the kernel.
- */
-#ifdef CONFIG_ARM64_64K_PAGES
-#define KVM_MMU_CACHE_MIN_PAGES 1
-#else
-#define KVM_MMU_CACHE_MIN_PAGES 2
-#endif
-
 #ifdef __ASSEMBLY__

 #include <asm/alternative.h>
@@ -91,6 +79,8 @@ alternative_endif
 #define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT)
 #define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL)

+#include <asm/stage2_pgtable.h>
+
 int create_hyp_mappings(void *from, void *to);
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
 void free_boot_hyp_pgd(void);
@@ -109,6 +99,7 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
 phys_addr_t kvm_mmu_get_httbr(void);
 phys_addr_t kvm_mmu_get_boot_httbr(void);
 phys_addr_t kvm_get_idmap_vector(void);
+phys_addr_t kvm_get_idmap_start(void);
 int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);
@@ -121,19 +112,32 @@ static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
 static inline void kvm_clean_pte(pte_t *pte) {}
 static inline void kvm_clean_pte_entry(pte_t *pte) {}

-static inline void kvm_set_s2pte_writable(pte_t *pte)
+static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
 {
- pte_val(*pte) |= PTE_S2_RDWR;
+ pte_val(pte) |= PTE_S2_RDWR;
+ return pte;
 }

-static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
+static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
 {
- pmd_val(*pmd) |= PMD_S2_RDWR;
+ pmd_val(pmd) |= PMD_S2_RDWR;
+ return pmd;
 }

 static inline void kvm_set_s2pte_readonly(pte_t *pte)
 {
- pte_val(*pte) = (pte_val(*pte) & ~PTE_S2_RDWR) | PTE_S2_RDONLY;
+ pteval_t pteval;
+ unsigned long tmp;
+
+ asm volatile("// kvm_set_s2pte_readonly\n"
+ " prfm pstl1strm, %2\n"
+ "1: ldxr %0, %2\n"
+ " and %0, %0, %3 // clear PTE_S2_RDWR\n"
+ " orr %0, %0, %4 // set PTE_S2_RDONLY\n"
+ " stxr %w1, %0, %2\n"
+ " cbnz %w1, 1b\n"
+ : "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*pte))
+ : "L" (~PTE_S2_RDWR), "L" (PTE_S2_RDONLY));
 }

 static inline bool kvm_s2pte_readonly(pte_t *pte)
@@ -143,69 +147,12 @@ static inline bool kvm_s2pte_readonly(pte_t *pte)

 static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
 {
- pmd_val(*pmd) = (pmd_val(*pmd) & ~PMD_S2_RDWR) | PMD_S2_RDONLY;
+ kvm_set_s2pte_readonly((pte_t *)pmd);
 }
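kvm_set_s2pte_readonly() above became a load-exclusive/store-exclusive loop so a permission downgrade cannot race with, and lose, a concurrent update of other bits in the same entry. A C11 sketch of the equivalent read-modify-write; the S2AP bit values are placeholders standing in for PTE_S2_RDONLY/PTE_S2_RDWR:

#include <stdatomic.h>
#include <stdint.h>

#define S2_RDONLY	(1ULL << 6)	/* placeholder for PTE_S2_RDONLY */
#define S2_RDWR		(3ULL << 6)	/* placeholder for PTE_S2_RDWR */

static void s2pte_make_readonly(_Atomic uint64_t *pte)
{
	uint64_t old = atomic_load_explicit(pte, memory_order_relaxed);

	/* Retry until the swap wins, preserving concurrently-set bits,
	 * just like the ldxr/stxr loop in the header. */
	while (!atomic_compare_exchange_weak(pte, &old,
					     (old & ~S2_RDWR) | S2_RDONLY))
		;
}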
 static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
 {
- return (pmd_val(*pmd) & PMD_S2_RDWR) == PMD_S2_RDONLY;
-}
-
-
-#define kvm_pgd_addr_end(addr, end) pgd_addr_end(addr, end)
-#define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end)
-#define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end)
-
-/*
- * In the case where PGDIR_SHIFT is larger than KVM_PHYS_SHIFT, we can address
- * the entire IPA input range with a single pgd entry, and we would only need
- * one pgd entry. Note that in this case, the pgd is actually not used by
- * the MMU for Stage-2 translations, but is merely a fake pgd used as a data
- * structure for the kernel pgtable macros to work.
- */
-#if PGDIR_SHIFT > KVM_PHYS_SHIFT
-#define PTRS_PER_S2_PGD_SHIFT 0
-#else
-#define PTRS_PER_S2_PGD_SHIFT (KVM_PHYS_SHIFT - PGDIR_SHIFT)
-#endif
-#define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT)
-
-#define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
-
-/*
- * If we are concatenating first level stage-2 page tables, we would have less
- * than or equal to 16 pointers in the fake PGD, because that's what the
- * architecture allows. In this case, (4 - CONFIG_PGTABLE_LEVELS)
- * represents the first level for the host, and we add 1 to go to the next
- * level (which uses contatenation) for the stage-2 tables.
- */
-#if PTRS_PER_S2_PGD <= 16
-#define KVM_PREALLOC_LEVEL (4 - CONFIG_PGTABLE_LEVELS + 1)
-#else
-#define KVM_PREALLOC_LEVEL (0)
-#endif
-
-static inline void *kvm_get_hwpgd(struct kvm *kvm)
-{
- pgd_t *pgd = kvm->arch.pgd;
- pud_t *pud;
-
- if (KVM_PREALLOC_LEVEL == 0)
- return pgd;
-
- pud = pud_offset(pgd, 0);
- if (KVM_PREALLOC_LEVEL == 1)
- return pud;
-
- BUG_ON(KVM_PREALLOC_LEVEL != 2);
- return pmd_offset(pud, 0);
-}
-
-static inline unsigned int kvm_get_hwpgd_size(void)
-{
- if (KVM_PREALLOC_LEVEL > 0)
- return PTRS_PER_S2_PGD * PAGE_SIZE;
- return PTRS_PER_S2_PGD * sizeof(pgd_t);
+ return kvm_s2pte_readonly((pte_t *)pmd);
 }

 static inline bool kvm_page_empty(void *ptr)
@@ -214,23 +161,20 @@ static inline bool kvm_page_empty(void *ptr)
 return page_count(ptr_page) == 1;
 }

-#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
+#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)

 #ifdef __PAGETABLE_PMD_FOLDED
-#define kvm_pmd_table_empty(kvm, pmdp) (0)
+#define hyp_pmd_table_empty(pmdp) (0)
 #else
-#define kvm_pmd_table_empty(kvm, pmdp) \
- (kvm_page_empty(pmdp) && (!(kvm) || KVM_PREALLOC_LEVEL < 2))
+#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
 #endif

 #ifdef __PAGETABLE_PUD_FOLDED
-#define kvm_pud_table_empty(kvm, pudp) (0)
+#define hyp_pud_table_empty(pudp) (0)
 #else
-#define kvm_pud_table_empty(kvm, pudp) \
- (kvm_page_empty(pudp) && (!(kvm) || KVM_PREALLOC_LEVEL < 1))
+#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
 #endif

-
 struct kvm;

 #define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 12f8a00fb..31b73227b 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -40,8 +40,24 @@
 #define PCI_IO_SIZE SZ_16M

 /*
- * PAGE_OFFSET - the virtual address of the start of the kernel image (top
+ * Log2 of the upper bound of the size of a struct page. Used for sizing
+ * the vmemmap region only, does not affect actual memory footprint.
+ * We don't use sizeof(struct page) directly since taking its size here
+ * requires its definition to be available at this point in the inclusion
+ * chain, and it may not be a power of 2 in the first place.
+ */
+#define STRUCT_PAGE_MAX_SHIFT 6
+
+/*
+ * VMEMMAP_SIZE - allows the whole linear region to be covered by
+ * a struct page array
+ */
+#define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT))
+
+/*
+ * PAGE_OFFSET - the virtual address of the start of the linear map (top
 * (VA_BITS - 1))
+ * KIMAGE_VADDR - the virtual address of the start of the kernel image
 * VA_BITS - the maximum number of bits for virtual addresses.
 * VA_START - the first kernel virtual address.
 * TASK_SIZE - the maximum size of a user space task.
@@ -54,7 +70,8 @@
 #define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
 #define MODULES_VADDR (VA_START + KASAN_SHADOW_SIZE)
 #define MODULES_VSIZE (SZ_128M)
-#define PCI_IO_END (PAGE_OFFSET - SZ_2M)
+#define VMEMMAP_START (PAGE_OFFSET - VMEMMAP_SIZE)
+#define PCI_IO_END (VMEMMAP_START - SZ_2M)
 #define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
 #define FIXADDR_TOP (PCI_IO_START - SZ_2M)
 #define TASK_SIZE_64 (UL(1) << VA_BITS)
@@ -71,6 +88,9 @@

 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4))

+#define KERNEL_START _text
+#define KERNEL_END _end
+
 /*
 * The size of the KASAN shadow region. This should be 1/8th of the
 * size of the entire kernel virtual address space.
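The VMEMMAP_SIZE bound above is worth a worked example. Half of the kernel VA space is linear map, so with VA_BITS = 48 and 4K pages there are 2^(48 - 12 - 1) pages, each needing at most 2^6 bytes of struct page:

/* Assuming VA_BITS = 48, PAGE_SHIFT = 12, STRUCT_PAGE_MAX_SHIFT = 6:
 * 1UL << (48 - 12 - 1 + 6) == 1UL << 41 == 2 TiB of vmemmap space. */
_Static_assert((1UL << (48 - 12 - 1 + 6)) == (1UL << 41), "vmemmap bound");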
@@ -192,9 +212,19 @@ static inline void *phys_to_virt(phys_addr_t x)
 */
 #define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET)

+#ifndef CONFIG_SPARSEMEM_VMEMMAP
 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#else
+#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
+#define __page_to_voff(kaddr) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
+
+#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
+#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
+
+#define virt_addr_valid(kaddr) pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
+ + PHYS_OFFSET) >> PAGE_SHIFT)
+#endif
 #endif

 #include <asm-generic/memory_model.h>
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 990124a67..97b1d8f26 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -29,6 +29,7 @@ typedef struct {
 #define ASID(mm) ((mm)->context.id.counter & 0xffff)

 extern void paging_init(void);
+extern void bootmem_init(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
 extern void init_mem_pgprot(void);
 extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
diff --git a/arch/arm64/include/asm/mmzone.h b/arch/arm64/include/asm/mmzone.h
new file mode 100644
index 000000000..a0de9e6ba
--- /dev/null
+++ b/arch/arm64/include/asm/mmzone.h
@@ -0,0 +1,12 @@
+#ifndef __ASM_MMZONE_H
+#define __ASM_MMZONE_H
+
+#ifdef CONFIG_NUMA
+
+#include <asm/numa.h>
+
+extern struct pglist_data *node_data[];
+#define NODE_DATA(nid) (node_data[(nid)])
+
+#endif /* CONFIG_NUMA */
+#endif /* __ASM_MMZONE_H */
diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h
new file mode 100644
index 000000000..e9b4f2942
--- /dev/null
+++ b/arch/arm64/include/asm/numa.h
@@ -0,0 +1,45 @@
+#ifndef __ASM_NUMA_H
+#define __ASM_NUMA_H
+
+#include <asm/topology.h>
+
+#ifdef CONFIG_NUMA
+
+/* currently, arm64 implements flat NUMA topology */
+#define parent_node(node) (node)
+
+int __node_distance(int from, int to);
+#define node_distance(a, b) __node_distance(a, b)
+
+extern nodemask_t numa_nodes_parsed __initdata;
+
+/* Mappings between node number and cpus on that node. */
+extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+void numa_clear_node(unsigned int cpu);
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+const struct cpumask *cpumask_of_node(int node);
+#else
+/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
+static inline const struct cpumask *cpumask_of_node(int node)
+{
+ return node_to_cpumask_map[node];
+}
+#endif
+
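mmzone.h and numa.h above add the usual flat-NUMA plumbing. A short sketch of how the pieces compose, with an invented helper and treating 10 as the conventional LOCAL_DISTANCE self-distance:

/* Declarations mirroring the new headers; not a complete translation unit. */
struct pglist_data;
extern struct pglist_data *node_data[];
int __node_distance(int from, int to);

/* NODE_DATA(nid) resolves to node_data[nid]; a remote node is any node
 * whose distance from the local one exceeds the self-distance. */
static inline int node_is_remote(int local, int candidate)
{
	return __node_distance(local, candidate) > 10;
}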
+void __init arm64_numa_init(void);
+int __init numa_add_memblk(int nodeid, u64 start, u64 end);
+void __init numa_set_distance(int from, int to, int distance);
+void __init numa_free_distance(void);
+void __init early_map_cpu_to_node(unsigned int cpu, int nid);
+void numa_store_cpu_info(unsigned int cpu);
+
+#else /* CONFIG_NUMA */
+
+static inline void numa_store_cpu_info(unsigned int cpu) { }
+static inline void arm64_numa_init(void) { }
+static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { }
+
+#endif /* CONFIG_NUMA */
+
+#endif /* __ASM_NUMA_H */
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index ae615b9d9..8472c6def 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -19,18 +19,12 @@
 #ifndef __ASM_PAGE_H
 #define __ASM_PAGE_H

+#include <linux/const.h>
+
 /* PAGE_SHIFT determines the page size */
 /* CONT_SHIFT determines the number of pages which can be tracked together */
-#ifdef CONFIG_ARM64_64K_PAGES
-#define PAGE_SHIFT 16
-#define CONT_SHIFT 5
-#elif defined(CONFIG_ARM64_16K_PAGES)
-#define PAGE_SHIFT 14
-#define CONT_SHIFT 7
-#else
-#define PAGE_SHIFT 12
-#define CONT_SHIFT 4
-#endif
+#define PAGE_SHIFT CONFIG_ARM64_PAGE_SHIFT
+#define CONT_SHIFT CONFIG_ARM64_CONT_SHIFT

 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK (~(PAGE_SIZE-1))
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index ff98585d0..d25f4f137 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -26,7 +26,7 @@

 #define check_pgt_cache() do { } while (0)

-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 #define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))

 #if CONFIG_PGTABLE_LEVELS > 2
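The page.h change above takes PAGE_SHIFT and CONT_SHIFT straight from Kconfig; the values themselves are unchanged. A compile-time restatement of what each configuration implies for the contiguous-hint span:

/* 4K:  12 + 4 -> 16 pages  = 64 KiB
 * 16K: 14 + 7 -> 128 pages = 2 MiB
 * 64K: 16 + 5 -> 32 pages  = 2 MiB */
_Static_assert((1UL << (12 + 4)) == (64UL << 10), "4K cont span");
_Static_assert((1UL << (14 + 7)) == (2UL << 20), "16K cont span");
_Static_assert((1UL << (16 + 5)) == (2UL << 20), "64K cont span");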
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 9786f7700..2813748e2 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -207,23 +207,69 @@
 #define TCR_T1SZ(x) ((UL(64) - (x)) << TCR_T1SZ_OFFSET)
 #define TCR_TxSZ(x) (TCR_T0SZ(x) | TCR_T1SZ(x))
 #define TCR_TxSZ_WIDTH 6
-#define TCR_IRGN_NC ((UL(0) << 8) | (UL(0) << 24))
-#define TCR_IRGN_WBWA ((UL(1) << 8) | (UL(1) << 24))
-#define TCR_IRGN_WT ((UL(2) << 8) | (UL(2) << 24))
-#define TCR_IRGN_WBnWA ((UL(3) << 8) | (UL(3) << 24))
-#define TCR_IRGN_MASK ((UL(3) << 8) | (UL(3) << 24))
-#define TCR_ORGN_NC ((UL(0) << 10) | (UL(0) << 26))
-#define TCR_ORGN_WBWA ((UL(1) << 10) | (UL(1) << 26))
-#define TCR_ORGN_WT ((UL(2) << 10) | (UL(2) << 26))
-#define TCR_ORGN_WBnWA ((UL(3) << 10) | (UL(3) << 26))
-#define TCR_ORGN_MASK ((UL(3) << 10) | (UL(3) << 26))
-#define TCR_SHARED ((UL(3) << 12) | (UL(3) << 28))
-#define TCR_TG0_4K (UL(0) << 14)
-#define TCR_TG0_64K (UL(1) << 14)
-#define TCR_TG0_16K (UL(2) << 14)
-#define TCR_TG1_16K (UL(1) << 30)
-#define TCR_TG1_4K (UL(2) << 30)
-#define TCR_TG1_64K (UL(3) << 30)
+
+#define TCR_IRGN0_SHIFT 8
+#define TCR_IRGN0_MASK (UL(3) << TCR_IRGN0_SHIFT)
+#define TCR_IRGN0_NC (UL(0) << TCR_IRGN0_SHIFT)
+#define TCR_IRGN0_WBWA (UL(1) << TCR_IRGN0_SHIFT)
+#define TCR_IRGN0_WT (UL(2) << TCR_IRGN0_SHIFT)
+#define TCR_IRGN0_WBnWA (UL(3) << TCR_IRGN0_SHIFT)
+
+#define TCR_IRGN1_SHIFT 24
+#define TCR_IRGN1_MASK (UL(3) << TCR_IRGN1_SHIFT)
+#define TCR_IRGN1_NC (UL(0) << TCR_IRGN1_SHIFT)
+#define TCR_IRGN1_WBWA (UL(1) << TCR_IRGN1_SHIFT)
+#define TCR_IRGN1_WT (UL(2) << TCR_IRGN1_SHIFT)
+#define TCR_IRGN1_WBnWA (UL(3) << TCR_IRGN1_SHIFT)
+
+#define TCR_IRGN_NC (TCR_IRGN0_NC | TCR_IRGN1_NC)
+#define TCR_IRGN_WBWA (TCR_IRGN0_WBWA | TCR_IRGN1_WBWA)
+#define TCR_IRGN_WT (TCR_IRGN0_WT | TCR_IRGN1_WT)
+#define TCR_IRGN_WBnWA (TCR_IRGN0_WBnWA | TCR_IRGN1_WBnWA)
+#define TCR_IRGN_MASK (TCR_IRGN0_MASK | TCR_IRGN1_MASK)
+
+
+#define TCR_ORGN0_SHIFT 10
+#define TCR_ORGN0_MASK (UL(3) << TCR_ORGN0_SHIFT)
+#define TCR_ORGN0_NC (UL(0) << TCR_ORGN0_SHIFT)
+#define TCR_ORGN0_WBWA (UL(1) << TCR_ORGN0_SHIFT)
+#define TCR_ORGN0_WT (UL(2) << TCR_ORGN0_SHIFT)
+#define TCR_ORGN0_WBnWA (UL(3) << TCR_ORGN0_SHIFT)
+
+#define TCR_ORGN1_SHIFT 26
+#define TCR_ORGN1_MASK (UL(3) << TCR_ORGN1_SHIFT)
+#define TCR_ORGN1_NC (UL(0) << TCR_ORGN1_SHIFT)
+#define TCR_ORGN1_WBWA (UL(1) << TCR_ORGN1_SHIFT)
+#define TCR_ORGN1_WT (UL(2) << TCR_ORGN1_SHIFT)
+#define TCR_ORGN1_WBnWA (UL(3) << TCR_ORGN1_SHIFT)
+
+#define TCR_ORGN_NC (TCR_ORGN0_NC | TCR_ORGN1_NC)
+#define TCR_ORGN_WBWA (TCR_ORGN0_WBWA | TCR_ORGN1_WBWA)
+#define TCR_ORGN_WT (TCR_ORGN0_WT | TCR_ORGN1_WT)
+#define TCR_ORGN_WBnWA (TCR_ORGN0_WBnWA | TCR_ORGN1_WBnWA)
+#define TCR_ORGN_MASK (TCR_ORGN0_MASK | TCR_ORGN1_MASK)
+
+#define TCR_SH0_SHIFT 12
+#define TCR_SH0_MASK (UL(3) << TCR_SH0_SHIFT)
+#define TCR_SH0_INNER (UL(3) << TCR_SH0_SHIFT)
+
+#define TCR_SH1_SHIFT 28
+#define TCR_SH1_MASK (UL(3) << TCR_SH1_SHIFT)
+#define TCR_SH1_INNER (UL(3) << TCR_SH1_SHIFT)
+#define TCR_SHARED (TCR_SH0_INNER | TCR_SH1_INNER)
+
+#define TCR_TG0_SHIFT 14
+#define TCR_TG0_MASK (UL(3) << TCR_TG0_SHIFT)
+#define TCR_TG0_4K (UL(0) << TCR_TG0_SHIFT)
+#define TCR_TG0_64K (UL(1) << TCR_TG0_SHIFT)
+#define TCR_TG0_16K (UL(2) << TCR_TG0_SHIFT)
+
+#define TCR_TG1_SHIFT 30
+#define TCR_TG1_MASK (UL(3) << TCR_TG1_SHIFT)
+#define TCR_TG1_16K (UL(1) << TCR_TG1_SHIFT)
+#define TCR_TG1_4K (UL(2) << TCR_TG1_SHIFT)
+#define TCR_TG1_64K (UL(3) << TCR_TG1_SHIFT)
+
 #define TCR_ASID16 (UL(1) << 36)
 #define TCR_TBI0 (UL(1) << 37)
 #define TCR_HA (UL(1) << 39)
diff --git a/arch/arm64/include/asm/pgtable-types.h b/arch/arm64/include/asm/pgtable-types.h
index 2b1bd7e52..69b2fd415 100644
--- a/arch/arm64/include/asm/pgtable-types.h
+++ b/arch/arm64/include/asm/pgtable-types.h
@@ -27,10 +27,6 @@ typedef u64 pmdval_t;
 typedef u64 pudval_t;
 typedef u64 pgdval_t;

-#undef STRICT_MM_TYPECHECKS
-
-#ifdef STRICT_MM_TYPECHECKS
-
 /*
 * These are used to make use of C type-checking..
 */
@@ -58,34 +54,6 @@ typedef struct { pteval_t pgprot; } pgprot_t;
 #define pgprot_val(x) ((x).pgprot)
 #define __pgprot(x) ((pgprot_t) { (x) } )

-#else /* !STRICT_MM_TYPECHECKS */
-
-typedef pteval_t pte_t;
-#define pte_val(x) (x)
-#define __pte(x) (x)
-
-#if CONFIG_PGTABLE_LEVELS > 2
-typedef pmdval_t pmd_t;
-#define pmd_val(x) (x)
-#define __pmd(x) (x)
-#endif
-
-#if CONFIG_PGTABLE_LEVELS > 3
-typedef pudval_t pud_t;
-#define pud_val(x) (x)
-#define __pud(x) (x)
-#endif
-
-typedef pgdval_t pgd_t;
-#define pgd_val(x) (x)
-#define __pgd(x) (x)
-
-typedef pteval_t pgprot_t;
-#define pgprot_val(x) (x)
-#define __pgprot(x) (x)
-
-#endif /* STRICT_MM_TYPECHECKS */
-
 #if CONFIG_PGTABLE_LEVELS == 2
 #include <asm-generic/pgtable-nopmd.h>
 #elif CONFIG_PGTABLE_LEVELS == 3
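With the !STRICT_MM_TYPECHECKS fallback removed above, pte_t and friends are always distinct single-member structs, so a mixed-up table level becomes a compile error rather than a silent integer conversion. A minimal illustration (xpte_t/xpmd_t mimic the shape of the kept typedefs):

typedef struct { unsigned long pte; } xpte_t;
typedef struct { unsigned long pmd; } xpmd_t;

extern void set_xpte(xpte_t *ptep, xpte_t pte);

void example(xpmd_t *pmdp, xpmd_t pmd)
{
	/* set_xpte(pmdp, pmd);  -- rejected by the compiler with the struct
	 * wrappers; the removed plain-typedef variant would have accepted
	 * it silently. Any level-crossing cast must now be explicit: */
	set_xpte((xpte_t *)pmdp, (xpte_t){ pmd.pmd });
}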
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 44430ce58..46472a91b 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -24,22 +24,16 @@
 #include <asm/pgtable-prot.h>

 /*
- * VMALLOC and SPARSEMEM_VMEMMAP ranges.
+ * VMALLOC range.
 *
- * VMEMAP_SIZE: allows the whole linear region to be covered by a struct page array
- * (rounded up to PUD_SIZE).
 * VMALLOC_START: beginning of the kernel vmalloc space
- * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space,
- * fixed mappings and modules
+ * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
+ * and fixed mappings
 */
-#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
-
 #define VMALLOC_START (MODULES_END)
 #define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

-#define VMEMMAP_START (VMALLOC_END + SZ_64K)
-#define vmemmap ((struct page *)VMEMMAP_START - \
- SECTION_ALIGN_DOWN(memstart_addr >> PAGE_SHIFT))
+#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))

 #define FIRST_USER_ADDRESS 0UL

@@ -58,7 +52,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 * for zero-mapped memory areas etc..
 */
 extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
+#define ZERO_PAGE(vaddr) pfn_to_page(PHYS_PFN(__pa(empty_zero_page)))

 #define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))

@@ -272,6 +266,21 @@ static inline pgprot_t mk_sect_prot(pgprot_t prot)
 return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
 }

+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * See the comment in include/asm-generic/pgtable.h
+ */
+static inline int pte_protnone(pte_t pte)
+{
+ return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
+}
+
+static inline int pmd_protnone(pmd_t pmd)
+{
+ return pte_protnone(pmd_pte(pmd));
+}
+#endif
+
 /*
 * THP definitions.
 */
@@ -286,11 +295,13 @@ static inline pgprot_t mk_sect_prot(pgprot_t prot)
 #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
 #define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
 #define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
-#define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
+#define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
 #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
 #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
 #define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))

+#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
+
 #define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd) pte_write(pmd_pte(pmd))

@@ -305,11 +316,6 @@ static inline pgprot_t mk_sect_prot(pgprot_t prot)
 #define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

-static inline int has_transparent_hugepage(void)
-{
- return 1;
-}
-
 #define __pgprot_modify(prot,mask,bits) \
 __pgprot((pgprot_val(prot) & ~(mask)) | (bits))

@@ -329,7 +335,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,

 #define pmd_none(pmd) (!pmd_val(pmd))

-#define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
+#define pmd_bad(pmd) (!(pmd_val(pmd) & PMD_TABLE_BIT))

 #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
 PMD_TYPE_TABLE)
@@ -394,7 +400,7 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd))

 #define pud_none(pud) (!pud_val(pud))
-#define pud_bad(pud) (!(pud_val(pud) & 2))
+#define pud_bad(pud) (!(pud_val(pud) & PUD_TABLE_BIT))
 #define pud_present(pud) (pud_val(pud))

 static inline void set_pud(pud_t *pudp, pud_t pud)
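pte_protnone() above keys off the combination "PROT_NONE set, VALID clear": NUMA-balancing entries must fault in hardware while remaining distinguishable from genuinely empty PTEs in software. As a sketch (bit positions follow arm64's encoding, but treat them as placeholders):

#include <stdbool.h>
#include <stdint.h>

#define XPTE_VALID	(1ULL << 0)
#define XPTE_PROT_NONE	(1ULL << 58)	/* software-defined bit */

static bool xpte_protnone(uint64_t pteval)
{
	return (pteval & (XPTE_VALID | XPTE_PROT_NONE)) == XPTE_PROT_NONE;
}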
@@ -545,14 +551,12 @@ static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
 * Atomic pte/pmd modifications.
 */
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address,
- pte_t *ptep)
+static inline int __ptep_test_and_clear_young(pte_t *ptep)
 {
 pteval_t pteval;
 unsigned int tmp, res;

- asm volatile("// ptep_test_and_clear_young\n"
+ asm volatile("// __ptep_test_and_clear_young\n"
 " prfm pstl1strm, %2\n"
 "1: ldxr %0, %2\n"
 " ubfx %w3, %w0, %5, #1 // extract PTE_AF (young)\n"
@@ -565,6 +569,13 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 return res;
 }

+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep)
+{
+ return __ptep_test_and_clear_young(ptep);
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 817a067ba..022644704 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -113,6 +113,29 @@ static inline void update_cpu_boot_status(int val)
 dsb(ishst);
 }

+/*
+ * The calling secondary CPU has detected serious configuration mismatch,
+ * which calls for a kernel panic. Update the boot status and park the calling
+ * CPU.
+ */
+static inline void cpu_panic_kernel(void)
+{
+ update_cpu_boot_status(CPU_PANIC_KERNEL);
+ cpu_park_loop();
+}
+
+/*
+ * If a secondary CPU enters the kernel but fails to come online
+ * (e.g. due to mismatched features) and cannot exit the kernel,
+ * we increment cpus_stuck_in_kernel and leave the CPU in a
+ * quiescent loop within the kernel text. The memory containing
+ * this loop must not be re-used for anything else as the 'stuck'
+ * core is executing it.
+ *
+ * This function is used to inhibit features like kexec and hibernate.
+ */
+bool cpus_are_stuck_in_kernel(void);
+
 #endif /* ifndef __ASSEMBLY__ */

 #endif /* ifndef __ASM_SMP_H */
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index fc9682bfe..e875a5a55 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -30,22 +30,53 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
 unsigned int tmp;
 arch_spinlock_t lockval;
+ u32 owner;
+
+ /*
+ * Ensure prior spin_lock operations to other locks have completed
+ * on this CPU before we test whether "lock" is locked.
+ */
+ smp_mb();
+ owner = READ_ONCE(lock->owner) << 16;

 asm volatile(
" sevl\n"
"1: wfe\n"
"2: ldaxr %w0, %2\n"
+ /* Is the lock free? */
" eor %w1, %w0, %w0, ror #16\n"
-" cbnz %w1, 1b\n"
+" cbz %w1, 3f\n"
+ /* Lock taken -- has there been a subsequent unlock->lock transition? */
+" eor %w1, %w3, %w0, lsl #16\n"
+" cbz %w1, 1b\n"
+ /*
+ * The owner has been updated, so there was an unlock->lock
+ * transition that we missed. That means we can rely on the
+ * store-release of the unlock operation paired with the
+ * load-acquire of the lock operation to publish any of our
+ * previous stores to the new lock owner and therefore don't
+ * need to bother with the writeback below.
+ */
+" b 4f\n"
+"3:\n"
+ /*
+ * Serialise against any concurrent lockers by writing back the
+ * unlocked lock value
+ */
 ARM64_LSE_ATOMIC_INSN(
 /* LL/SC */
" stxr %w1, %w0, %2\n"
-" cbnz %w1, 2b\n", /* Serialise against any concurrent lockers */
- /* LSE atomics */
" nop\n"
-" nop\n")
+" nop\n",
+ /* LSE atomics */
+" mov %w1, %w0\n"
+" cas %w0, %w0, %2\n"
+" eor %w1, %w1, %w0\n")
+ /* Somebody else wrote to the lock, GOTO 10 and reload the value */
+" cbnz %w1, 2b\n"
+"4:"
 : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
- :
+ : "r" (owner)
 : "memory");
 }

@@ -148,6 +179,7 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)

 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
+ smp_mb(); /* See arch_spin_unlock_wait */
 return !arch_spin_value_unlocked(READ_ONCE(*lock));
 }
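The rewritten arch_spin_unlock_wait() above waits until the lock is either free or has changed hands at least once; only in the free case does it write the value back to serialise against concurrent lockers. A C-flavoured model of the asm, assuming the arm64 ticket layout (owner in the low 16 bits, next in the high 16) and hypothetical load_acquire()/try_store_back() primitives standing in for ldaxr/stxr:

#include <stdint.h>

struct ticket { uint16_t owner, next; };

extern struct ticket load_acquire(struct ticket *l);
extern int try_store_back(struct ticket *l, struct ticket v);

void unlock_wait_sketch(struct ticket *l, uint16_t owner_snapshot)
{
	for (;;) {
		struct ticket v = load_acquire(l);

		if (v.owner != v.next) {	/* lock still held */
			if (v.owner != owner_snapshot)
				return;	/* missed an unlock->lock: its
					 * release/acquire pair publishes
					 * our prior stores; nothing to do */
			continue;		/* same owner: keep waiting */
		}
		if (try_store_back(l, v))	/* free: serialise, done */
			return;
	}
}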
diff --git a/arch/arm64/include/asm/stage2_pgtable-nopmd.h b/arch/arm64/include/asm/stage2_pgtable-nopmd.h
new file mode 100644
index 000000000..2656a0fd0
--- /dev/null
+++ b/arch/arm64/include/asm/stage2_pgtable-nopmd.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2016 - ARM Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_S2_PGTABLE_NOPMD_H_
+#define __ARM64_S2_PGTABLE_NOPMD_H_
+
+#include <asm/stage2_pgtable-nopud.h>
+
+#define __S2_PGTABLE_PMD_FOLDED
+
+#define S2_PMD_SHIFT S2_PUD_SHIFT
+#define S2_PTRS_PER_PMD 1
+#define S2_PMD_SIZE (1UL << S2_PMD_SHIFT)
+#define S2_PMD_MASK (~(S2_PMD_SIZE-1))
+
+#define stage2_pud_none(pud) (0)
+#define stage2_pud_present(pud) (1)
+#define stage2_pud_clear(pud) do { } while (0)
+#define stage2_pud_populate(pud, pmd) do { } while (0)
+#define stage2_pmd_offset(pud, address) ((pmd_t *)(pud))
+
+#define stage2_pmd_free(pmd) do { } while (0)
+
+#define stage2_pmd_addr_end(addr, end) (end)
+
+#define stage2_pud_huge(pud) (0)
+#define stage2_pmd_table_empty(pmdp) (0)
+
+#endif
diff --git a/arch/arm64/include/asm/stage2_pgtable-nopud.h b/arch/arm64/include/asm/stage2_pgtable-nopud.h
new file mode 100644
index 000000000..5ee87b54e
--- /dev/null
+++ b/arch/arm64/include/asm/stage2_pgtable-nopud.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2016 - ARM Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_S2_PGTABLE_NOPUD_H_
+#define __ARM64_S2_PGTABLE_NOPUD_H_
+
+#define __S2_PGTABLE_PUD_FOLDED
+
+#define S2_PUD_SHIFT S2_PGDIR_SHIFT
+#define S2_PTRS_PER_PUD 1
+#define S2_PUD_SIZE (_AC(1, UL) << S2_PUD_SHIFT)
+#define S2_PUD_MASK (~(S2_PUD_SIZE-1))
+
+#define stage2_pgd_none(pgd) (0)
+#define stage2_pgd_present(pgd) (1)
+#define stage2_pgd_clear(pgd) do { } while (0)
+#define stage2_pgd_populate(pgd, pud) do { } while (0)
+
+#define stage2_pud_offset(pgd, address) ((pud_t *)(pgd))
+
+#define stage2_pud_free(x) do { } while (0)
+
+#define stage2_pud_addr_end(addr, end) (end)
+#define stage2_pud_table_empty(pmdp) (0)
+
+#endif
diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h
new file mode 100644
index 000000000..8b6809934
--- /dev/null
+++ b/arch/arm64/include/asm/stage2_pgtable.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2016 - ARM Ltd
+ *
+ * stage2 page table helpers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_S2_PGTABLE_H_
+#define __ARM64_S2_PGTABLE_H_
+
+#include <asm/pgtable.h>
+
+/*
+ * The hardware supports concatenation of up to 16 tables at stage2 entry level
+ * and we use the feature whenever possible.
+ *
+ * Now, the minimum number of bits resolved at any level is (PAGE_SHIFT - 3).
+ * On arm64, the smallest PAGE_SIZE supported is 4k, which means
+ * (PAGE_SHIFT - 3) > 4 holds for all page sizes.
+ * This implies, the total number of page table levels at stage2 expected
+ * by the hardware is actually the number of levels required for (KVM_PHYS_SHIFT - 4)
+ * in normal translations (e.g., stage1), since we cannot have another level in
+ * the range (KVM_PHYS_SHIFT, KVM_PHYS_SHIFT - 4).
+ */
+#define STAGE2_PGTABLE_LEVELS ARM64_HW_PGTABLE_LEVELS(KVM_PHYS_SHIFT - 4)
+
+/*
+ * With all the supported VA_BITs and 40bit guest IPA, the following condition
+ * is always true:
+ *
+ *       STAGE2_PGTABLE_LEVELS <= CONFIG_PGTABLE_LEVELS
+ *
+ * We base our stage-2 page table walker helpers on this assumption and
+ * fall back to using the host version of the helper wherever possible.
+ * i.e., if a particular level is not folded (e.g., PUD) at stage2, we fall back
+ * to using the host version, since it is guaranteed it is not folded at host.
+ *
+ * If the condition breaks in the future, we can rearrange the host level
+ * definitions and reuse them for stage2. Till then...
+ */
+#if STAGE2_PGTABLE_LEVELS > CONFIG_PGTABLE_LEVELS
+#error "Unsupported combination of guest IPA and host VA_BITS."
+#endif
+
+/* S2_PGDIR_SHIFT is the size mapped by top-level stage2 entry */
+#define S2_PGDIR_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - STAGE2_PGTABLE_LEVELS)
+#define S2_PGDIR_SIZE (_AC(1, UL) << S2_PGDIR_SHIFT)
+#define S2_PGDIR_MASK (~(S2_PGDIR_SIZE - 1))
+
+/*
+ * The number of PTRS across all concatenated stage2 tables given by the
+ * number of bits resolved at the initial level.
+ */
+#define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - S2_PGDIR_SHIFT))
+
+/*
+ * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
+ * levels in addition to the PGD.
+ */
+#define KVM_MMU_CACHE_MIN_PAGES (STAGE2_PGTABLE_LEVELS - 1)
+
+
+#if STAGE2_PGTABLE_LEVELS > 3
+
+#define S2_PUD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
+#define S2_PUD_SIZE (_AC(1, UL) << S2_PUD_SHIFT)
+#define S2_PUD_MASK (~(S2_PUD_SIZE - 1))
+
+#define stage2_pgd_none(pgd) pgd_none(pgd)
+#define stage2_pgd_clear(pgd) pgd_clear(pgd)
+#define stage2_pgd_present(pgd) pgd_present(pgd)
+#define stage2_pgd_populate(pgd, pud) pgd_populate(NULL, pgd, pud)
+#define stage2_pud_offset(pgd, address) pud_offset(pgd, address)
+#define stage2_pud_free(pud) pud_free(NULL, pud)
+
+#define stage2_pud_table_empty(pudp) kvm_page_empty(pudp)
+
+static inline phys_addr_t stage2_pud_addr_end(phys_addr_t addr, phys_addr_t end)
+{
+ phys_addr_t boundary = (addr + S2_PUD_SIZE) & S2_PUD_MASK;
+
+ return (boundary - 1 < end - 1) ? boundary : end;
+}
+
+#endif /* STAGE2_PGTABLE_LEVELS > 3 */
+
+
+#if STAGE2_PGTABLE_LEVELS > 2
+
+#define S2_PMD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
+#define S2_PMD_SIZE (_AC(1, UL) << S2_PMD_SHIFT)
+#define S2_PMD_MASK (~(S2_PMD_SIZE - 1))
+
+#define stage2_pud_none(pud) pud_none(pud)
+#define stage2_pud_clear(pud) pud_clear(pud)
+#define stage2_pud_present(pud) pud_present(pud)
+#define stage2_pud_populate(pud, pmd) pud_populate(NULL, pud, pmd)
+#define stage2_pmd_offset(pud, address) pmd_offset(pud, address)
+#define stage2_pmd_free(pmd) pmd_free(NULL, pmd)
+
+#define stage2_pud_huge(pud) pud_huge(pud)
+#define stage2_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
+
+static inline phys_addr_t stage2_pmd_addr_end(phys_addr_t addr, phys_addr_t end)
+{
+ phys_addr_t boundary = (addr + S2_PMD_SIZE) & S2_PMD_MASK;
+
+ return (boundary - 1 < end - 1) ? boundary : end;
+}
+
+#endif /* STAGE2_PGTABLE_LEVELS > 2 */
+
+#define stage2_pte_table_empty(ptep) kvm_page_empty(ptep)
+
+#if STAGE2_PGTABLE_LEVELS == 2
+#include <asm/stage2_pgtable-nopmd.h>
+#elif STAGE2_PGTABLE_LEVELS == 3
+#include <asm/stage2_pgtable-nopud.h>
+#endif
+
+
+#define stage2_pgd_index(addr) (((addr) >> S2_PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
+
+static inline phys_addr_t stage2_pgd_addr_end(phys_addr_t addr, phys_addr_t end)
+{
+ phys_addr_t boundary = (addr + S2_PGDIR_SIZE) & S2_PGDIR_MASK;
+
+ return (boundary - 1 < end - 1) ? boundary : end;
+}
+
+#endif /* __ARM64_S2_PGTABLE_H_ */
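STAGE2_PGTABLE_LEVELS above reproduces the constants that the deleted kvm_mmu.h block hard-coded. With ARM64_HW_PGTABLE_LEVELS(va) = ((va) - 4) / (PAGE_SHIFT - 3) and a 40-bit IPA, the hardware wants the level count for 36 bits; a sketch checking the arithmetic:

/* levels for (KVM_PHYS_SHIFT - 4) = 36 bits of input address */
#define S2_LEVELS(page_shift)	((36 - 4) / ((page_shift) - 3))

_Static_assert(S2_LEVELS(12) == 3, "4K: 3 levels, KVM_MMU_CACHE_MIN_PAGES = 2");
_Static_assert(S2_LEVELS(14) == 2, "16K: 2 levels, KVM_MMU_CACHE_MIN_PAGES = 1");
_Static_assert(S2_LEVELS(16) == 2, "64K: 2 levels, KVM_MMU_CACHE_MIN_PAGES = 1");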
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index 59a5b0f1e..024d623f6 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -1,7 +1,8 @@
 #ifndef __ASM_SUSPEND_H
 #define __ASM_SUSPEND_H

-#define NR_CTX_REGS 11
+#define NR_CTX_REGS 10
+#define NR_CALLEE_SAVED_REGS 12

 /*
 * struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on
@@ -16,11 +17,34 @@ struct cpu_suspend_ctx {
 u64 sp;
 } __aligned(16);

-struct sleep_save_sp {
- phys_addr_t *save_ptr_stash;
- phys_addr_t save_ptr_stash_phys;
+/*
+ * Memory to save the cpu state is allocated on the stack by
+ * __cpu_suspend_enter()'s caller, and populated by __cpu_suspend_enter().
+ * This data must survive until cpu_resume() is called.
+ *
+ * This struct describes the size and the layout of the saved cpu state.
+ * The layout of the callee_saved_regs is defined by the implementation
+ * of __cpu_suspend_enter(), and cpu_resume(). This struct must be passed
+ * in by the caller as __cpu_suspend_enter()'s stack-frame is gone once it
+ * returns, and the data would be subsequently corrupted by the call to the
+ * finisher.
+ */
+struct sleep_stack_data {
+ struct cpu_suspend_ctx system_regs;
+ unsigned long callee_saved_regs[NR_CALLEE_SAVED_REGS];
 };

+extern unsigned long *sleep_save_stash;
+
 extern int cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
 extern void cpu_resume(void);
+int __cpu_suspend_enter(struct sleep_stack_data *state);
+void __cpu_suspend_exit(void);
+void _cpu_resume(void);
+
+int swsusp_arch_suspend(void);
+int swsusp_arch_resume(void);
+int arch_hibernation_header_save(void *addr, unsigned int max_size);
+int arch_hibernation_header_restore(void *addr);
+
 #endif
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 12874164b..751e901c8 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -86,10 +86,21 @@
 #define SET_PSTATE_UAO(x) __inst_arm(0xd5000000 | REG_PSTATE_UAO_IMM |\
 (!!x)<<8 | 0x1f)

-/* SCTLR_EL1 */
-#define SCTLR_EL1_CP15BEN (0x1 << 5)
-#define SCTLR_EL1_SED (0x1 << 8)
-#define SCTLR_EL1_SPAN (0x1 << 23)
+/* Common SCTLR_ELx flags. */
+#define SCTLR_ELx_EE (1 << 25)
+#define SCTLR_ELx_I (1 << 12)
+#define SCTLR_ELx_SA (1 << 3)
+#define SCTLR_ELx_C (1 << 2)
+#define SCTLR_ELx_A (1 << 1)
+#define SCTLR_ELx_M 1
+
+#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
+ SCTLR_ELx_SA | SCTLR_ELx_I)
+
+/* SCTLR_EL1 specific flags. */
+#define SCTLR_EL1_SPAN (1 << 23)
+#define SCTLR_EL1_SED (1 << 8)
+#define SCTLR_EL1_CP15BEN (1 << 5)

 /* id_aa64isar0 */
@@ -115,6 +126,7 @@
 #define ID_AA64PFR0_ASIMD_SUPPORTED 0x0
 #define ID_AA64PFR0_EL1_64BIT_ONLY 0x1
 #define ID_AA64PFR0_EL0_64BIT_ONLY 0x1
+#define ID_AA64PFR0_EL0_32BIT_64BIT 0x2

 /* id_aa64mmfr0 */
 #define ID_AA64MMFR0_TGRAN4_SHIFT 28
@@ -145,7 +157,11 @@
 #define ID_AA64MMFR1_VMIDBITS_16 2

 /* id_aa64mmfr2 */
+#define ID_AA64MMFR2_LVA_SHIFT 16
+#define ID_AA64MMFR2_IESB_SHIFT 12
+#define ID_AA64MMFR2_LSM_SHIFT 8
 #define ID_AA64MMFR2_UAO_SHIFT 4
+#define ID_AA64MMFR2_CNP_SHIFT 0

 /* id_aa64dfr0 */
 #define ID_AA64DFR0_CTX_CMPS_SHIFT 28
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index a3e9d6fdb..8b5733982 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -22,6 +22,16 @@ void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);

+#ifdef CONFIG_NUMA
+
+struct pci_bus;
+int pcibus_to_node(struct pci_bus *bus);
+#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
+ cpu_all_mask : \
+ cpumask_of_node(pcibus_to_node(bus)))
+
+#endif /* CONFIG_NUMA */
+
 #include <asm-generic/topology.h>

 #endif /* _ASM_ARM_TOPOLOGY_H */
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 0685d7457..9e397a542 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -81,19 +81,6 @@ static inline void set_fs(mm_segment_t fs)
 #define segment_eq(a, b) ((a) == (b))

 /*
- * Return 1 if addr < current->addr_limit, 0 otherwise.
- */
-#define __addr_ok(addr) \
-({ \
- unsigned long flag; \
- asm("cmp %1, %0; cset %0, lo" \
- : "=&r" (flag) \
- : "r" (addr), "0" (current_thread_info()->addr_limit) \
- : "cc"); \
- flag; \
-})
-
-/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 41e58fe3c..e78ac2632 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)

-#define __NR_compat_syscalls 390
+#define __NR_compat_syscalls 394
 #endif

 #define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 5b925b761..b7e8ef16f 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -801,6 +801,14 @@ __SYSCALL(__NR_execveat, compat_sys_execveat)
 __SYSCALL(__NR_userfaultfd, sys_userfaultfd)
 #define __NR_membarrier 389
 __SYSCALL(__NR_membarrier, sys_membarrier)
+#define __NR_mlock2 390
+__SYSCALL(__NR_mlock2, sys_mlock2)
+#define __NR_copy_file_range 391
+__SYSCALL(__NR_copy_file_range, sys_copy_file_range)
+#define __NR_preadv2 392
+__SYSCALL(__NR_preadv2, compat_sys_preadv2)
+#define __NR_pwritev2 393
+__SYSCALL(__NR_pwritev2, compat_sys_pwritev2)

 /*
 * Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 9f22dd607..dcbcf8dcb 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -18,6 +18,22 @@
 #ifndef __ASM__VIRT_H
 #define __ASM__VIRT_H

+/*
+ * The arm64 hcall implementation uses x0 to specify the hcall type. A value
+ * less than 0xfff indicates a special hcall, such as get/set vector.
+ * Any other value is used as a pointer to the function to call.
+ */
+
+/* HVC_GET_VECTORS - Return the value of the vbar_el2 register. */
+#define HVC_GET_VECTORS 0
+
+/*
+ * HVC_SET_VECTORS - Set the value of the vbar_el2 register.
+ *
+ * @x1: Physical address of the new vector table.
+ */
+#define HVC_SET_VECTORS 1
+
 #define BOOT_CPU_MODE_EL1 (0xe11)
 #define BOOT_CPU_MODE_EL2 (0xe12)

@@ -60,6 +76,12 @@ static inline bool is_kernel_in_hyp_mode(void)
 return el == CurrentEL_EL2;
 }

+#ifdef CONFIG_ARM64_VHE
+extern void verify_cpu_run_el(void);
+#else
+static inline void verify_cpu_run_el(void) {}
+#endif
+
 /* The section containing the hypervisor text */
 extern char __hyp_text_start[];
 extern char __hyp_text_end[];
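Finally, the HVC_GET_VECTORS/HVC_SET_VECTORS convention documented in virt.h above reduces the hyp-stub to a comparison on x0. A hedged C model of that dispatch (the real implementation is EL2 vector assembly; vbar_el2 here is a plain variable standing in for the system register):

#include <stdint.h>

#define HVC_GET_VECTORS	0
#define HVC_SET_VECTORS	1

static uint64_t vbar_el2;	/* stand-in for the EL2 vector base register */

uint64_t hyp_stub_dispatch(uint64_t x0, uint64_t x1)
{
	if (x0 < 0xfff) {		/* special hcall */
		if (x0 == HVC_GET_VECTORS)
			return vbar_el2;
		if (x0 == HVC_SET_VECTORS)
			vbar_el2 = x1;	/* physical address of new vectors */
		return 0;
	}
	/* Otherwise x0 is the function to call, with x1 as its argument. */
	return ((uint64_t (*)(uint64_t))x0)(x1);
}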