Diffstat (limited to 'arch/arm64/kernel')
28 files changed, 1451 insertions, 1561 deletions
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 22dc9bc78..474691f8b 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -4,7 +4,6 @@
 CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
 AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
-CFLAGS_efi-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
 CFLAGS_armv8_deprecated.o := -I$(src)
 CFLAGS_REMOVE_ftrace.o = -pg
@@ -20,6 +19,12 @@ arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
 			cpufeature.o alternative.o cacheinfo.o \
 			smp.o smp_spin_table.o topology.o
+extra-$(CONFIG_EFI) := efi-entry.o
+
+OBJCOPYFLAGS := --prefix-symbols=__efistub_
+$(obj)/%.stub.o: $(obj)/%.o FORCE
+	$(call if_changed,objcopy)
+
 arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
 			sys_compat.o entry32.o \
 			../../arm/kernel/opcodes.o
@@ -32,7 +37,7 @@ arm64-obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
 arm64-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
 arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
 arm64-obj-$(CONFIG_KGDB) += kgdb.o
-arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o
+arm64-obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o
 arm64-obj-$(CONFIG_PCI) += pci.o
 arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
 arm64-obj-$(CONFIG_ACPI) += acpi.o
@@ -40,7 +45,7 @@ arm64-obj-$(CONFIG_ACPI) += acpi.o
 obj-y += $(arm64-obj-y) vdso/
 obj-m += $(arm64-obj-m)
 head-y := head.o
-extra-y := $(head-y) vmlinux.lds
+extra-y += $(head-y) vmlinux.lds
 # vDSO - this must be built first to generate the symbol offsets
 $(call objectify,$(arm64-obj-y)): $(obj)/vdso/vdso-offsets.h
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 19de7537e..d1ce8e2f9 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -29,6 +29,11 @@
 #include <asm/cpu_ops.h>
 #include <asm/smp_plat.h>
+#ifdef CONFIG_ACPI_APEI
+# include <linux/efi.h>
+# include <asm/pgtable.h>
+#endif
+
 int acpi_noirq = 1;	/* skip ACPI IRQ initialization */
 int acpi_disabled = 1;
 EXPORT_SYMBOL(acpi_disabled);
@@ -206,27 +211,26 @@ void __init acpi_boot_table_init(void)
 	}
 }
-void __init acpi_gic_init(void)
+#ifdef CONFIG_ACPI_APEI
+pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
 {
-	struct acpi_table_header *table;
-	acpi_status status;
-	acpi_size tbl_size;
-	int err;
-
-	if (acpi_disabled)
-		return;
-
-	status = acpi_get_table_with_size(ACPI_SIG_MADT, 0, &table, &tbl_size);
-	if (ACPI_FAILURE(status)) {
-		const char *msg = acpi_format_exception(status);
-
-		pr_err("Failed to get MADT table, %s\n", msg);
-		return;
-	}
+	/*
+	 * According to "Table 8 Map: EFI memory types to AArch64 memory
+	 * types" of UEFI 2.5 section 2.3.6.1, each EFI memory type is
+	 * mapped to a corresponding MAIR attribute encoding.
+	 * The EFI memory attribute advises all possible capabilities
+	 * of a memory region. We use the most efficient capability.
+	 */
-	err = gic_v2_acpi_init(table);
-	if (err)
-		pr_err("Failed to initialize GIC IRQ controller");
+	u64 attr;
-	early_acpi_os_unmap_memory((char *)table, tbl_size);
+	attr = efi_mem_attributes(addr);
+	if (attr & EFI_MEMORY_WB)
+		return PAGE_KERNEL;
+	if (attr & EFI_MEMORY_WT)
+		return __pgprot(PROT_NORMAL_WT);
+	if (attr & EFI_MEMORY_WC)
+		return __pgprot(PROT_NORMAL_NC);
+	return __pgprot(PROT_DEVICE_nGnRnE);
 }
+#endif
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index a85843ddb..3b6d8cc9d 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -51,6 +51,9 @@ EXPORT_SYMBOL(strnlen);
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(__memmove);
 EXPORT_SYMBOL(memchr);
 EXPORT_SYMBOL(memcmp);
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 8d89cf8da..25de8b244 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -60,7 +60,7 @@ int main(void)
   DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
   DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
   BLANK();
-  DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id));
+  DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
   BLANK();
   DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
   DEFINE(VMA_VM_FLAGS, offsetof(struct vm_area_struct, vm_flags));
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 6ffd91438..feb6b4efa 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -23,6 +23,7 @@
 #define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+#define MIDR_THUNDERX MIDR_CPU_PART(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
 #define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
 			MIDR_ARCHITECTURE_MASK)
@@ -74,6 +75,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 			   (1 << MIDR_VARIANT_SHIFT) | 2),
 	},
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_834220
+	{
+	/* Cortex-A57 r0p0 - r1p2 */
+		.desc = "ARM erratum 834220",
+		.capability = ARM64_WORKAROUND_834220,
+		MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
+			   (1 << MIDR_VARIANT_SHIFT) | 2),
+	},
+#endif
 #ifdef CONFIG_ARM64_ERRATUM_845719
 	{
 	/* Cortex-A53 r0p[01234] */
@@ -82,11 +92,19 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
 	},
 #endif
+#ifdef CONFIG_CAVIUM_ERRATUM_23154
+	{
+	/* Cavium ThunderX, pass 1.x */
+		.desc = "Cavium erratum 23154",
+		.capability = ARM64_WORKAROUND_CAVIUM_23154,
+		MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
+	},
+#endif
 	{
 	}
 };
 void check_local_cpu_errata(void)
 {
-	check_cpu_capabilities(arm64_errata, "enabling workaround for");
+	update_cpu_capabilities(arm64_errata, "enabling workaround for");
 }
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 3c9aed32f..0669c6328 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -16,12 +16,578 @@
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ -#define pr_fmt(fmt) "alternatives: " fmt +#define pr_fmt(fmt) "CPU features: " fmt +#include <linux/bsearch.h> +#include <linux/sort.h> #include <linux/types.h> #include <asm/cpu.h> #include <asm/cpufeature.h> +#include <asm/cpu_ops.h> #include <asm/processor.h> +#include <asm/sysreg.h> + +unsigned long elf_hwcap __read_mostly; +EXPORT_SYMBOL_GPL(elf_hwcap); + +#ifdef CONFIG_COMPAT +#define COMPAT_ELF_HWCAP_DEFAULT \ + (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\ + COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\ + COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\ + COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\ + COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\ + COMPAT_HWCAP_LPAE) +unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT; +unsigned int compat_elf_hwcap2 __read_mostly; +#endif + +DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); + +#define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ + { \ + .sign = SIGNED, \ + .strict = STRICT, \ + .type = TYPE, \ + .shift = SHIFT, \ + .width = WIDTH, \ + .safe_val = SAFE_VAL, \ + } + +/* Define a feature with signed values */ +#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ + __ARM64_FTR_BITS(FTR_SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) + +/* Define a feature with unsigned value */ +#define U_ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ + __ARM64_FTR_BITS(FTR_UNSIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) + +#define ARM64_FTR_END \ + { \ + .width = 0, \ + } + +static struct arm64_ftr_bits ftr_id_aa64isar0[] = { + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0), /* RAZ */ + ARM64_FTR_END, +}; + +static struct arm64_ftr_bits ftr_id_aa64pfr0[] = { + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI), + ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI), + /* Linux doesn't care about the EL3 */ + ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY), + ARM64_FTR_END, +}; + +static struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0), + /* Linux shouldn't care about secure memory */ + ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 
0), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_ASID_SHIFT, 4, 0), + /* + * Differing PARange is fine as long as all peripherals and memory are mapped + * within the minimum PARange of all CPUs + */ + U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_LOR_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HPD_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VHE_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HADBS_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static struct arm64_ftr_bits ftr_ctr[] = { + U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */ + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */ + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */ + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */ + /* + * Linux can handle differing I-cache policies. Userspace JITs will + * make use of *minLine + */ + U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0), /* L1Ip */ + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0), /* RAZ */ + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */ + ARM64_FTR_END, +}; + +static struct arm64_ftr_bits ftr_id_mmfr0[] = { + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0), /* InnerShr */ + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0), /* FCSE */ + ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0), /* AuxReg */ + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 4, 0), /* TCM */ + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0), /* ShareLvl */ + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0), /* OuterShr */ + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0), /* PMSA */ + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0), /* VMSA */ + ARM64_FTR_END, +}; + +static struct arm64_ftr_bits ftr_id_aa64dfr0[] = { + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6), + ARM64_FTR_END, +}; + +static struct arm64_ftr_bits ftr_mvfr2[] = { + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0), /* RAZ */ + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0), /* FPMisc */ + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0), /* SIMDMisc */ + ARM64_FTR_END, +}; + +static struct arm64_ftr_bits ftr_dczid[] = { + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 5, 27, 0), /* RAZ */ + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 1, 1), /* DZP */ + ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* BS */ + ARM64_FTR_END, +}; + + +static struct arm64_ftr_bits ftr_id_isar5[] = { + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_RDM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 20, 4, 0), /* RAZ */ + ARM64_FTR_BITS(FTR_STRICT, 
FTR_EXACT, ID_ISAR5_CRC32_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA1_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_AES_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SEVL_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static struct arm64_ftr_bits ftr_id_mmfr4[] = { + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0), /* RAZ */ + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0), /* ac2 */ + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0), /* RAZ */ + ARM64_FTR_END, +}; + +static struct arm64_ftr_bits ftr_id_pfr0[] = { + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 16, 0), /* RAZ */ + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0), /* State3 */ + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0), /* State2 */ + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0), /* State1 */ + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0), /* State0 */ + ARM64_FTR_END, +}; + +/* + * Common ftr bits for a 32bit register with all hidden, strict + * attributes, with 4bit feature fields and a default safe value of + * 0. Covers the following 32bit registers: + * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1] + */ +static struct arm64_ftr_bits ftr_generic_32bits[] = { + ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), + ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), + ARM64_FTR_END, +}; + +static struct arm64_ftr_bits ftr_generic[] = { + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0), + ARM64_FTR_END, +}; + +static struct arm64_ftr_bits ftr_generic32[] = { + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 32, 0), + ARM64_FTR_END, +}; + +static struct arm64_ftr_bits ftr_aa64raz[] = { + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0), + ARM64_FTR_END, +}; + +#define ARM64_FTR_REG(id, table) \ + { \ + .sys_id = id, \ + .name = #id, \ + .ftr_bits = &((table)[0]), \ + } + +static struct arm64_ftr_reg arm64_ftr_regs[] = { + + /* Op1 = 0, CRn = 0, CRm = 1 */ + ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0), + ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits), + ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_generic_32bits), + ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0), + ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits), + ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits), + ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits), + + /* Op1 = 0, CRn = 0, CRm = 2 */ + ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits), + ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits), + ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits), + ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits), + ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits), + ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5), + ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4), + + /* Op1 = 0, CRn = 0, CRm = 3 */ + ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits), + ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits), + ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2), + + /* Op1 = 0, CRn = 0, CRm = 4 */ + ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0), + ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_aa64raz), + + /* Op1 = 0, CRn = 0, CRm = 5 */ + ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0), + ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_generic), + + /* 
Op1 = 0, CRn = 0, CRm = 6 */ + ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0), + ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_aa64raz), + + /* Op1 = 0, CRn = 0, CRm = 7 */ + ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0), + ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1), + + /* Op1 = 3, CRn = 0, CRm = 0 */ + ARM64_FTR_REG(SYS_CTR_EL0, ftr_ctr), + ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid), + + /* Op1 = 3, CRn = 14, CRm = 0 */ + ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_generic32), +}; + +static int search_cmp_ftr_reg(const void *id, const void *regp) +{ + return (int)(unsigned long)id - (int)((const struct arm64_ftr_reg *)regp)->sys_id; +} + +/* + * get_arm64_ftr_reg - Lookup a feature register entry using its + * sys_reg() encoding. With the array arm64_ftr_regs sorted in the + * ascending order of sys_id , we use binary search to find a matching + * entry. + * + * returns - Upon success, matching ftr_reg entry for id. + * - NULL on failure. It is upto the caller to decide + * the impact of a failure. + */ +static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id) +{ + return bsearch((const void *)(unsigned long)sys_id, + arm64_ftr_regs, + ARRAY_SIZE(arm64_ftr_regs), + sizeof(arm64_ftr_regs[0]), + search_cmp_ftr_reg); +} + +static u64 arm64_ftr_set_value(struct arm64_ftr_bits *ftrp, s64 reg, s64 ftr_val) +{ + u64 mask = arm64_ftr_mask(ftrp); + + reg &= ~mask; + reg |= (ftr_val << ftrp->shift) & mask; + return reg; +} + +static s64 arm64_ftr_safe_value(struct arm64_ftr_bits *ftrp, s64 new, s64 cur) +{ + s64 ret = 0; + + switch (ftrp->type) { + case FTR_EXACT: + ret = ftrp->safe_val; + break; + case FTR_LOWER_SAFE: + ret = new < cur ? new : cur; + break; + case FTR_HIGHER_SAFE: + ret = new > cur ? new : cur; + break; + default: + BUG(); + } + + return ret; +} + +static int __init sort_cmp_ftr_regs(const void *a, const void *b) +{ + return ((const struct arm64_ftr_reg *)a)->sys_id - + ((const struct arm64_ftr_reg *)b)->sys_id; +} + +static void __init swap_ftr_regs(void *a, void *b, int size) +{ + struct arm64_ftr_reg tmp = *(struct arm64_ftr_reg *)a; + *(struct arm64_ftr_reg *)a = *(struct arm64_ftr_reg *)b; + *(struct arm64_ftr_reg *)b = tmp; +} + +static void __init sort_ftr_regs(void) +{ + /* Keep the array sorted so that we can do the binary search */ + sort(arm64_ftr_regs, + ARRAY_SIZE(arm64_ftr_regs), + sizeof(arm64_ftr_regs[0]), + sort_cmp_ftr_regs, + swap_ftr_regs); +} + +/* + * Initialise the CPU feature register from Boot CPU values. + * Also initiliases the strict_mask for the register. 
+ */ +static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new) +{ + u64 val = 0; + u64 strict_mask = ~0x0ULL; + struct arm64_ftr_bits *ftrp; + struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg); + + BUG_ON(!reg); + + for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) { + s64 ftr_new = arm64_ftr_value(ftrp, new); + + val = arm64_ftr_set_value(ftrp, val, ftr_new); + if (!ftrp->strict) + strict_mask &= ~arm64_ftr_mask(ftrp); + } + reg->sys_val = val; + reg->strict_mask = strict_mask; +} + +void __init init_cpu_features(struct cpuinfo_arm64 *info) +{ + /* Before we start using the tables, make sure it is sorted */ + sort_ftr_regs(); + + init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr); + init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid); + init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq); + init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0); + init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1); + init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0); + init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1); + init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0); + init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1); + init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0); + init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1); + init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0); + init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0); + init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1); + init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2); + init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3); + init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4); + init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5); + init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0); + init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1); + init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2); + init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3); + init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0); + init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1); + init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0); + init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1); + init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2); +} + +static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new) +{ + struct arm64_ftr_bits *ftrp; + + for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) { + s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val); + s64 ftr_new = arm64_ftr_value(ftrp, new); + + if (ftr_cur == ftr_new) + continue; + /* Find a safe value */ + ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur); + reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new); + } + +} + +static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot) +{ + struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id); + + BUG_ON(!regp); + update_cpu_ftr_reg(regp, val); + if ((boot & regp->strict_mask) == (val & regp->strict_mask)) + return 0; + pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n", + regp->name, boot, cpu, val); + return 1; +} + +/* + * Update system wide CPU feature registers with the values from a + * non-boot CPU. Also performs SANITY checks to make sure that there + * aren't any insane variations from that of the boot CPU. + */ +void update_cpu_features(int cpu, + struct cpuinfo_arm64 *info, + struct cpuinfo_arm64 *boot) +{ + int taint = 0; + + /* + * The kernel can handle differing I-cache policies, but otherwise + * caches should look identical. 
Userspace JITs will make use of + * *minLine. + */ + taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu, + info->reg_ctr, boot->reg_ctr); + + /* + * Userspace may perform DC ZVA instructions. Mismatched block sizes + * could result in too much or too little memory being zeroed if a + * process is preempted and migrated between CPUs. + */ + taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu, + info->reg_dczid, boot->reg_dczid); + + /* If different, timekeeping will be broken (especially with KVM) */ + taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu, + info->reg_cntfrq, boot->reg_cntfrq); + + /* + * The kernel uses self-hosted debug features and expects CPUs to + * support identical debug features. We presently need CTX_CMPs, WRPs, + * and BRPs to be identical. + * ID_AA64DFR1 is currently RES0. + */ + taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu, + info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0); + taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu, + info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1); + /* + * Even in big.LITTLE, processors should be identical instruction-set + * wise. + */ + taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu, + info->reg_id_aa64isar0, boot->reg_id_aa64isar0); + taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu, + info->reg_id_aa64isar1, boot->reg_id_aa64isar1); + + /* + * Differing PARange support is fine as long as all peripherals and + * memory are mapped within the minimum PARange of all CPUs. + * Linux should not care about secure memory. + */ + taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu, + info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0); + taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu, + info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1); + + /* + * EL3 is not our concern. + * ID_AA64PFR1 is currently RES0. + */ + taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu, + info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0); + taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu, + info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1); + + /* + * If we have AArch32, we care about 32-bit features for compat. These + * registers should be RES0 otherwise. + */ + taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu, + info->reg_id_dfr0, boot->reg_id_dfr0); + taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu, + info->reg_id_isar0, boot->reg_id_isar0); + taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu, + info->reg_id_isar1, boot->reg_id_isar1); + taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu, + info->reg_id_isar2, boot->reg_id_isar2); + taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu, + info->reg_id_isar3, boot->reg_id_isar3); + taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu, + info->reg_id_isar4, boot->reg_id_isar4); + taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu, + info->reg_id_isar5, boot->reg_id_isar5); + + /* + * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and + * ACTLR formats could differ across CPUs and therefore would have to + * be trapped for virtualization anyway. 
+ */ + taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu, + info->reg_id_mmfr0, boot->reg_id_mmfr0); + taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu, + info->reg_id_mmfr1, boot->reg_id_mmfr1); + taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu, + info->reg_id_mmfr2, boot->reg_id_mmfr2); + taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu, + info->reg_id_mmfr3, boot->reg_id_mmfr3); + taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu, + info->reg_id_pfr0, boot->reg_id_pfr0); + taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu, + info->reg_id_pfr1, boot->reg_id_pfr1); + taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu, + info->reg_mvfr0, boot->reg_mvfr0); + taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu, + info->reg_mvfr1, boot->reg_mvfr1); + taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu, + info->reg_mvfr2, boot->reg_mvfr2); + + /* + * Mismatched CPU features are a recipe for disaster. Don't even + * pretend to support them. + */ + WARN_TAINT_ONCE(taint, TAINT_CPU_OUT_OF_SPEC, + "Unsupported CPU feature variation.\n"); +} + +u64 read_system_reg(u32 id) +{ + struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id); + + /* We shouldn't get a request for an unsupported register */ + BUG_ON(!regp); + return regp->sys_val; +} + +#include <linux/irqchip/arm-gic-v3.h> static bool feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry) @@ -31,34 +597,46 @@ feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry) return val >= entry->min_field_value; } -#define __ID_FEAT_CHK(reg) \ -static bool __maybe_unused \ -has_##reg##_feature(const struct arm64_cpu_capabilities *entry) \ -{ \ - u64 val; \ - \ - val = read_cpuid(reg##_el1); \ - return feature_matches(val, entry); \ +static bool +has_cpuid_feature(const struct arm64_cpu_capabilities *entry) +{ + u64 val; + + val = read_system_reg(entry->sys_reg); + return feature_matches(val, entry); } -__ID_FEAT_CHK(id_aa64pfr0); -__ID_FEAT_CHK(id_aa64mmfr1); -__ID_FEAT_CHK(id_aa64isar0); +static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry) +{ + bool has_sre; + + if (!has_cpuid_feature(entry)) + return false; + + has_sre = gic_enable_sre(); + if (!has_sre) + pr_warn_once("%s present but disabled by higher exception level\n", + entry->desc); + + return has_sre; +} static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", .capability = ARM64_HAS_SYSREG_GIC_CPUIF, - .matches = has_id_aa64pfr0_feature, - .field_pos = 24, + .matches = has_useable_gicv3_cpuif, + .sys_reg = SYS_ID_AA64PFR0_EL1, + .field_pos = ID_AA64PFR0_GIC_SHIFT, .min_field_value = 1, }, #ifdef CONFIG_ARM64_PAN { .desc = "Privileged Access Never", .capability = ARM64_HAS_PAN, - .matches = has_id_aa64mmfr1_feature, - .field_pos = 20, + .matches = has_cpuid_feature, + .sys_reg = SYS_ID_AA64MMFR1_EL1, + .field_pos = ID_AA64MMFR1_PAN_SHIFT, .min_field_value = 1, .enable = cpu_enable_pan, }, @@ -67,15 +645,101 @@ static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "LSE atomic instructions", .capability = ARM64_HAS_LSE_ATOMICS, - .matches = has_id_aa64isar0_feature, - .field_pos = 20, + .matches = has_cpuid_feature, + .sys_reg = SYS_ID_AA64ISAR0_EL1, + .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT, .min_field_value = 2, }, #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */ {}, }; -void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps, +#define HWCAP_CAP(reg, field, min_value, type, cap) \ + { \ + .desc = #cap, \ + .matches = has_cpuid_feature, \ + 
.sys_reg = reg, \ + .field_pos = field, \ + .min_field_value = min_value, \ + .hwcap_type = type, \ + .hwcap = cap, \ + } + +static const struct arm64_cpu_capabilities arm64_hwcaps[] = { + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, 2, CAP_HWCAP, HWCAP_PMULL), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, 1, CAP_HWCAP, HWCAP_AES), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, 1, CAP_HWCAP, HWCAP_SHA1), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, 1, CAP_HWCAP, HWCAP_SHA2), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, 1, CAP_HWCAP, HWCAP_CRC32), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, 2, CAP_HWCAP, HWCAP_ATOMICS), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 0, CAP_HWCAP, HWCAP_FP), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 0, CAP_HWCAP, HWCAP_ASIMD), +#ifdef CONFIG_COMPAT + HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL), + HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES), + HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1), + HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2), + HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32), +#endif + {}, +}; + +static void cap_set_hwcap(const struct arm64_cpu_capabilities *cap) +{ + switch (cap->hwcap_type) { + case CAP_HWCAP: + elf_hwcap |= cap->hwcap; + break; +#ifdef CONFIG_COMPAT + case CAP_COMPAT_HWCAP: + compat_elf_hwcap |= (u32)cap->hwcap; + break; + case CAP_COMPAT_HWCAP2: + compat_elf_hwcap2 |= (u32)cap->hwcap; + break; +#endif + default: + WARN_ON(1); + break; + } +} + +/* Check if we have a particular HWCAP enabled */ +static bool __maybe_unused cpus_have_hwcap(const struct arm64_cpu_capabilities *cap) +{ + bool rc; + + switch (cap->hwcap_type) { + case CAP_HWCAP: + rc = (elf_hwcap & cap->hwcap) != 0; + break; +#ifdef CONFIG_COMPAT + case CAP_COMPAT_HWCAP: + rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0; + break; + case CAP_COMPAT_HWCAP2: + rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0; + break; +#endif + default: + WARN_ON(1); + rc = false; + } + + return rc; +} + +static void setup_cpu_hwcaps(void) +{ + int i; + const struct arm64_cpu_capabilities *hwcaps = arm64_hwcaps; + + for (i = 0; hwcaps[i].desc; i++) + if (hwcaps[i].matches(&hwcaps[i])) + cap_set_hwcap(&hwcaps[i]); +} + +void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps, const char *info) { int i; @@ -88,15 +752,178 @@ void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps, pr_info("%s %s\n", info, caps[i].desc); cpus_set_cap(caps[i].capability); } +} + +/* + * Run through the enabled capabilities and enable() it on all active + * CPUs + */ +static void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps) +{ + int i; + + for (i = 0; caps[i].desc; i++) + if (caps[i].enable && cpus_have_cap(caps[i].capability)) + on_each_cpu(caps[i].enable, NULL, true); +} + +#ifdef CONFIG_HOTPLUG_CPU + +/* + * Flag to indicate if we have computed the system wide + * capabilities based on the boot time active CPUs. This + * will be used to determine if a new booting CPU should + * go through the verification process to make sure that it + * supports the system capabilities, without using a hotplug + * notifier. 
+ */ +static bool sys_caps_initialised; + +static inline void set_sys_caps_initialised(void) +{ + sys_caps_initialised = true; +} + +/* + * __raw_read_system_reg() - Used by a STARTING cpu before cpuinfo is populated. + */ +static u64 __raw_read_system_reg(u32 sys_id) +{ + switch (sys_id) { + case SYS_ID_PFR0_EL1: return (u64)read_cpuid(ID_PFR0_EL1); + case SYS_ID_PFR1_EL1: return (u64)read_cpuid(ID_PFR1_EL1); + case SYS_ID_DFR0_EL1: return (u64)read_cpuid(ID_DFR0_EL1); + case SYS_ID_MMFR0_EL1: return (u64)read_cpuid(ID_MMFR0_EL1); + case SYS_ID_MMFR1_EL1: return (u64)read_cpuid(ID_MMFR1_EL1); + case SYS_ID_MMFR2_EL1: return (u64)read_cpuid(ID_MMFR2_EL1); + case SYS_ID_MMFR3_EL1: return (u64)read_cpuid(ID_MMFR3_EL1); + case SYS_ID_ISAR0_EL1: return (u64)read_cpuid(ID_ISAR0_EL1); + case SYS_ID_ISAR1_EL1: return (u64)read_cpuid(ID_ISAR1_EL1); + case SYS_ID_ISAR2_EL1: return (u64)read_cpuid(ID_ISAR2_EL1); + case SYS_ID_ISAR3_EL1: return (u64)read_cpuid(ID_ISAR3_EL1); + case SYS_ID_ISAR4_EL1: return (u64)read_cpuid(ID_ISAR4_EL1); + case SYS_ID_ISAR5_EL1: return (u64)read_cpuid(ID_ISAR4_EL1); + case SYS_MVFR0_EL1: return (u64)read_cpuid(MVFR0_EL1); + case SYS_MVFR1_EL1: return (u64)read_cpuid(MVFR1_EL1); + case SYS_MVFR2_EL1: return (u64)read_cpuid(MVFR2_EL1); + + case SYS_ID_AA64PFR0_EL1: return (u64)read_cpuid(ID_AA64PFR0_EL1); + case SYS_ID_AA64PFR1_EL1: return (u64)read_cpuid(ID_AA64PFR0_EL1); + case SYS_ID_AA64DFR0_EL1: return (u64)read_cpuid(ID_AA64DFR0_EL1); + case SYS_ID_AA64DFR1_EL1: return (u64)read_cpuid(ID_AA64DFR0_EL1); + case SYS_ID_AA64MMFR0_EL1: return (u64)read_cpuid(ID_AA64MMFR0_EL1); + case SYS_ID_AA64MMFR1_EL1: return (u64)read_cpuid(ID_AA64MMFR1_EL1); + case SYS_ID_AA64ISAR0_EL1: return (u64)read_cpuid(ID_AA64ISAR0_EL1); + case SYS_ID_AA64ISAR1_EL1: return (u64)read_cpuid(ID_AA64ISAR1_EL1); + + case SYS_CNTFRQ_EL0: return (u64)read_cpuid(CNTFRQ_EL0); + case SYS_CTR_EL0: return (u64)read_cpuid(CTR_EL0); + case SYS_DCZID_EL0: return (u64)read_cpuid(DCZID_EL0); + default: + BUG(); + return 0; + } +} + +/* + * Park the CPU which doesn't have the capability as advertised + * by the system. + */ +static void fail_incapable_cpu(char *cap_type, + const struct arm64_cpu_capabilities *cap) +{ + int cpu = smp_processor_id(); - /* second pass allows enable() to consider interacting capabilities */ + pr_crit("CPU%d: missing %s : %s\n", cpu, cap_type, cap->desc); + /* Mark this CPU absent */ + set_cpu_present(cpu, 0); + + /* Check if we can park ourselves */ + if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die) + cpu_ops[cpu]->cpu_die(cpu); + asm( + "1: wfe\n" + " wfi\n" + " b 1b"); +} + +/* + * Run through the enabled system capabilities and enable() it on this CPU. + * The capabilities were decided based on the available CPUs at the boot time. + * Any new CPU should match the system wide status of the capability. If the + * new CPU doesn't have a capability which the system now has enabled, we + * cannot do anything to fix it up and could cause unexpected failures. So + * we park the CPU. + */ +void verify_local_cpu_capabilities(void) +{ + int i; + const struct arm64_cpu_capabilities *caps; + + /* + * If we haven't computed the system capabilities, there is nothing + * to verify. 
+ */ + if (!sys_caps_initialised) + return; + + caps = arm64_features; for (i = 0; caps[i].desc; i++) { - if (cpus_have_cap(caps[i].capability) && caps[i].enable) - caps[i].enable(); + if (!cpus_have_cap(caps[i].capability) || !caps[i].sys_reg) + continue; + /* + * If the new CPU misses an advertised feature, we cannot proceed + * further, park the cpu. + */ + if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i])) + fail_incapable_cpu("arm64_features", &caps[i]); + if (caps[i].enable) + caps[i].enable(NULL); + } + + for (i = 0, caps = arm64_hwcaps; caps[i].desc; i++) { + if (!cpus_have_hwcap(&caps[i])) + continue; + if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i])) + fail_incapable_cpu("arm64_hwcaps", &caps[i]); } } -void check_local_cpu_features(void) +#else /* !CONFIG_HOTPLUG_CPU */ + +static inline void set_sys_caps_initialised(void) +{ +} + +#endif /* CONFIG_HOTPLUG_CPU */ + +static void setup_feature_capabilities(void) { - check_cpu_capabilities(arm64_features, "detected feature:"); + update_cpu_capabilities(arm64_features, "detected feature:"); + enable_cpu_capabilities(arm64_features); +} + +void __init setup_cpu_features(void) +{ + u32 cwg; + int cls; + + /* Set the CPU feature capabilies */ + setup_feature_capabilities(); + setup_cpu_hwcaps(); + + /* Advertise that we have computed the system capabilities */ + set_sys_caps_initialised(); + + /* + * Check for sane CTR_EL0.CWG value. + */ + cwg = cache_type_cwg(); + cls = cache_line_size(); + if (!cwg) + pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n", + cls); + if (L1_CACHE_BYTES < cls) + pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n", + L1_CACHE_BYTES, cls); } diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 75d5a867e..212ae6361 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -24,9 +24,13 @@ #include <linux/bug.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/personality.h> #include <linux/preempt.h> #include <linux/printk.h> +#include <linux/seq_file.h> +#include <linux/sched.h> #include <linux/smp.h> +#include <linux/delay.h> /* * In case the boot CPU is hotpluggable, we record its initial state and @@ -35,7 +39,6 @@ */ DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data); static struct cpuinfo_arm64 boot_cpu_data; -static bool mixed_endian_el0 = true; static char *icache_policy_str[] = { [ICACHE_POLICY_RESERVED] = "RESERVED/UNKNOWN", @@ -46,157 +49,152 @@ static char *icache_policy_str[] = { unsigned long __icache_flags; -static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info) +static const char *const hwcap_str[] = { + "fp", + "asimd", + "evtstrm", + "aes", + "pmull", + "sha1", + "sha2", + "crc32", + "atomics", + NULL +}; + +#ifdef CONFIG_COMPAT +static const char *const compat_hwcap_str[] = { + "swp", + "half", + "thumb", + "26bit", + "fastmult", + "fpa", + "vfp", + "edsp", + "java", + "iwmmxt", + "crunch", + "thumbee", + "neon", + "vfpv3", + "vfpv3d16", + "tls", + "vfpv4", + "idiva", + "idivt", + "vfpd32", + "lpae", + "evtstrm" +}; + +static const char *const compat_hwcap2_str[] = { + "aes", + "pmull", + "sha1", + "sha2", + "crc32", + NULL +}; +#endif /* CONFIG_COMPAT */ + +static int c_show(struct seq_file *m, void *v) { - unsigned int cpu = smp_processor_id(); - u32 l1ip = CTR_L1IP(info->reg_ctr); + int i, j; + + for_each_online_cpu(i) { + struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); + u32 midr = cpuinfo->reg_midr; - if (l1ip != 
ICACHE_POLICY_PIPT) { /* - * VIPT caches are non-aliasing if the VA always equals the PA - * in all bit positions that are covered by the index. This is - * the case if the size of a way (# of sets * line size) does - * not exceed PAGE_SIZE. + * glibc reads /proc/cpuinfo to determine the number of + * online processors, looking for lines beginning with + * "processor". Give glibc what it expects. */ - u32 waysize = icache_get_numsets() * icache_get_linesize(); + seq_printf(m, "processor\t: %d\n", i); - if (l1ip != ICACHE_POLICY_VIPT || waysize > PAGE_SIZE) - set_bit(ICACHEF_ALIASING, &__icache_flags); + seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", + loops_per_jiffy / (500000UL/HZ), + loops_per_jiffy / (5000UL/HZ) % 100); + + /* + * Dump out the common processor features in a single line. + * Userspace should read the hwcaps with getauxval(AT_HWCAP) + * rather than attempting to parse this, but there's a body of + * software which does already (at least for 32-bit). + */ + seq_puts(m, "Features\t:"); + if (personality(current->personality) == PER_LINUX32) { +#ifdef CONFIG_COMPAT + for (j = 0; compat_hwcap_str[j]; j++) + if (compat_elf_hwcap & (1 << j)) + seq_printf(m, " %s", compat_hwcap_str[j]); + + for (j = 0; compat_hwcap2_str[j]; j++) + if (compat_elf_hwcap2 & (1 << j)) + seq_printf(m, " %s", compat_hwcap2_str[j]); +#endif /* CONFIG_COMPAT */ + } else { + for (j = 0; hwcap_str[j]; j++) + if (elf_hwcap & (1 << j)) + seq_printf(m, " %s", hwcap_str[j]); + } + seq_puts(m, "\n"); + + seq_printf(m, "CPU implementer\t: 0x%02x\n", + MIDR_IMPLEMENTOR(midr)); + seq_printf(m, "CPU architecture: 8\n"); + seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr)); + seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr)); + seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr)); } - if (l1ip == ICACHE_POLICY_AIVIVT) - set_bit(ICACHEF_AIVIVT, &__icache_flags); - pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu); + return 0; } -bool cpu_supports_mixed_endian_el0(void) +static void *c_start(struct seq_file *m, loff_t *pos) { - return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1)); + return *pos < 1 ? (void *)1 : NULL; } -bool system_supports_mixed_endian_el0(void) +static void *c_next(struct seq_file *m, void *v, loff_t *pos) { - return mixed_endian_el0; + ++*pos; + return NULL; } -static void update_mixed_endian_el0_support(struct cpuinfo_arm64 *info) +static void c_stop(struct seq_file *m, void *v) { - mixed_endian_el0 &= id_aa64mmfr0_mixed_endian_el0(info->reg_id_aa64mmfr0); } -static void update_cpu_features(struct cpuinfo_arm64 *info) -{ - update_mixed_endian_el0_support(info); -} +const struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = c_show +}; -static int check_reg_mask(char *name, u64 mask, u64 boot, u64 cur, int cpu) +static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info) { - if ((boot & mask) == (cur & mask)) - return 0; - - pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016lx, CPU%d: %#016lx\n", - name, (unsigned long)boot, cpu, (unsigned long)cur); - - return 1; -} + unsigned int cpu = smp_processor_id(); + u32 l1ip = CTR_L1IP(info->reg_ctr); -#define CHECK_MASK(field, mask, boot, cur, cpu) \ - check_reg_mask(#field, mask, (boot)->reg_ ## field, (cur)->reg_ ## field, cpu) + if (l1ip != ICACHE_POLICY_PIPT) { + /* + * VIPT caches are non-aliasing if the VA always equals the PA + * in all bit positions that are covered by the index. 
This is + * the case if the size of a way (# of sets * line size) does + * not exceed PAGE_SIZE. + */ + u32 waysize = icache_get_numsets() * icache_get_linesize(); -#define CHECK(field, boot, cur, cpu) \ - CHECK_MASK(field, ~0ULL, boot, cur, cpu) + if (l1ip != ICACHE_POLICY_VIPT || waysize > PAGE_SIZE) + set_bit(ICACHEF_ALIASING, &__icache_flags); + } + if (l1ip == ICACHE_POLICY_AIVIVT) + set_bit(ICACHEF_AIVIVT, &__icache_flags); -/* - * Verify that CPUs don't have unexpected differences that will cause problems. - */ -static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur) -{ - unsigned int cpu = smp_processor_id(); - struct cpuinfo_arm64 *boot = &boot_cpu_data; - unsigned int diff = 0; - - /* - * The kernel can handle differing I-cache policies, but otherwise - * caches should look identical. Userspace JITs will make use of - * *minLine. - */ - diff |= CHECK_MASK(ctr, 0xffff3fff, boot, cur, cpu); - - /* - * Userspace may perform DC ZVA instructions. Mismatched block sizes - * could result in too much or too little memory being zeroed if a - * process is preempted and migrated between CPUs. - */ - diff |= CHECK(dczid, boot, cur, cpu); - - /* If different, timekeeping will be broken (especially with KVM) */ - diff |= CHECK(cntfrq, boot, cur, cpu); - - /* - * The kernel uses self-hosted debug features and expects CPUs to - * support identical debug features. We presently need CTX_CMPs, WRPs, - * and BRPs to be identical. - * ID_AA64DFR1 is currently RES0. - */ - diff |= CHECK(id_aa64dfr0, boot, cur, cpu); - diff |= CHECK(id_aa64dfr1, boot, cur, cpu); - - /* - * Even in big.LITTLE, processors should be identical instruction-set - * wise. - */ - diff |= CHECK(id_aa64isar0, boot, cur, cpu); - diff |= CHECK(id_aa64isar1, boot, cur, cpu); - - /* - * Differing PARange support is fine as long as all peripherals and - * memory are mapped within the minimum PARange of all CPUs. - * Linux should not care about secure memory. - * ID_AA64MMFR1 is currently RES0. - */ - diff |= CHECK_MASK(id_aa64mmfr0, 0xffffffffffff0ff0, boot, cur, cpu); - diff |= CHECK(id_aa64mmfr1, boot, cur, cpu); - - /* - * EL3 is not our concern. - * ID_AA64PFR1 is currently RES0. - */ - diff |= CHECK_MASK(id_aa64pfr0, 0xffffffffffff0fff, boot, cur, cpu); - diff |= CHECK(id_aa64pfr1, boot, cur, cpu); - - /* - * If we have AArch32, we care about 32-bit features for compat. These - * registers should be RES0 otherwise. - */ - diff |= CHECK(id_dfr0, boot, cur, cpu); - diff |= CHECK(id_isar0, boot, cur, cpu); - diff |= CHECK(id_isar1, boot, cur, cpu); - diff |= CHECK(id_isar2, boot, cur, cpu); - diff |= CHECK(id_isar3, boot, cur, cpu); - diff |= CHECK(id_isar4, boot, cur, cpu); - diff |= CHECK(id_isar5, boot, cur, cpu); - /* - * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and - * ACTLR formats could differ across CPUs and therefore would have to - * be trapped for virtualization anyway. - */ - diff |= CHECK_MASK(id_mmfr0, 0xff0fffff, boot, cur, cpu); - diff |= CHECK(id_mmfr1, boot, cur, cpu); - diff |= CHECK(id_mmfr2, boot, cur, cpu); - diff |= CHECK(id_mmfr3, boot, cur, cpu); - diff |= CHECK(id_pfr0, boot, cur, cpu); - diff |= CHECK(id_pfr1, boot, cur, cpu); - - diff |= CHECK(mvfr0, boot, cur, cpu); - diff |= CHECK(mvfr1, boot, cur, cpu); - diff |= CHECK(mvfr2, boot, cur, cpu); - - /* - * Mismatched CPU features are a recipe for disaster. Don't even - * pretend to support them. 
- */ - WARN_TAINT_ONCE(diff, TAINT_CPU_OUT_OF_SPEC, - "Unsupported CPU feature variation.\n"); + pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu); } static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) @@ -236,15 +234,13 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) cpuinfo_detect_icache_policy(info); check_local_cpu_errata(); - check_local_cpu_features(); - update_cpu_features(info); } void cpuinfo_store_cpu(void) { struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data); __cpuinfo_store_cpu(info); - cpuinfo_sanity_check(info); + update_cpu_features(smp_processor_id(), info, &boot_cpu_data); } void __init cpuinfo_store_boot_cpu(void) @@ -253,4 +249,5 @@ void __init cpuinfo_store_boot_cpu(void) __cpuinfo_store_cpu(info); boot_cpu_data = *info; + init_cpu_features(&boot_cpu_data); } diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 253021ef2..8aee3aeec 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -26,14 +26,16 @@ #include <linux/stat.h> #include <linux/uaccess.h> -#include <asm/debug-monitors.h> +#include <asm/cpufeature.h> #include <asm/cputype.h> +#include <asm/debug-monitors.h> #include <asm/system_misc.h> /* Determine debug architecture. */ u8 debug_monitors_arch(void) { - return read_cpuid(ID_AA64DFR0_EL1) & 0xf; + return cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1), + ID_AA64DFR0_DEBUGVER_SHIFT); } /* @@ -58,7 +60,7 @@ static u32 mdscr_read(void) * Allow root to disable self-hosted debug from userspace. * This is useful if you want to connect an external JTAG debugger. */ -static u32 debug_enabled = 1; +static bool debug_enabled = true; static int create_debug_debugfs_entry(void) { @@ -69,7 +71,7 @@ fs_initcall(create_debug_debugfs_entry); static int __init early_debug_disable(char *buf) { - debug_enabled = 0; + debug_enabled = false; return 0; } diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S index 8ce9b0577..a773db929 100644 --- a/arch/arm64/kernel/efi-entry.S +++ b/arch/arm64/kernel/efi-entry.S @@ -29,7 +29,7 @@ * we want to be. The kernel image wants to be placed at TEXT_OFFSET * from start of RAM. */ -ENTRY(efi_stub_entry) +ENTRY(entry) /* * Create a stack frame to save FP/LR with extra space * for image_addr variable passed to efi_entry(). @@ -86,8 +86,8 @@ ENTRY(efi_stub_entry) * entries for the VA range of the current image, so no maintenance is * necessary. */ - adr x0, efi_stub_entry - adr x1, efi_stub_entry_end + adr x0, entry + adr x1, entry_end sub x1, x1, x0 bl __flush_dcache_area @@ -120,5 +120,5 @@ efi_load_fail: ldp x29, x30, [sp], #32 ret -efi_stub_entry_end: -ENDPROC(efi_stub_entry) +entry_end: +ENDPROC(entry) diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c deleted file mode 100644 index 78dfbd34b..000000000 --- a/arch/arm64/kernel/efi-stub.c +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright (C) 2013, 2014 Linaro Ltd; <roy.franz@linaro.org> - * - * This file implements the EFI boot stub for the arm64 kernel. - * Adapted from ARM version by Mark Salter <msalter@redhat.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - */ -#include <linux/efi.h> -#include <asm/efi.h> -#include <asm/sections.h> - -efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg, - unsigned long *image_addr, - unsigned long *image_size, - unsigned long *reserve_addr, - unsigned long *reserve_size, - unsigned long dram_base, - efi_loaded_image_t *image) -{ - efi_status_t status; - unsigned long kernel_size, kernel_memsize = 0; - unsigned long nr_pages; - void *old_image_addr = (void *)*image_addr; - unsigned long preferred_offset; - - /* - * The preferred offset of the kernel Image is TEXT_OFFSET bytes beyond - * a 2 MB aligned base, which itself may be lower than dram_base, as - * long as the resulting offset equals or exceeds it. - */ - preferred_offset = round_down(dram_base, SZ_2M) + TEXT_OFFSET; - if (preferred_offset < dram_base) - preferred_offset += SZ_2M; - - /* Relocate the image, if required. */ - kernel_size = _edata - _text; - if (*image_addr != preferred_offset) { - kernel_memsize = kernel_size + (_end - _edata); - - /* - * First, try a straight allocation at the preferred offset. - * This will work around the issue where, if dram_base == 0x0, - * efi_low_alloc() refuses to allocate at 0x0 (to prevent the - * address of the allocation to be mistaken for a FAIL return - * value or a NULL pointer). It will also ensure that, on - * platforms where the [dram_base, dram_base + TEXT_OFFSET) - * interval is partially occupied by the firmware (like on APM - * Mustang), we can still place the kernel at the address - * 'dram_base + TEXT_OFFSET'. - */ - *image_addr = *reserve_addr = preferred_offset; - nr_pages = round_up(kernel_memsize, EFI_ALLOC_ALIGN) / - EFI_PAGE_SIZE; - status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS, - EFI_LOADER_DATA, nr_pages, - (efi_physical_addr_t *)reserve_addr); - if (status != EFI_SUCCESS) { - kernel_memsize += TEXT_OFFSET; - status = efi_low_alloc(sys_table_arg, kernel_memsize, - SZ_2M, reserve_addr); - - if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, "Failed to relocate kernel\n"); - return status; - } - *image_addr = *reserve_addr + TEXT_OFFSET; - } - memcpy((void *)*image_addr, old_image_addr, kernel_size); - *reserve_size = kernel_memsize; - } - - - return EFI_SUCCESS; -} diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index 13671a9cf..4eeb17198 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -48,18 +48,8 @@ static struct mm_struct efi_mm = { .mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem), .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock), .mmlist = LIST_HEAD_INIT(efi_mm.mmlist), - INIT_MM_CONTEXT(efi_mm) }; -static int uefi_debug __initdata; -static int __init uefi_debug_setup(char *str) -{ - uefi_debug = 1; - - return 0; -} -early_param("uefi_debug", uefi_debug_setup); - static int __init is_normal_ram(efi_memory_desc_t *md) { if (md->attribute & EFI_MEMORY_WB) @@ -137,7 +127,11 @@ static int __init uefi_init(void) table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables; config_tables = early_memremap(efi_to_phys(efi.systab->tables), table_size); - + if (config_tables == NULL) { + pr_warn("Unable to map EFI config table array.\n"); + retval = -ENOMEM; + goto out; + } retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables, sizeof(efi_config_table_64_t), NULL); @@ -171,14 +165,14 @@ static __init void reserve_regions(void) efi_memory_desc_t *md; u64 paddr, npages, size; - if (uefi_debug) + if (efi_enabled(EFI_DBG)) pr_info("Processing EFI memory map:\n"); 
for_each_efi_memory_desc(&memmap, md) { paddr = md->phys_addr; npages = md->num_pages; - if (uefi_debug) { + if (efi_enabled(EFI_DBG)) { char buf[64]; pr_info(" 0x%012llx-0x%012llx %s", @@ -194,11 +188,11 @@ static __init void reserve_regions(void) if (is_reserve_region(md)) { memblock_reserve(paddr, size); - if (uefi_debug) + if (efi_enabled(EFI_DBG)) pr_cont("*"); } - if (uefi_debug) + if (efi_enabled(EFI_DBG)) pr_cont("\n"); } @@ -210,15 +204,23 @@ void __init efi_init(void) struct efi_fdt_params params; /* Grab UEFI information placed in FDT by stub */ - if (!efi_get_fdt_params(¶ms, uefi_debug)) + if (!efi_get_fdt_params(¶ms)) return; efi_system_table = params.system_table; memblock_reserve(params.mmap & PAGE_MASK, PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK))); - memmap.phys_map = (void *)params.mmap; + memmap.phys_map = params.mmap; memmap.map = early_memremap(params.mmap, params.mmap_size); + if (memmap.map == NULL) { + /* + * If we are booting via UEFI, the UEFI memory map is the only + * description of memory we have, so there is little point in + * proceeding if we cannot access it. + */ + panic("Unable to map EFI memory map.\n"); + } memmap.map_end = memmap.map + params.mmap_size; memmap.desc_size = params.desc_size; memmap.desc_version = params.desc_ver; @@ -234,8 +236,9 @@ static bool __init efi_virtmap_init(void) { efi_memory_desc_t *md; + init_new_context(NULL, &efi_mm); + for_each_efi_memory_desc(&memmap, md) { - u64 paddr, npages, size; pgprot_t prot; if (!(md->attribute & EFI_MEMORY_RUNTIME)) @@ -243,11 +246,6 @@ static bool __init efi_virtmap_init(void) if (md->virt_addr == 0) return false; - paddr = md->phys_addr; - npages = md->num_pages; - memrange_efi_to_native(&paddr, &npages); - size = npages << PAGE_SHIFT; - pr_info(" EFI remap 0x%016llx => %p\n", md->phys_addr, (void *)md->virt_addr); @@ -264,7 +262,9 @@ static bool __init efi_virtmap_init(void) else prot = PAGE_KERNEL; - create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot); + create_pgd_mapping(&efi_mm, md->phys_addr, md->virt_addr, + md->num_pages << EFI_PAGE_SHIFT, + __pgprot(pgprot_val(prot) | PTE_NG)); } return true; } @@ -280,22 +280,22 @@ static int __init arm64_enable_runtime_services(void) if (!efi_enabled(EFI_BOOT)) { pr_info("EFI services will not be available.\n"); - return -1; + return 0; } if (efi_runtime_disabled()) { pr_info("EFI runtime services will be disabled.\n"); - return -1; + return 0; } pr_info("Remapping and enabling EFI services.\n"); mapsize = memmap.map_end - memmap.map; - memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map, + memmap.map = (__force void *)ioremap_cache(memmap.phys_map, mapsize); if (!memmap.map) { pr_err("Failed to remap EFI memory map\n"); - return -1; + return -ENOMEM; } memmap.map_end = memmap.map + mapsize; efi.memmap = &memmap; @@ -304,13 +304,13 @@ static int __init arm64_enable_runtime_services(void) sizeof(efi_system_table_t)); if (!efi.systab) { pr_err("Failed to remap EFI System Table\n"); - return -1; + return -ENOMEM; } set_bit(EFI_SYSTEM_TABLES, &efi.flags); if (!efi_virtmap_init()) { pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n"); - return -1; + return -ENOMEM; } /* Set up runtime services function pointers */ @@ -339,14 +339,7 @@ core_initcall(arm64_dmi_init); static void efi_set_pgd(struct mm_struct *mm) { - if (mm == &init_mm) - cpu_set_reserved_ttbr0(); - else - cpu_switch_mm(mm->pgd, mm); - - flush_tlb_all(); - if (icache_is_aivivt()) - __flush_icache_all(); + 
switch_mm(NULL, mm, NULL); } void efi_virtmap_load(void) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 4306c937b..7ed3d75f6 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -430,6 +430,8 @@ el0_sync_compat: b.eq el0_fpsimd_acc cmp x24, #ESR_ELx_EC_FP_EXC32 // FP/ASIMD exception b.eq el0_fpsimd_exc + cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception + b.eq el0_sp_pc cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0 b.eq el0_undef cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index c56956a16..4c46c54a3 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -332,21 +332,15 @@ static inline void fpsimd_hotplug_init(void) { } */ static int __init fpsimd_init(void) { - u64 pfr = read_cpuid(ID_AA64PFR0_EL1); - - if (pfr & (0xf << 16)) { + if (elf_hwcap & HWCAP_FP) { + fpsimd_pm_init(); + fpsimd_hotplug_init(); + } else { pr_notice("Floating-point is not implemented\n"); - return 0; } - elf_hwcap |= HWCAP_FP; - if (pfr & (0xf << 20)) + if (!(elf_hwcap & HWCAP_ASIMD)) pr_notice("Advanced SIMD is not implemented\n"); - else - elf_hwcap |= HWCAP_ASIMD; - - fpsimd_pm_init(); - fpsimd_hotplug_init(); return 0; } diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 90d09eddd..23cfc08fc 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -29,11 +29,13 @@ #include <asm/asm-offsets.h> #include <asm/cache.h> #include <asm/cputype.h> +#include <asm/kernel-pgtable.h> #include <asm/memory.h> -#include <asm/thread_info.h> #include <asm/pgtable-hwdef.h> #include <asm/pgtable.h> #include <asm/page.h> +#include <asm/sysreg.h> +#include <asm/thread_info.h> #include <asm/virt.h> #define __PHYS_OFFSET (KERNEL_START - TEXT_OFFSET) @@ -46,32 +48,10 @@ #error TEXT_OFFSET must be less than 2MB #endif -#ifdef CONFIG_ARM64_64K_PAGES -#define BLOCK_SHIFT PAGE_SHIFT -#define BLOCK_SIZE PAGE_SIZE -#define TABLE_SHIFT PMD_SHIFT -#else -#define BLOCK_SHIFT SECTION_SHIFT -#define BLOCK_SIZE SECTION_SIZE -#define TABLE_SHIFT PUD_SHIFT -#endif - #define KERNEL_START _text #define KERNEL_END _end /* - * Initial memory map attributes. - */ -#define PTE_FLAGS PTE_TYPE_PAGE | PTE_AF | PTE_SHARED -#define PMD_FLAGS PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S - -#ifdef CONFIG_ARM64_64K_PAGES -#define MM_MMUFLAGS PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS -#else -#define MM_MMUFLAGS PMD_ATTRINDX(MT_NORMAL) | PMD_FLAGS -#endif - -/* * Kernel startup entry point. 
* --------------------------- * @@ -120,8 +100,8 @@ efi_head: #endif #ifdef CONFIG_EFI - .globl stext_offset - .set stext_offset, stext - efi_head + .globl __efistub_stext_offset + .set __efistub_stext_offset, stext - efi_head .align 3 pe_header: .ascii "PE" @@ -144,8 +124,8 @@ optional_header: .long _end - stext // SizeOfCode .long 0 // SizeOfInitializedData .long 0 // SizeOfUninitializedData - .long efi_stub_entry - efi_head // AddressOfEntryPoint - .long stext_offset // BaseOfCode + .long __efistub_entry - efi_head // AddressOfEntryPoint + .long __efistub_stext_offset // BaseOfCode extra_header_fields: .quad 0 // ImageBase @@ -162,7 +142,7 @@ extra_header_fields: .long _end - efi_head // SizeOfImage // Everything before the kernel image is considered part of the header - .long stext_offset // SizeOfHeaders + .long __efistub_stext_offset // SizeOfHeaders .long 0 // CheckSum .short 0xa // Subsystem (EFI application) .short 0 // DllCharacteristics @@ -207,9 +187,9 @@ section_table: .byte 0 .byte 0 // end of 0 padding of section name .long _end - stext // VirtualSize - .long stext_offset // VirtualAddress + .long __efistub_stext_offset // VirtualAddress .long _edata - stext // SizeOfRawData - .long stext_offset // PointerToRawData + .long __efistub_stext_offset // PointerToRawData .long 0 // PointerToRelocations (0 for executables) .long 0 // PointerToLineNumbers (0 for executables) @@ -292,8 +272,11 @@ ENDPROC(preserve_boot_args) */ .macro create_pgd_entry, tbl, virt, tmp1, tmp2 create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2 -#if SWAPPER_PGTABLE_LEVELS == 3 - create_table_entry \tbl, \virt, TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2 +#if SWAPPER_PGTABLE_LEVELS > 3 + create_table_entry \tbl, \virt, PUD_SHIFT, PTRS_PER_PUD, \tmp1, \tmp2 +#endif +#if SWAPPER_PGTABLE_LEVELS > 2 + create_table_entry \tbl, \virt, SWAPPER_TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2 #endif .endm @@ -305,15 +288,15 @@ ENDPROC(preserve_boot_args) * Corrupts: phys, start, end, pstate */ .macro create_block_map, tbl, flags, phys, start, end - lsr \phys, \phys, #BLOCK_SHIFT - lsr \start, \start, #BLOCK_SHIFT + lsr \phys, \phys, #SWAPPER_BLOCK_SHIFT + lsr \start, \start, #SWAPPER_BLOCK_SHIFT and \start, \start, #PTRS_PER_PTE - 1 // table index - orr \phys, \flags, \phys, lsl #BLOCK_SHIFT // table entry - lsr \end, \end, #BLOCK_SHIFT + orr \phys, \flags, \phys, lsl #SWAPPER_BLOCK_SHIFT // table entry + lsr \end, \end, #SWAPPER_BLOCK_SHIFT and \end, \end, #PTRS_PER_PTE - 1 // table end index 9999: str \phys, [\tbl, \start, lsl #3] // store the entry add \start, \start, #1 // next entry - add \phys, \phys, #BLOCK_SIZE // next block + add \phys, \phys, #SWAPPER_BLOCK_SIZE // next block cmp \start, \end b.ls 9999b .endm @@ -350,7 +333,7 @@ __create_page_tables: cmp x0, x6 b.lo 1b - ldr x7, =MM_MMUFLAGS + ldr x7, =SWAPPER_MM_MMUFLAGS /* * Create the identity mapping. 
@@ -444,6 +427,9 @@ __mmap_switched: str_l x21, __fdt_pointer, x5 // Save FDT pointer str_l x24, memstart_addr, x6 // Save PHYS_OFFSET mov x29, #0 +#ifdef CONFIG_KASAN + bl kasan_early_init +#endif b start_kernel ENDPROC(__mmap_switched) @@ -498,6 +484,8 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1 orr x0, x0, #ICC_SRE_EL2_ENABLE // Set ICC_SRE_EL2.Enable==1 msr_s ICC_SRE_EL2, x0 isb // Make sure SRE is now set + mrs_s x0, ICC_SRE_EL2 // Read SRE back, + tbz x0, #0, 3f // and check that it sticks msr_s ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults 3: @@ -628,10 +616,17 @@ ENDPROC(__secondary_switched) * x0 = SCTLR_EL1 value for turning on the MMU. * x27 = *virtual* address to jump to upon completion * - * other registers depend on the function called upon completion + * Other registers depend on the function called upon completion. + * + * Checks if the selected granule size is supported by the CPU. + * If it isn't, park the CPU */ .section ".idmap.text", "ax" __enable_mmu: + mrs x1, ID_AA64MMFR0_EL1 + ubfx x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4 + cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED + b.ne __no_granule_support ldr x5, =vectors msr vbar_el1, x5 msr ttbr0_el1, x25 // load TTBR0 @@ -649,3 +644,8 @@ __enable_mmu: isb br x27 ENDPROC(__enable_mmu) + +__no_granule_support: + wfe + b __no_granule_support +ENDPROC(__no_granule_support) diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index bba85c8f8..b45c95d34 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -28,6 +28,7 @@ #include <linux/ptrace.h> #include <linux/smp.h> +#include <asm/compat.h> #include <asm/current.h> #include <asm/debug-monitors.h> #include <asm/hw_breakpoint.h> @@ -163,6 +164,20 @@ enum hw_breakpoint_ops { HW_BREAKPOINT_RESTORE }; +static int is_compat_bp(struct perf_event *bp) +{ + struct task_struct *tsk = bp->hw.target; + + /* + * tsk can be NULL for per-cpu (non-ptrace) breakpoints. + * In this case, use the native interface, since we don't have + * the notion of a "compat CPU" and could end up relying on + * deprecated behaviour if we use unaligned watchpoints in + * AArch64 state. + */ + return tsk && is_compat_thread(task_thread_info(tsk)); +} + /** * hw_breakpoint_slot_setup - Find and setup a perf slot according to * operations @@ -420,7 +435,7 @@ static int arch_build_bp_info(struct perf_event *bp) * Watchpoints can be of length 1, 2, 4 or 8 bytes. */ if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { - if (is_compat_task()) { + if (is_compat_bp(bp)) { if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 && info->ctrl.len != ARM_BREAKPOINT_LEN_4) return -EINVAL; @@ -477,7 +492,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) * AArch32 tasks expect some simple alignment fixups, so emulate * that here. 
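The fixup mentioned in the comment above amounts to splitting an unaligned AArch32 watchpoint request into an aligned base address plus a byte-select mask. A hedged, self-contained illustration of that shape of transformation (the helper name and its types are invented for the example, not taken from the patch):

#include <stdint.h>

/* Doubleword watchpoints align to 8 bytes, everything else to 4; the
 * bytes actually watched are encoded as a mask relative to the aligned
 * base, in the style of the hardware byte-address-select field. */
static void align_compat_watchpoint(uint64_t addr, unsigned int len_bytes,
				    uint64_t *base, unsigned int *byte_mask)
{
	uint64_t mask = (len_bytes == 8) ? 0x7 : 0x3;
	unsigned int offset = addr & mask;

	*base = addr & ~mask;
	*byte_mask = ((1U << len_bytes) - 1) << offset;
}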
*/ - if (is_compat_task()) { + if (is_compat_bp(bp)) { if (info->ctrl.len == ARM_BREAKPOINT_LEN_8) alignment_mask = 0x7; else diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h index 8fae0756e..bc2abb8b1 100644 --- a/arch/arm64/kernel/image.h +++ b/arch/arm64/kernel/image.h @@ -47,7 +47,10 @@ #define __HEAD_FLAG_BE 0 #endif -#define __HEAD_FLAGS (__HEAD_FLAG_BE << 0) +#define __HEAD_FLAG_PAGE_SIZE ((PAGE_SHIFT - 10) / 2) + +#define __HEAD_FLAGS ((__HEAD_FLAG_BE << 0) | \ + (__HEAD_FLAG_PAGE_SIZE << 1)) /* * These will output as part of the Image header, which should be little-endian @@ -59,4 +62,37 @@ _kernel_offset_le = DATA_LE64(TEXT_OFFSET); \ _kernel_flags_le = DATA_LE64(__HEAD_FLAGS); +#ifdef CONFIG_EFI + +/* + * The EFI stub has its own symbol namespace prefixed by __efistub_, to + * isolate it from the kernel proper. The following symbols are legally + * accessed by the stub, so provide some aliases to make them accessible. + * Only include data symbols here, or text symbols of functions that are + * guaranteed to be safe when executed at another offset than they were + * linked at. The routines below are all implemented in assembler in a + * position independent manner + */ +__efistub_memcmp = __pi_memcmp; +__efistub_memchr = __pi_memchr; +__efistub_memcpy = __pi_memcpy; +__efistub_memmove = __pi_memmove; +__efistub_memset = __pi_memset; +__efistub_strlen = __pi_strlen; +__efistub_strcmp = __pi_strcmp; +__efistub_strncmp = __pi_strncmp; +__efistub___flush_dcache_area = __pi___flush_dcache_area; + +#ifdef CONFIG_KASAN +__efistub___memcpy = __pi_memcpy; +__efistub___memmove = __pi_memmove; +__efistub___memset = __pi_memset; +#endif + +__efistub__text = _text; +__efistub__end = _end; +__efistub__edata = _edata; + +#endif + #endif /* __ASM_IMAGE_H */ diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c index 11dc3fd47..9f17ec071 100644 --- a/arch/arm64/kernel/irq.c +++ b/arch/arm64/kernel/irq.c @@ -27,7 +27,6 @@ #include <linux/init.h> #include <linux/irqchip.h> #include <linux/seq_file.h> -#include <linux/ratelimit.h> unsigned long irq_err_count; @@ -54,64 +53,3 @@ void __init init_IRQ(void) if (!handle_arch_irq) panic("No interrupt controller found."); } - -#ifdef CONFIG_HOTPLUG_CPU -static bool migrate_one_irq(struct irq_desc *desc) -{ - struct irq_data *d = irq_desc_get_irq_data(desc); - const struct cpumask *affinity = irq_data_get_affinity_mask(d); - struct irq_chip *c; - bool ret = false; - - /* - * If this is a per-CPU interrupt, or the affinity does not - * include this CPU, then we have nothing to do. - */ - if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) - return false; - - if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { - affinity = cpu_online_mask; - ret = true; - } - - c = irq_data_get_irq_chip(d); - if (!c->irq_set_affinity) - pr_debug("IRQ%u: unable to set affinity\n", d->irq); - else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) - cpumask_copy(irq_data_get_affinity_mask(d), affinity); - - return ret; -} - -/* - * The current CPU has been marked offline. Migrate IRQs off this CPU. - * If the affinity settings do not allow other CPUs, force them onto any - * available CPU. - * - * Note: we must iterate over all IRQs, whether they have an attached - * action structure or not, as we need to get chained interrupts too. 
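The __HEAD_FLAG_PAGE_SIZE value added to image.h above is a compact encoding of the kernel's page size in the Image header flags: (PAGE_SHIFT - 10) / 2 gives 1 for 4K pages, 2 for 16K and 3 for 64K, leaving 0 to mean "unspecified". A quick stand-alone check of that arithmetic:

#include <assert.h>

/* (PAGE_SHIFT - 10) / 2:  12 -> 1 (4K),  14 -> 2 (16K),  16 -> 3 (64K) */
static unsigned int head_flag_page_size(unsigned int page_shift)
{
	return (page_shift - 10) / 2;
}

int main(void)
{
	assert(head_flag_page_size(12) == 1);
	assert(head_flag_page_size(14) == 2);
	assert(head_flag_page_size(16) == 3);
	return 0;
}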
- */ -void migrate_irqs(void) -{ - unsigned int i; - struct irq_desc *desc; - unsigned long flags; - - local_irq_save(flags); - - for_each_irq_desc(i, desc) { - bool affinity_broken; - - raw_spin_lock(&desc->lock); - affinity_broken = migrate_one_irq(desc); - raw_spin_unlock(&desc->lock); - - if (affinity_broken) - pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n", - i, smp_processor_id()); - } - - local_irq_restore(flags); -} -#endif /* CONFIG_HOTPLUG_CPU */ diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index 876eb8df5..f4bc779e6 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -21,6 +21,7 @@ #include <linux/bitops.h> #include <linux/elf.h> #include <linux/gfp.h> +#include <linux/kasan.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/moduleloader.h> @@ -34,9 +35,18 @@ void *module_alloc(unsigned long size) { - return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, - GFP_KERNEL, PAGE_KERNEL_EXEC, 0, - NUMA_NO_NODE, __builtin_return_address(0)); + void *p; + + p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END, + GFP_KERNEL, PAGE_KERNEL_EXEC, 0, + NUMA_NO_NODE, __builtin_return_address(0)); + + if (p && (kasan_module_alloc(p, size) < 0)) { + vfree(p); + return NULL; + } + + return p; } enum aarch64_reloc_op { diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index f9a74d4ff..5b1897e8c 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -18,651 +18,12 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ -#define pr_fmt(fmt) "hw perfevents: " fmt - -#include <linux/bitmap.h> -#include <linux/interrupt.h> -#include <linux/irq.h> -#include <linux/kernel.h> -#include <linux/export.h> -#include <linux/of_device.h> -#include <linux/perf_event.h> -#include <linux/platform_device.h> -#include <linux/slab.h> -#include <linux/spinlock.h> -#include <linux/uaccess.h> -#include <asm/cputype.h> -#include <asm/irq.h> #include <asm/irq_regs.h> -#include <asm/pmu.h> - -/* - * ARMv8 supports a maximum of 32 events. - * The cycle counter is included in this total. - */ -#define ARMPMU_MAX_HWEVENTS 32 - -static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events); -static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask); -static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events); - -#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) - -/* Set at runtime when we know what CPU type we are. */ -static struct arm_pmu *cpu_pmu; - -int -armpmu_get_max_events(void) -{ - int max_events = 0; - - if (cpu_pmu != NULL) - max_events = cpu_pmu->num_events; - - return max_events; -} -EXPORT_SYMBOL_GPL(armpmu_get_max_events); - -int perf_num_counters(void) -{ - return armpmu_get_max_events(); -} -EXPORT_SYMBOL_GPL(perf_num_counters); - -#define HW_OP_UNSUPPORTED 0xFFFF - -#define C(_x) \ - PERF_COUNT_HW_CACHE_##_x - -#define CACHE_OP_UNSUPPORTED 0xFFFF - -#define PERF_MAP_ALL_UNSUPPORTED \ - [0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED - -#define PERF_CACHE_MAP_ALL_UNSUPPORTED \ -[0 ... C(MAX) - 1] = { \ - [0 ... C(OP_MAX) - 1] = { \ - [0 ... 
C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \ - }, \ -} - -static int -armpmu_map_cache_event(const unsigned (*cache_map) - [PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX], - u64 config) -{ - unsigned int cache_type, cache_op, cache_result, ret; - - cache_type = (config >> 0) & 0xff; - if (cache_type >= PERF_COUNT_HW_CACHE_MAX) - return -EINVAL; - - cache_op = (config >> 8) & 0xff; - if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) - return -EINVAL; - - cache_result = (config >> 16) & 0xff; - if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) - return -EINVAL; - - ret = (int)(*cache_map)[cache_type][cache_op][cache_result]; - - if (ret == CACHE_OP_UNSUPPORTED) - return -ENOENT; - - return ret; -} - -static int -armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) -{ - int mapping; - - if (config >= PERF_COUNT_HW_MAX) - return -EINVAL; - - mapping = (*event_map)[config]; - return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; -} - -static int -armpmu_map_raw_event(u32 raw_event_mask, u64 config) -{ - return (int)(config & raw_event_mask); -} - -static int map_cpu_event(struct perf_event *event, - const unsigned (*event_map)[PERF_COUNT_HW_MAX], - const unsigned (*cache_map) - [PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX], - u32 raw_event_mask) -{ - u64 config = event->attr.config; - - switch (event->attr.type) { - case PERF_TYPE_HARDWARE: - return armpmu_map_event(event_map, config); - case PERF_TYPE_HW_CACHE: - return armpmu_map_cache_event(cache_map, config); - case PERF_TYPE_RAW: - return armpmu_map_raw_event(raw_event_mask, config); - } - - return -ENOENT; -} - -int -armpmu_event_set_period(struct perf_event *event, - struct hw_perf_event *hwc, - int idx) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - s64 left = local64_read(&hwc->period_left); - s64 period = hwc->sample_period; - int ret = 0; - - if (unlikely(left <= -period)) { - left = period; - local64_set(&hwc->period_left, left); - hwc->last_period = period; - ret = 1; - } - - if (unlikely(left <= 0)) { - left += period; - local64_set(&hwc->period_left, left); - hwc->last_period = period; - ret = 1; - } - - /* - * Limit the maximum period to prevent the counter value - * from overtaking the one we are about to program. In - * effect we are reducing max_period to account for - * interrupt latency (and we are being very conservative). - */ - if (left > (armpmu->max_period >> 1)) - left = armpmu->max_period >> 1; - - local64_set(&hwc->prev_count, (u64)-left); - - armpmu->write_counter(idx, (u64)(-left) & 0xffffffff); - - perf_event_update_userpage(event); - - return ret; -} - -u64 -armpmu_event_update(struct perf_event *event, - struct hw_perf_event *hwc, - int idx) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - u64 delta, prev_raw_count, new_raw_count; - -again: - prev_raw_count = local64_read(&hwc->prev_count); - new_raw_count = armpmu->read_counter(idx); - - if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, - new_raw_count) != prev_raw_count) - goto again; - - delta = (new_raw_count - prev_raw_count) & armpmu->max_period; - - local64_add(delta, &event->count); - local64_sub(delta, &hwc->period_left); - - return new_raw_count; -} - -static void -armpmu_read(struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - - /* Don't read disabled counters! 
*/ - if (hwc->idx < 0) - return; - - armpmu_event_update(event, hwc, hwc->idx); -} - -static void -armpmu_stop(struct perf_event *event, int flags) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; - - /* - * ARM pmu always has to update the counter, so ignore - * PERF_EF_UPDATE, see comments in armpmu_start(). - */ - if (!(hwc->state & PERF_HES_STOPPED)) { - armpmu->disable(hwc, hwc->idx); - barrier(); /* why? */ - armpmu_event_update(event, hwc, hwc->idx); - hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; - } -} - -static void -armpmu_start(struct perf_event *event, int flags) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; - - /* - * ARM pmu always has to reprogram the period, so ignore - * PERF_EF_RELOAD, see the comment below. - */ - if (flags & PERF_EF_RELOAD) - WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); - - hwc->state = 0; - /* - * Set the period again. Some counters can't be stopped, so when we - * were stopped we simply disabled the IRQ source and the counter - * may have been left counting. If we don't do this step then we may - * get an interrupt too soon or *way* too late if the overflow has - * happened since disabling. - */ - armpmu_event_set_period(event, hwc, hwc->idx); - armpmu->enable(hwc, hwc->idx); -} - -static void -armpmu_del(struct perf_event *event, int flags) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct pmu_hw_events *hw_events = armpmu->get_hw_events(); - struct hw_perf_event *hwc = &event->hw; - int idx = hwc->idx; - - WARN_ON(idx < 0); - - armpmu_stop(event, PERF_EF_UPDATE); - hw_events->events[idx] = NULL; - clear_bit(idx, hw_events->used_mask); - - perf_event_update_userpage(event); -} - -static int -armpmu_add(struct perf_event *event, int flags) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct pmu_hw_events *hw_events = armpmu->get_hw_events(); - struct hw_perf_event *hwc = &event->hw; - int idx; - int err = 0; - - perf_pmu_disable(event->pmu); - - /* If we don't have a space for the counter then finish early. */ - idx = armpmu->get_event_idx(hw_events, hwc); - if (idx < 0) { - err = idx; - goto out; - } - - /* - * If there is an event in the counter we are going to use then make - * sure it is disabled. - */ - event->hw.idx = idx; - armpmu->disable(hwc, idx); - hw_events->events[idx] = event; - - hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; - if (flags & PERF_EF_START) - armpmu_start(event, PERF_EF_RELOAD); - - /* Propagate our changes to the userspace mapping. */ - perf_event_update_userpage(event); - -out: - perf_pmu_enable(event->pmu); - return err; -} - -static int -validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events, - struct perf_event *event) -{ - struct arm_pmu *armpmu; - struct hw_perf_event fake_event = event->hw; - struct pmu *leader_pmu = event->group_leader->pmu; - - if (is_software_event(event)) - return 1; - - /* - * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The - * core perf code won't check that the pmu->ctx == leader->ctx - * until after pmu->event_init(event). 
- */ - if (event->pmu != pmu) - return 0; - - if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) - return 1; - - if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) - return 1; - - armpmu = to_arm_pmu(event->pmu); - return armpmu->get_event_idx(hw_events, &fake_event) >= 0; -} - -static int -validate_group(struct perf_event *event) -{ - struct perf_event *sibling, *leader = event->group_leader; - struct pmu_hw_events fake_pmu; - DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS); - - /* - * Initialise the fake PMU. We only need to populate the - * used_mask for the purposes of validation. - */ - memset(fake_used_mask, 0, sizeof(fake_used_mask)); - fake_pmu.used_mask = fake_used_mask; - - if (!validate_event(event->pmu, &fake_pmu, leader)) - return -EINVAL; - - list_for_each_entry(sibling, &leader->sibling_list, group_entry) { - if (!validate_event(event->pmu, &fake_pmu, sibling)) - return -EINVAL; - } - - if (!validate_event(event->pmu, &fake_pmu, event)) - return -EINVAL; - - return 0; -} - -static void -armpmu_disable_percpu_irq(void *data) -{ - unsigned int irq = *(unsigned int *)data; - disable_percpu_irq(irq); -} - -static void -armpmu_release_hardware(struct arm_pmu *armpmu) -{ - int irq; - unsigned int i, irqs; - struct platform_device *pmu_device = armpmu->plat_device; - - irqs = min(pmu_device->num_resources, num_possible_cpus()); - if (!irqs) - return; - - irq = platform_get_irq(pmu_device, 0); - if (irq <= 0) - return; - - if (irq_is_percpu(irq)) { - on_each_cpu(armpmu_disable_percpu_irq, &irq, 1); - free_percpu_irq(irq, &cpu_hw_events); - } else { - for (i = 0; i < irqs; ++i) { - int cpu = i; - - if (armpmu->irq_affinity) - cpu = armpmu->irq_affinity[i]; - - if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs)) - continue; - irq = platform_get_irq(pmu_device, i); - if (irq > 0) - free_irq(irq, armpmu); - } - } -} - -static void -armpmu_enable_percpu_irq(void *data) -{ - unsigned int irq = *(unsigned int *)data; - enable_percpu_irq(irq, IRQ_TYPE_NONE); -} - -static int -armpmu_reserve_hardware(struct arm_pmu *armpmu) -{ - int err, irq; - unsigned int i, irqs; - struct platform_device *pmu_device = armpmu->plat_device; - - if (!pmu_device) - return -ENODEV; - - irqs = min(pmu_device->num_resources, num_possible_cpus()); - if (!irqs) { - pr_err("no irqs for PMUs defined\n"); - return -ENODEV; - } - - irq = platform_get_irq(pmu_device, 0); - if (irq <= 0) { - pr_err("failed to get valid irq for PMU device\n"); - return -ENODEV; - } - - if (irq_is_percpu(irq)) { - err = request_percpu_irq(irq, armpmu->handle_irq, - "arm-pmu", &cpu_hw_events); - - if (err) { - pr_err("unable to request percpu IRQ%d for ARM PMU counters\n", - irq); - armpmu_release_hardware(armpmu); - return err; - } - - on_each_cpu(armpmu_enable_percpu_irq, &irq, 1); - } else { - for (i = 0; i < irqs; ++i) { - int cpu = i; - - err = 0; - irq = platform_get_irq(pmu_device, i); - if (irq <= 0) - continue; - - if (armpmu->irq_affinity) - cpu = armpmu->irq_affinity[i]; - - /* - * If we have a single PMU interrupt that we can't shift, - * assume that we're running on a uniprocessor machine and - * continue. Otherwise, continue without this interrupt. 
- */ - if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) { - pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", - irq, cpu); - continue; - } - - err = request_irq(irq, armpmu->handle_irq, - IRQF_NOBALANCING | IRQF_NO_THREAD, - "arm-pmu", armpmu); - if (err) { - pr_err("unable to request IRQ%d for ARM PMU counters\n", - irq); - armpmu_release_hardware(armpmu); - return err; - } - - cpumask_set_cpu(cpu, &armpmu->active_irqs); - } - } - - return 0; -} - -static void -hw_perf_event_destroy(struct perf_event *event) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - atomic_t *active_events = &armpmu->active_events; - struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex; - - if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) { - armpmu_release_hardware(armpmu); - mutex_unlock(pmu_reserve_mutex); - } -} - -static int -event_requires_mode_exclusion(struct perf_event_attr *attr) -{ - return attr->exclude_idle || attr->exclude_user || - attr->exclude_kernel || attr->exclude_hv; -} - -static int -__hw_perf_event_init(struct perf_event *event) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; - int mapping, err; - - mapping = armpmu->map_event(event); - - if (mapping < 0) { - pr_debug("event %x:%llx not supported\n", event->attr.type, - event->attr.config); - return mapping; - } - - /* - * We don't assign an index until we actually place the event onto - * hardware. Use -1 to signify that we haven't decided where to put it - * yet. For SMP systems, each core has it's own PMU so we can't do any - * clever allocation or constraints checking at this point. - */ - hwc->idx = -1; - hwc->config_base = 0; - hwc->config = 0; - hwc->event_base = 0; - - /* - * Check whether we need to exclude the counter from certain modes. - */ - if ((!armpmu->set_event_filter || - armpmu->set_event_filter(hwc, &event->attr)) && - event_requires_mode_exclusion(&event->attr)) { - pr_debug("ARM performance counters do not support mode exclusion\n"); - return -EPERM; - } - - /* - * Store the event encoding into the config_base field. - */ - hwc->config_base |= (unsigned long)mapping; - - if (!hwc->sample_period) { - /* - * For non-sampling runs, limit the sample_period to half - * of the counter width. That way, the new counter value - * is far less likely to overtake the previous one unless - * you have some serious IRQ latency issues. 
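To put a number on the headroom described above: with the 32-bit ARMv8 counters (max_period = 2^32 - 1), the default sample_period becomes roughly 2^31, the counter is programmed with the two's complement of that value, and about 2^31 further events must occur before it can wrap again. A small stand-alone check of that arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t max_period = (1ULL << 32) - 1;
	uint64_t left = max_period >> 1;	/* default sample_period */
	uint32_t programmed = (uint32_t)-left;	/* value written to the counter */

	/* events remaining until the 32-bit counter wraps equals 'left' */
	assert((uint64_t)UINT32_MAX - programmed + 1 == left);
	return 0;
}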
- */ - hwc->sample_period = armpmu->max_period >> 1; - hwc->last_period = hwc->sample_period; - local64_set(&hwc->period_left, hwc->sample_period); - } - - err = 0; - if (event->group_leader != event) { - err = validate_group(event); - if (err) - return -EINVAL; - } - - return err; -} - -static int armpmu_event_init(struct perf_event *event) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - int err = 0; - atomic_t *active_events = &armpmu->active_events; - - if (armpmu->map_event(event) == -ENOENT) - return -ENOENT; - - event->destroy = hw_perf_event_destroy; - - if (!atomic_inc_not_zero(active_events)) { - mutex_lock(&armpmu->reserve_mutex); - if (atomic_read(active_events) == 0) - err = armpmu_reserve_hardware(armpmu); - - if (!err) - atomic_inc(active_events); - mutex_unlock(&armpmu->reserve_mutex); - } - if (err) - return err; - - err = __hw_perf_event_init(event); - if (err) - hw_perf_event_destroy(event); - - return err; -} - -static void armpmu_enable(struct pmu *pmu) -{ - struct arm_pmu *armpmu = to_arm_pmu(pmu); - struct pmu_hw_events *hw_events = armpmu->get_hw_events(); - int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); - - if (enabled) - armpmu->start(); -} - -static void armpmu_disable(struct pmu *pmu) -{ - struct arm_pmu *armpmu = to_arm_pmu(pmu); - armpmu->stop(); -} - -static void __init armpmu_init(struct arm_pmu *armpmu) -{ - atomic_set(&armpmu->active_events, 0); - mutex_init(&armpmu->reserve_mutex); - - armpmu->pmu = (struct pmu) { - .pmu_enable = armpmu_enable, - .pmu_disable = armpmu_disable, - .event_init = armpmu_event_init, - .add = armpmu_add, - .del = armpmu_del, - .start = armpmu_start, - .stop = armpmu_stop, - .read = armpmu_read, - }; -} - -int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type) -{ - armpmu_init(armpmu); - return perf_pmu_register(&armpmu->pmu, name, type); -} +#include <linux/of.h> +#include <linux/perf/arm_pmu.h> +#include <linux/platform_device.h> /* * ARMv8 PMUv3 Performance Events handling code. @@ -708,6 +69,21 @@ enum armv8_pmuv3_perf_types { ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D, }; +/* ARMv8 Cortex-A53 specific event types. */ +enum armv8_a53_pmu_perf_types { + ARMV8_A53_PERFCTR_PREFETCH_LINEFILL = 0xC2, +}; + +/* ARMv8 Cortex-A57 specific event types. */ +enum armv8_a57_perf_types { + ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD = 0x40, + ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST = 0x41, + ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD = 0x42, + ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST = 0x43, + ARMV8_A57_PERFCTR_DTLB_REFILL_LD = 0x4c, + ARMV8_A57_PERFCTR_DTLB_REFILL_ST = 0x4d, +}; + /* PMUv3 HW events mapping. */ static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = { PERF_MAP_ALL_UNSUPPORTED, @@ -718,6 +94,28 @@ static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = { [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, }; +/* ARM Cortex-A53 HW events mapping. 
*/ +static const unsigned armv8_a53_perf_map[PERF_COUNT_HW_MAX] = { + PERF_MAP_ALL_UNSUPPORTED, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED, + [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS, + [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_PC_WRITE, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES, +}; + +static const unsigned armv8_a57_perf_map[PERF_COUNT_HW_MAX] = { + PERF_MAP_ALL_UNSUPPORTED, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED, + [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS, + [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES, +}; + static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { @@ -734,12 +132,60 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, }; +static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + PERF_CACHE_MAP_ALL_UNSUPPORTED, + + [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS, + [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL, + [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS, + [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL, + [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREFETCH_LINEFILL, + + [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS, + [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL, + + [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_ITLB_REFILL, + + [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, +}; + +static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + PERF_CACHE_MAP_ALL_UNSUPPORTED, + + [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD, + [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD, + [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST, + [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST, + + [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS, + [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL, + + [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_A57_PERFCTR_DTLB_REFILL_LD, + [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_A57_PERFCTR_DTLB_REFILL_ST, + + [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_ITLB_REFILL, + + [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = 
ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED, + [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, +}; + + /* * Perf Events' indices */ #define ARMV8_IDX_CYCLE_COUNTER 0 #define ARMV8_IDX_COUNTER0 1 -#define ARMV8_IDX_COUNTER_LAST (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) +#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \ + (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) #define ARMV8_MAX_COUNTERS 32 #define ARMV8_COUNTER_MASK (ARMV8_MAX_COUNTERS - 1) @@ -805,49 +251,34 @@ static inline int armv8pmu_has_overflowed(u32 pmovsr) return pmovsr & ARMV8_OVERFLOWED_MASK; } -static inline int armv8pmu_counter_valid(int idx) +static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx) { - return idx >= ARMV8_IDX_CYCLE_COUNTER && idx <= ARMV8_IDX_COUNTER_LAST; + return idx >= ARMV8_IDX_CYCLE_COUNTER && + idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu); } static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx) { - int ret = 0; - u32 counter; - - if (!armv8pmu_counter_valid(idx)) { - pr_err("CPU%u checking wrong counter %d overflow status\n", - smp_processor_id(), idx); - } else { - counter = ARMV8_IDX_TO_COUNTER(idx); - ret = pmnc & BIT(counter); - } - - return ret; + return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx)); } static inline int armv8pmu_select_counter(int idx) { - u32 counter; - - if (!armv8pmu_counter_valid(idx)) { - pr_err("CPU%u selecting wrong PMNC counter %d\n", - smp_processor_id(), idx); - return -EINVAL; - } - - counter = ARMV8_IDX_TO_COUNTER(idx); + u32 counter = ARMV8_IDX_TO_COUNTER(idx); asm volatile("msr pmselr_el0, %0" :: "r" (counter)); isb(); return idx; } -static inline u32 armv8pmu_read_counter(int idx) +static inline u32 armv8pmu_read_counter(struct perf_event *event) { + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; u32 value = 0; - if (!armv8pmu_counter_valid(idx)) + if (!armv8pmu_counter_valid(cpu_pmu, idx)) pr_err("CPU%u reading wrong counter %d\n", smp_processor_id(), idx); else if (idx == ARMV8_IDX_CYCLE_COUNTER) @@ -858,9 +289,13 @@ static inline u32 armv8pmu_read_counter(int idx) return value; } -static inline void armv8pmu_write_counter(int idx, u32 value) +static inline void armv8pmu_write_counter(struct perf_event *event, u32 value) { - if (!armv8pmu_counter_valid(idx)) + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (!armv8pmu_counter_valid(cpu_pmu, idx)) pr_err("CPU%u writing wrong counter %d\n", smp_processor_id(), idx); else if (idx == ARMV8_IDX_CYCLE_COUNTER) @@ -879,65 +314,34 @@ static inline void armv8pmu_write_evtype(int idx, u32 val) static inline int armv8pmu_enable_counter(int idx) { - u32 counter; - - if (!armv8pmu_counter_valid(idx)) { - pr_err("CPU%u enabling wrong PMNC counter %d\n", - smp_processor_id(), idx); - return -EINVAL; - } - - counter = ARMV8_IDX_TO_COUNTER(idx); + u32 counter = ARMV8_IDX_TO_COUNTER(idx); asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter))); return idx; } static inline int armv8pmu_disable_counter(int idx) { - u32 counter; - - if (!armv8pmu_counter_valid(idx)) { - pr_err("CPU%u disabling wrong PMNC counter %d\n", - smp_processor_id(), idx); - return -EINVAL; - } - - counter = ARMV8_IDX_TO_COUNTER(idx); + u32 counter = ARMV8_IDX_TO_COUNTER(idx); asm volatile("msr 
pmcntenclr_el0, %0" :: "r" (BIT(counter))); return idx; } static inline int armv8pmu_enable_intens(int idx) { - u32 counter; - - if (!armv8pmu_counter_valid(idx)) { - pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n", - smp_processor_id(), idx); - return -EINVAL; - } - - counter = ARMV8_IDX_TO_COUNTER(idx); + u32 counter = ARMV8_IDX_TO_COUNTER(idx); asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter))); return idx; } static inline int armv8pmu_disable_intens(int idx) { - u32 counter; - - if (!armv8pmu_counter_valid(idx)) { - pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n", - smp_processor_id(), idx); - return -EINVAL; - } - - counter = ARMV8_IDX_TO_COUNTER(idx); + u32 counter = ARMV8_IDX_TO_COUNTER(idx); asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter))); isb(); /* Clear the overflow flag in case an interrupt is pending. */ asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter))); isb(); + return idx; } @@ -955,10 +359,13 @@ static inline u32 armv8pmu_getreset_flags(void) return value; } -static void armv8pmu_enable_event(struct hw_perf_event *hwc, int idx) +static void armv8pmu_enable_event(struct perf_event *event) { unsigned long flags; - struct pmu_hw_events *events = cpu_pmu->get_hw_events(); + struct hw_perf_event *hwc = &event->hw; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); + int idx = hwc->idx; /* * Enable counter and interrupt, and set the counter to count @@ -989,10 +396,13 @@ static void armv8pmu_enable_event(struct hw_perf_event *hwc, int idx) raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } -static void armv8pmu_disable_event(struct hw_perf_event *hwc, int idx) +static void armv8pmu_disable_event(struct perf_event *event) { unsigned long flags; - struct pmu_hw_events *events = cpu_pmu->get_hw_events(); + struct hw_perf_event *hwc = &event->hw; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); + int idx = hwc->idx; /* * Disable counter and interrupt @@ -1016,7 +426,8 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev) { u32 pmovsr; struct perf_sample_data data; - struct pmu_hw_events *cpuc; + struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev; + struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); struct pt_regs *regs; int idx; @@ -1036,7 +447,6 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev) */ regs = get_irq_regs(); - cpuc = this_cpu_ptr(&cpu_hw_events); for (idx = 0; idx < cpu_pmu->num_events; ++idx) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; @@ -1053,13 +463,13 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev) continue; hwc = &event->hw; - armpmu_event_update(event, hwc, idx); + armpmu_event_update(event); perf_sample_data_init(&data, 0, hwc->last_period); - if (!armpmu_event_set_period(event, hwc, idx)) + if (!armpmu_event_set_period(event)) continue; if (perf_event_overflow(event, &data, regs)) - cpu_pmu->disable(hwc, idx); + cpu_pmu->disable(event); } /* @@ -1074,10 +484,10 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev) return IRQ_HANDLED; } -static void armv8pmu_start(void) +static void armv8pmu_start(struct arm_pmu *cpu_pmu) { unsigned long flags; - struct pmu_hw_events *events = cpu_pmu->get_hw_events(); + struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); raw_spin_lock_irqsave(&events->pmu_lock, flags); /* Enable all counters */ @@ -1085,10 +495,10 @@ static 
void armv8pmu_start(void) raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } -static void armv8pmu_stop(void) +static void armv8pmu_stop(struct arm_pmu *cpu_pmu) { unsigned long flags; - struct pmu_hw_events *events = cpu_pmu->get_hw_events(); + struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); raw_spin_lock_irqsave(&events->pmu_lock, flags); /* Disable all counters */ @@ -1097,10 +507,12 @@ static void armv8pmu_stop(void) } static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, - struct hw_perf_event *event) + struct perf_event *event) { int idx; - unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT; + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + unsigned long evtype = hwc->config_base & ARMV8_EVTYPE_EVENT; /* Always place a cycle counter into the cycle counter. */ if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) { @@ -1151,11 +563,14 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, static void armv8pmu_reset(void *info) { + struct arm_pmu *cpu_pmu = (struct arm_pmu *)info; u32 idx, nb_cnt = cpu_pmu->num_events; /* The counter and interrupt enable registers are unknown at reset. */ - for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) - armv8pmu_disable_event(NULL, idx); + for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) { + armv8pmu_disable_counter(idx); + armv8pmu_disable_intens(idx); + } /* Initialize & Reset PMNC: C and P bits. */ armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C); @@ -1166,169 +581,104 @@ static void armv8pmu_reset(void *info) static int armv8_pmuv3_map_event(struct perf_event *event) { - return map_cpu_event(event, &armv8_pmuv3_perf_map, + return armpmu_map_event(event, &armv8_pmuv3_perf_map, &armv8_pmuv3_perf_cache_map, ARMV8_EVTYPE_EVENT); } -static struct arm_pmu armv8pmu = { - .handle_irq = armv8pmu_handle_irq, - .enable = armv8pmu_enable_event, - .disable = armv8pmu_disable_event, - .read_counter = armv8pmu_read_counter, - .write_counter = armv8pmu_write_counter, - .get_event_idx = armv8pmu_get_event_idx, - .start = armv8pmu_start, - .stop = armv8pmu_stop, - .reset = armv8pmu_reset, - .max_period = (1LLU << 32) - 1, -}; +static int armv8_a53_map_event(struct perf_event *event) +{ + return armpmu_map_event(event, &armv8_a53_perf_map, + &armv8_a53_perf_cache_map, + ARMV8_EVTYPE_EVENT); +} -static u32 __init armv8pmu_read_num_pmnc_events(void) +static int armv8_a57_map_event(struct perf_event *event) { - u32 nb_cnt; + return armpmu_map_event(event, &armv8_a57_perf_map, + &armv8_a57_perf_cache_map, + ARMV8_EVTYPE_EVENT); +} + +static void armv8pmu_read_num_pmnc_events(void *info) +{ + int *nb_cnt = info; /* Read the nb of CNTx counters supported from PMNC */ - nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK; + *nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK; - /* Add the CPU cycles counter and return */ - return nb_cnt + 1; + /* Add the CPU cycles counter */ + *nb_cnt += 1; } -static struct arm_pmu *__init armv8_pmuv3_pmu_init(void) +static int armv8pmu_probe_num_events(struct arm_pmu *arm_pmu) { - armv8pmu.name = "arm/armv8-pmuv3"; - armv8pmu.map_event = armv8_pmuv3_map_event; - armv8pmu.num_events = armv8pmu_read_num_pmnc_events(); - armv8pmu.set_event_filter = armv8pmu_set_event_filter; - return &armv8pmu; + return smp_call_function_any(&arm_pmu->supported_cpus, + armv8pmu_read_num_pmnc_events, + &arm_pmu->num_events, 1); } -/* - * Ensure the PMU has sane values out of reset. 
- * This requires SMP to be available, so exists as a separate initcall. - */ -static int __init -cpu_pmu_reset(void) +static void armv8_pmu_init(struct arm_pmu *cpu_pmu) { - if (cpu_pmu && cpu_pmu->reset) - return on_each_cpu(cpu_pmu->reset, NULL, 1); - return 0; + cpu_pmu->handle_irq = armv8pmu_handle_irq, + cpu_pmu->enable = armv8pmu_enable_event, + cpu_pmu->disable = armv8pmu_disable_event, + cpu_pmu->read_counter = armv8pmu_read_counter, + cpu_pmu->write_counter = armv8pmu_write_counter, + cpu_pmu->get_event_idx = armv8pmu_get_event_idx, + cpu_pmu->start = armv8pmu_start, + cpu_pmu->stop = armv8pmu_stop, + cpu_pmu->reset = armv8pmu_reset, + cpu_pmu->max_period = (1LLU << 32) - 1, + cpu_pmu->set_event_filter = armv8pmu_set_event_filter; } -arch_initcall(cpu_pmu_reset); - -/* - * PMU platform driver and devicetree bindings. - */ -static const struct of_device_id armpmu_of_device_ids[] = { - {.compatible = "arm,armv8-pmuv3"}, - {}, -}; -static int armpmu_device_probe(struct platform_device *pdev) +static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu) { - int i, irq, *irqs; - - if (!cpu_pmu) - return -ENODEV; - - /* Don't bother with PPIs; they're already affine */ - irq = platform_get_irq(pdev, 0); - if (irq >= 0 && irq_is_percpu(irq)) - goto out; - - irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); - if (!irqs) - return -ENOMEM; - - for (i = 0; i < pdev->num_resources; ++i) { - struct device_node *dn; - int cpu; - - dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", - i); - if (!dn) { - pr_warn("Failed to parse %s/interrupt-affinity[%d]\n", - of_node_full_name(pdev->dev.of_node), i); - break; - } - - for_each_possible_cpu(cpu) - if (dn == of_cpu_device_node_get(cpu)) - break; - - if (cpu >= nr_cpu_ids) { - pr_warn("Failed to find logical CPU for %s\n", - dn->name); - of_node_put(dn); - break; - } - of_node_put(dn); - - irqs[i] = cpu; - } - - if (i == pdev->num_resources) - cpu_pmu->irq_affinity = irqs; - else - kfree(irqs); - -out: - cpu_pmu->plat_device = pdev; - return 0; + armv8_pmu_init(cpu_pmu); + cpu_pmu->name = "armv8_pmuv3"; + cpu_pmu->map_event = armv8_pmuv3_map_event; + return armv8pmu_probe_num_events(cpu_pmu); } -static struct platform_driver armpmu_driver = { - .driver = { - .name = "arm-pmu", - .of_match_table = armpmu_of_device_ids, - }, - .probe = armpmu_device_probe, -}; - -static int __init register_pmu_driver(void) +static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu) { - return platform_driver_register(&armpmu_driver); + armv8_pmu_init(cpu_pmu); + cpu_pmu->name = "armv8_cortex_a53"; + cpu_pmu->map_event = armv8_a53_map_event; + return armv8pmu_probe_num_events(cpu_pmu); } -device_initcall(register_pmu_driver); -static struct pmu_hw_events *armpmu_get_cpu_events(void) +static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu) { - return this_cpu_ptr(&cpu_hw_events); + armv8_pmu_init(cpu_pmu); + cpu_pmu->name = "armv8_cortex_a57"; + cpu_pmu->map_event = armv8_a57_map_event; + return armv8pmu_probe_num_events(cpu_pmu); } -static void __init cpu_pmu_init(struct arm_pmu *armpmu) -{ - int cpu; - for_each_possible_cpu(cpu) { - struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu); - events->events = per_cpu(hw_events, cpu); - events->used_mask = per_cpu(used_mask, cpu); - raw_spin_lock_init(&events->pmu_lock); - } - armpmu->get_hw_events = armpmu_get_cpu_events; -} +static const struct of_device_id armv8_pmu_of_device_ids[] = { + {.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_init}, + {.compatible = "arm,cortex-a53-pmu", .data = 
armv8_a53_pmu_init}, + {.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init}, + {}, +}; -static int __init init_hw_perf_events(void) +static int armv8_pmu_device_probe(struct platform_device *pdev) { - u64 dfr = read_cpuid(ID_AA64DFR0_EL1); - - switch ((dfr >> 8) & 0xf) { - case 0x1: /* PMUv3 */ - cpu_pmu = armv8_pmuv3_pmu_init(); - break; - } + return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL); +} - if (cpu_pmu) { - pr_info("enabled with %s PMU driver, %d counters available\n", - cpu_pmu->name, cpu_pmu->num_events); - cpu_pmu_init(cpu_pmu); - armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW); - } else { - pr_info("no hardware support available\n"); - } +static struct platform_driver armv8_pmu_driver = { + .driver = { + .name = "armv8-pmu", + .of_match_table = armv8_pmu_of_device_ids, + }, + .probe = armv8_pmu_device_probe, +}; - return 0; +static int __init register_armv8_pmu_driver(void) +{ + return platform_driver_register(&armv8_pmu_driver); } -early_initcall(init_hw_perf_events); - +device_initcall(register_armv8_pmu_driver); diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 223b093c9..f75b540bc 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -44,6 +44,7 @@ #include <linux/hw_breakpoint.h> #include <linux/personality.h> #include <linux/notifier.h> +#include <trace/events/power.h> #include <asm/compat.h> #include <asm/cacheflush.h> @@ -75,8 +76,10 @@ void arch_cpu_idle(void) * This should do all the clock switching and wait for interrupt * tricks */ + trace_cpu_idle_rcuidle(1, smp_processor_id()); cpu_do_idle(); local_irq_enable(); + trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); } #ifdef CONFIG_HOTPLUG_CPU diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index aa94a88f6..f67f35b6e 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c @@ -30,20 +30,6 @@ #include <asm/smp_plat.h> #include <asm/suspend.h> -static bool psci_power_state_loses_context(u32 state) -{ - return state & PSCI_0_2_POWER_STATE_TYPE_MASK; -} - -static bool psci_power_state_is_valid(u32 state) -{ - const u32 valid_mask = PSCI_0_2_POWER_STATE_ID_MASK | - PSCI_0_2_POWER_STATE_TYPE_MASK | - PSCI_0_2_POWER_STATE_AFFL_MASK; - - return !(state & ~valid_mask); -} - static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state); static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu) diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 232247945..811947914 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -28,7 +28,6 @@ #include <linux/console.h> #include <linux/cache.h> #include <linux/bootmem.h> -#include <linux/seq_file.h> #include <linux/screen_info.h> #include <linux/init.h> #include <linux/kexec.h> @@ -44,7 +43,6 @@ #include <linux/of_fdt.h> #include <linux/of_platform.h> #include <linux/efi.h> -#include <linux/personality.h> #include <linux/psci.h> #include <asm/acpi.h> @@ -54,6 +52,7 @@ #include <asm/elf.h> #include <asm/cpufeature.h> #include <asm/cpu_ops.h> +#include <asm/kasan.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/smp_plat.h> @@ -64,23 +63,6 @@ #include <asm/efi.h> #include <asm/xen/hypervisor.h> -unsigned long elf_hwcap __read_mostly; -EXPORT_SYMBOL_GPL(elf_hwcap); - -#ifdef CONFIG_COMPAT -#define COMPAT_ELF_HWCAP_DEFAULT \ - (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\ - COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\ - COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\ - COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\ - 
COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
-				 COMPAT_HWCAP_LPAE)
-unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
-unsigned int compat_elf_hwcap2 __read_mostly;
-#endif
-
-DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
-
 phys_addr_t __fdt_pointer __initdata;
 
 /*
@@ -195,104 +177,6 @@ static void __init smp_build_mpidr_hash(void)
 	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
 }
 
-static void __init setup_processor(void)
-{
-	u64 features;
-	s64 block;
-	u32 cwg;
-	int cls;
-
-	printk("CPU: AArch64 Processor [%08x] revision %d\n",
-	       read_cpuid_id(), read_cpuid_id() & 15);
-
-	sprintf(init_utsname()->machine, ELF_PLATFORM);
-	elf_hwcap = 0;
-
-	cpuinfo_store_boot_cpu();
-
-	/*
-	 * Check for sane CTR_EL0.CWG value.
-	 */
-	cwg = cache_type_cwg();
-	cls = cache_line_size();
-	if (!cwg)
-		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
-			cls);
-	if (L1_CACHE_BYTES < cls)
-		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
-			L1_CACHE_BYTES, cls);
-
-	/*
-	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
-	 * The blocks we test below represent incremental functionality
-	 * for non-negative values. Negative values are reserved.
-	 */
-	features = read_cpuid(ID_AA64ISAR0_EL1);
-	block = cpuid_feature_extract_field(features, 4);
-	if (block > 0) {
-		switch (block) {
-		default:
-		case 2:
-			elf_hwcap |= HWCAP_PMULL;
-		case 1:
-			elf_hwcap |= HWCAP_AES;
-		case 0:
-			break;
-		}
-	}
-
-	if (cpuid_feature_extract_field(features, 8) > 0)
-		elf_hwcap |= HWCAP_SHA1;
-
-	if (cpuid_feature_extract_field(features, 12) > 0)
-		elf_hwcap |= HWCAP_SHA2;
-
-	if (cpuid_feature_extract_field(features, 16) > 0)
-		elf_hwcap |= HWCAP_CRC32;
-
-	block = cpuid_feature_extract_field(features, 20);
-	if (block > 0) {
-		switch (block) {
-		default:
-		case 2:
-			elf_hwcap |= HWCAP_ATOMICS;
-		case 1:
-			/* RESERVED */
-		case 0:
-			break;
-		}
-	}
-
-#ifdef CONFIG_COMPAT
-	/*
-	 * ID_ISAR5_EL1 carries similar information as above, but pertaining to
-	 * the AArch32 32-bit execution state.
-	 */
-	features = read_cpuid(ID_ISAR5_EL1);
-	block = cpuid_feature_extract_field(features, 4);
-	if (block > 0) {
-		switch (block) {
-		default:
-		case 2:
-			compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
-		case 1:
-			compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
-		case 0:
-			break;
-		}
-	}
-
-	if (cpuid_feature_extract_field(features, 8) > 0)
-		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;
-
-	if (cpuid_feature_extract_field(features, 12) > 0)
-		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;
-
-	if (cpuid_feature_extract_field(features, 16) > 0)
-		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
-#endif
-}
-
 static void __init setup_machine_fdt(phys_addr_t dt_phys)
 {
 	void *dt_virt = fixmap_remap_fdt(dt_phys);
@@ -406,8 +290,9 @@ u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
 
 void __init setup_arch(char **cmdline_p)
 {
-	setup_processor();
+	pr_info("Boot CPU: AArch64 Processor [%08x]\n", read_cpuid_id());
 
+	sprintf(init_utsname()->machine, ELF_PLATFORM);
 	init_mm.start_code = (unsigned long) _text;
 	init_mm.end_code   = (unsigned long) _etext;
 	init_mm.end_data   = (unsigned long) _edata;
@@ -436,6 +321,9 @@ void __init setup_arch(char **cmdline_p)
 
 	paging_init();
 	relocate_initrd();
+
+	kasan_init();
+
 	request_standard_resources();
 
 	early_ioremap_reset();
@@ -493,124 +381,3 @@ static int __init topology_init(void)
 	return 0;
 }
 subsys_initcall(topology_init);
-
-static const char *hwcap_str[] = {
-	"fp",
-	"asimd",
-	"evtstrm",
-	"aes",
-	"pmull",
-	"sha1",
-	"sha2",
-	"crc32",
-	"atomics",
-	NULL
-};
-
-#ifdef CONFIG_COMPAT
-static const char *compat_hwcap_str[] = {
-	"swp",
-	"half",
-	"thumb",
-	"26bit",
-	"fastmult",
-	"fpa",
-	"vfp",
-	"edsp",
-	"java",
-	"iwmmxt",
-	"crunch",
-	"thumbee",
-	"neon",
-	"vfpv3",
-	"vfpv3d16",
-	"tls",
-	"vfpv4",
-	"idiva",
-	"idivt",
-	"vfpd32",
-	"lpae",
-	"evtstrm"
-};
-
-static const char *compat_hwcap2_str[] = {
-	"aes",
-	"pmull",
-	"sha1",
-	"sha2",
-	"crc32",
-	NULL
-};
-#endif /* CONFIG_COMPAT */
-
-static int c_show(struct seq_file *m, void *v)
-{
-	int i, j;
-
-	for_each_online_cpu(i) {
-		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
-		u32 midr = cpuinfo->reg_midr;
-
-		/*
-		 * glibc reads /proc/cpuinfo to determine the number of
-		 * online processors, looking for lines beginning with
-		 * "processor". Give glibc what it expects.
-		 */
-		seq_printf(m, "processor\t: %d\n", i);
-
-		/*
-		 * Dump out the common processor features in a single line.
-		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
-		 * rather than attempting to parse this, but there's a body of
-		 * software which does already (at least for 32-bit).
-		 */
-		seq_puts(m, "Features\t:");
-		if (personality(current->personality) == PER_LINUX32) {
-#ifdef CONFIG_COMPAT
-			for (j = 0; compat_hwcap_str[j]; j++)
-				if (compat_elf_hwcap & (1 << j))
-					seq_printf(m, " %s", compat_hwcap_str[j]);
-
-			for (j = 0; compat_hwcap2_str[j]; j++)
-				if (compat_elf_hwcap2 & (1 << j))
-					seq_printf(m, " %s", compat_hwcap2_str[j]);
-#endif /* CONFIG_COMPAT */
-		} else {
-			for (j = 0; hwcap_str[j]; j++)
-				if (elf_hwcap & (1 << j))
-					seq_printf(m, " %s", hwcap_str[j]);
-		}
-		seq_puts(m, "\n");
-
-		seq_printf(m, "CPU implementer\t: 0x%02x\n",
-			   MIDR_IMPLEMENTOR(midr));
-		seq_printf(m, "CPU architecture: 8\n");
-		seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
-		seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
-		seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
-	}
-
-	return 0;
-}
-
-static void *c_start(struct seq_file *m, loff_t *pos)
-{
-	return *pos < 1 ? (void *)1 : NULL;
-}
-
-static void *c_next(struct seq_file *m, void *v, loff_t *pos)
-{
-	++*pos;
-	return NULL;
-}
-
-static void c_stop(struct seq_file *m, void *v)
-{
-}
-
-const struct seq_operations cpuinfo_op = {
-	.start	= c_start,
-	.next	= c_next,
-	.stop	= c_stop,
-	.show	= c_show
-};
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index dbdaacddd..b1adc51b2 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -142,22 +142,27 @@ asmlinkage void secondary_start_kernel(void)
 	 */
 	atomic_inc(&mm->mm_count);
 	current->active_mm = mm;
-	cpumask_set_cpu(cpu, mm_cpumask(mm));
 
 	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
-	printk("CPU%u: Booted secondary processor\n", cpu);
 
 	/*
 	 * TTBR0 is only used for the identity mapping at this stage. Make it
 	 * point to zero page to avoid speculatively fetching new entries.
 	 */
 	cpu_set_reserved_ttbr0();
-	flush_tlb_all();
+	local_flush_tlb_all();
 	cpu_set_default_tcr_t0sz();
 
 	preempt_disable();
 	trace_hardirqs_off();
 
+	/*
+	 * If the system has established the capabilities, make sure
+	 * this CPU ticks all of those. If it doesn't, the CPU will
+	 * fail to come online.
+	 */
+	verify_local_cpu_capabilities();
+
 	if (cpu_ops[cpu]->cpu_postboot)
 		cpu_ops[cpu]->cpu_postboot();
 
@@ -178,6 +183,8 @@ asmlinkage void secondary_start_kernel(void)
 	 * the CPU migration code to notice that the CPU is online
 	 * before we continue.
 	 */
+	pr_info("CPU%u: Booted secondary processor [%08x]\n",
+		cpu, read_cpuid_id());
 	set_cpu_online(cpu, true);
 	complete(&cpu_running);
 
@@ -232,12 +239,7 @@ int __cpu_disable(void)
 	/*
 	 * OK - migrate IRQs away from this CPU
 	 */
-	migrate_irqs();
-
-	/*
-	 * Remove this CPU from the vm mask set of all processes.
-	 */
-	clear_tasks_mm_cpumask(cpu);
+	irq_migrate_all_off_this_cpu();
 
 	return 0;
 }
@@ -325,12 +327,14 @@ static void __init hyp_mode_check(void)
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
+	setup_cpu_features();
 	hyp_mode_check();
 	apply_alternatives_all();
 }
 
 void __init smp_prepare_boot_cpu(void)
 {
+	cpuinfo_store_boot_cpu();
 	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 }
 
@@ -469,7 +473,7 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
  * cpu logical map array containing MPIDR values related to logical
  * cpus. Assumes that cpu_logical_map(0) has already been initialized.
  */
-void __init of_parse_and_init_cpus(void)
+static void __init of_parse_and_init_cpus(void)
 {
 	struct device_node *dn = NULL;
 
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 44ca4143b..1095aa483 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -1,3 +1,4 @@
+#include <linux/ftrace.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
@@ -41,7 +42,7 @@ void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
  * time the notifier runs debug exceptions might have been enabled already,
  * with HW breakpoints registers content still in an unknown state.
  */
-void (*hw_breakpoint_restore)(void *);
+static void (*hw_breakpoint_restore)(void *);
 void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
 {
 	/* Prevent multiple restore hook initializations */
@@ -71,6 +72,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	local_dbg_save(flags);
 
 	/*
+	 * Function graph tracer state gets inconsistent when the kernel
+	 * calls functions that never return (aka suspend finishers) hence
+	 * disable graph tracing during their execution.
+	 */
+	pause_graph_tracing();
+
+	/*
 	 * mm context saved on the stack, it will be restored when
 	 * the cpu comes out of reset through the identity mapped
 	 * page tables, so that the thread address space is properly
@@ -90,7 +98,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	 * restoration before returning.
 	 */
 	cpu_set_reserved_ttbr0();
-	flush_tlb_all();
+	local_flush_tlb_all();
 	cpu_set_default_tcr_t0sz();
 
 	if (mm != &init_mm)
@@ -111,6 +119,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 		hw_breakpoint_restore(NULL);
 	}
 
+	unpause_graph_tracing();
+
 	/*
 	 * Restore pstate flags. OS lock and mdscr have been already
 	 * restored, so from this point onwards, debugging is fully
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 149151fb4..13339b6ff 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -67,16 +67,10 @@ void __init time_init(void)
 	u32 arch_timer_rate;
 
 	of_clk_init(NULL);
-	clocksource_of_init();
+	clocksource_probe();
 
 	tick_setup_hrtimer_broadcast();
 
-	/*
-	 * Since ACPI or FDT will only one be available in the system,
-	 * we can use acpi_generic_timer_init() here safely
-	 */
-	acpi_generic_timer_init();
-
 	arch_timer_rate = arch_timer_get_rate();
 	if (!arch_timer_rate)
 		panic("Unable to initialise architected timer.\n");
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index f93aae5e4..e9b9b5364 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -103,12 +103,12 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
 	set_fs(fs);
 }
 
-static void dump_backtrace_entry(unsigned long where, unsigned long stack)
+static void dump_backtrace_entry(unsigned long where)
 {
+	/*
+	 * Note that 'where' can have a physical address, but it's not handled.
+	 */
 	print_ip_sym(where);
-	if (in_exception_text(where))
-		dump_mem("", "Exception stack", stack,
-			 stack + sizeof(struct pt_regs), false);
 }
 
 static void dump_instr(const char *lvl, struct pt_regs *regs)
@@ -172,12 +172,17 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 	pr_emerg("Call trace:\n");
 	while (1) {
 		unsigned long where = frame.pc;
+		unsigned long stack;
 		int ret;
 
+		dump_backtrace_entry(where);
 		ret = unwind_frame(&frame);
 		if (ret < 0)
 			break;
-		dump_backtrace_entry(where, frame.sp);
+		stack = frame.sp;
+		if (in_exception_text(where))
+			dump_mem("", "Exception stack", stack,
+				 stack + sizeof(struct pt_regs), false);
 	}
 }
 
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index f6fe17d88..b467fd0a3 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -15,6 +15,9 @@ ccflags-y := -shared -fno-common -fno-builtin
 ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
 		$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
 
+# Disable gcov profiling for VDSO code
+GCOV_PROFILE := n
+
 # Workaround for bare-metal (ELF) toolchains that neglect to pass -shared
 # down to collect2, resulting in silent corruption of the vDSO image.
 ccflags-y += -Wl,-shared
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 4d77757b5..71426a78d 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -5,6 +5,8 @@
  */
 
 #include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/page.h>
@@ -139,7 +141,7 @@ SECTIONS
 		ARM_EXIT_KEEP(EXIT_DATA)
 	}
 
-	PERCPU_SECTION(64)
+	PERCPU_SECTION(L1_CACHE_BYTES)
 
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
@@ -157,7 +159,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	_data = .;
 	_sdata = .;
-	RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
+	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 	PECOFF_EDATA_PADDING
 	_edata = .;