author     André Fabian Silva Delgado <emulatorman@parabola.nu>	2015-09-08 01:01:14 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>	2015-09-08 01:01:14 -0300
commit     e5fd91f1ef340da553f7a79da9540c3db711c937 (patch)
tree       b11842027dc6641da63f4bcc524f8678263304a3 /arch/arm64/kernel
parent     2a9b0348e685a63d97486f6749622b61e9e3292f (diff)

Linux-libre 4.2-gnu

Diffstat (limited to 'arch/arm64/kernel'): 26 files changed, 658 insertions(+), 486 deletions(-)
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 8b8395588..19de7537e 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -36,12 +36,6 @@ EXPORT_SYMBOL(acpi_disabled);
 int acpi_pci_disabled = 1;	/* skip ACPI PCI scan and IRQ initialization */
 EXPORT_SYMBOL(acpi_pci_disabled);

-/* Processors with enabled flag and sane MPIDR */
-static int enabled_cpus;
-
-/* Boot CPU is valid or not in MADT */
-static bool bootcpu_valid __initdata;
-
 static bool param_acpi_off __initdata;
 static bool param_acpi_force __initdata;

@@ -95,122 +89,15 @@ void __init __acpi_unmap_table(char *map, unsigned long size)
 	early_memunmap(map, size);
 }

-/**
- * acpi_map_gic_cpu_interface - generates a logical cpu number
- * and map to MPIDR represented by GICC structure
- */
-static void __init
-acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
+bool __init acpi_psci_present(void)
 {
-	int i;
-	u64 mpidr = processor->arm_mpidr & MPIDR_HWID_BITMASK;
-	bool enabled = !!(processor->flags & ACPI_MADT_ENABLED);
-
-	if (mpidr == INVALID_HWID) {
-		pr_info("Skip MADT cpu entry with invalid MPIDR\n");
-		return;
-	}
-
-	total_cpus++;
-	if (!enabled)
-		return;
-
-	if (enabled_cpus >= NR_CPUS) {
-		pr_warn("NR_CPUS limit of %d reached, Processor %d/0x%llx ignored.\n",
-			NR_CPUS, total_cpus, mpidr);
-		return;
-	}
-
-	/* Check if GICC structure of boot CPU is available in the MADT */
-	if (cpu_logical_map(0) == mpidr) {
-		if (bootcpu_valid) {
-			pr_err("Firmware bug, duplicate CPU MPIDR: 0x%llx in MADT\n",
-			       mpidr);
-			return;
-		}
-
-		bootcpu_valid = true;
-	}
-
-	/*
-	 * Duplicate MPIDRs are a recipe for disaster. Scan
-	 * all initialized entries and check for
-	 * duplicates. If any is found just ignore the CPU.
-	 */
-	for (i = 1; i < enabled_cpus; i++) {
-		if (cpu_logical_map(i) == mpidr) {
-			pr_err("Firmware bug, duplicate CPU MPIDR: 0x%llx in MADT\n",
-			       mpidr);
-			return;
-		}
-	}
-
-	if (!acpi_psci_present())
-		return;
-
-	cpu_ops[enabled_cpus] = cpu_get_ops("psci");
-	/* CPU 0 was already initialized */
-	if (enabled_cpus) {
-		if (!cpu_ops[enabled_cpus])
-			return;
-
-		if (cpu_ops[enabled_cpus]->cpu_init(NULL, enabled_cpus))
-			return;
-
-		/* map the logical cpu id to cpu MPIDR */
-		cpu_logical_map(enabled_cpus) = mpidr;
-	}
-
-	enabled_cpus++;
+	return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_COMPLIANT;
 }

-static int __init
-acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
-			     const unsigned long end)
+/* Whether HVC must be used instead of SMC as the PSCI conduit */
+bool __init acpi_psci_use_hvc(void)
 {
-	struct acpi_madt_generic_interrupt *processor;
-
-	processor = (struct acpi_madt_generic_interrupt *)header;
-
-	if (BAD_MADT_ENTRY(processor, end))
-		return -EINVAL;
-
-	acpi_table_print_madt_entry(header);
-	acpi_map_gic_cpu_interface(processor);
-	return 0;
-}
-
-/* Parse GIC cpu interface entries in MADT for SMP init */
-void __init acpi_init_cpus(void)
-{
-	int count, i;
-
-	/*
-	 * do a partial walk of MADT to determine how many CPUs
-	 * we have including disabled CPUs, and get information
-	 * we need for SMP init
-	 */
-	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
-			acpi_parse_gic_cpu_interface, 0);
-
-	if (!count) {
-		pr_err("No GIC CPU interface entries present\n");
-		return;
-	} else if (count < 0) {
-		pr_err("Error parsing GIC CPU interface entry\n");
-		return;
-	}
-
-	if (!bootcpu_valid) {
-		pr_err("MADT missing boot CPU MPIDR, not enabling secondaries\n");
-		return;
-	}
-
-	for (i = 0; i < enabled_cpus; i++)
-		set_cpu_possible(i, true);
-
-	/* Make boot-up look pretty */
-	pr_info("%d CPUs enabled, %d CPUs total\n", enabled_cpus, total_cpus);
+	return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC;
 }

 /*
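The two FADT probes that replace the MADT walk are consumed by the PSCI init code further down this diff. As a standalone illustration, conduit selection amounts to the following sketch; the wrapper name is illustrative, but invoke_psci_fn and the two invokers match this commit's psci.c:

	/* Sketch: pick the PSCI conduit from the FADT ARM boot flags. */
	static int __init psci_pick_conduit_sketch(void)
	{
		if (!acpi_psci_present())	/* ACPI_FADT_PSCI_COMPLIANT not set */
			return -EOPNOTSUPP;

		/* Firmware tells us whether PSCI calls trap via HVC or SMC */
		if (acpi_psci_use_hvc())
			invoke_psci_fn = __invoke_psci_fn_hvc;
		else
			invoke_psci_fn = __invoke_psci_fn_smc;

		return 0;
	}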
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 28f8365ed..221b98312 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -24,8 +24,13 @@
 #include <asm/cacheflush.h>
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
+#include <asm/insn.h>
 #include <linux/stop_machine.h>

+#define __ALT_PTR(a,f)		(u32 *)((void *)&(a)->f + (a)->f)
+#define ALT_ORIG_PTR(a)		__ALT_PTR(a, orig_offset)
+#define ALT_REPL_PTR(a)		__ALT_PTR(a, alt_offset)
+
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];

 struct alt_region {
@@ -33,13 +38,63 @@ struct alt_region {
 	struct alt_instr	*end;
 };

+/*
+ * Check if the target PC is within an alternative block.
+ */
+static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
+{
+	unsigned long replptr;
+
+	if (kernel_text_address(pc))
+		return 1;
+
+	replptr = (unsigned long)ALT_REPL_PTR(alt);
+	if (pc >= replptr && pc <= (replptr + alt->alt_len))
+		return 0;
+
+	/*
+	 * Branching into *another* alternate sequence is doomed, and
+	 * we're not even trying to fix it up.
+	 */
+	BUG();
+}
+
+static u32 get_alt_insn(struct alt_instr *alt, u32 *insnptr, u32 *altinsnptr)
+{
+	u32 insn;
+
+	insn = le32_to_cpu(*altinsnptr);
+
+	if (aarch64_insn_is_branch_imm(insn)) {
+		s32 offset = aarch64_get_branch_offset(insn);
+		unsigned long target;
+
+		target = (unsigned long)altinsnptr + offset;
+
+		/*
+		 * If we're branching inside the alternate sequence,
+		 * do not rewrite the instruction, as it is already
+		 * correct. Otherwise, generate the new instruction.
+		 */
+		if (branch_insn_requires_update(alt, target)) {
+			offset = target - (unsigned long)insnptr;
+			insn = aarch64_set_branch_offset(insn, offset);
+		}
+	}
+
+	return insn;
+}
+
 static int __apply_alternatives(void *alt_region)
 {
 	struct alt_instr *alt;
 	struct alt_region *region = alt_region;
-	u8 *origptr, *replptr;
+	u32 *origptr, *replptr;

 	for (alt = region->begin; alt < region->end; alt++) {
+		u32 insn;
+		int i, nr_inst;
+
 		if (!cpus_have_cap(alt->cpufeature))
 			continue;

@@ -47,11 +102,17 @@ static int __apply_alternatives(void *alt_region)

 		pr_info_once("patching kernel code\n");

-		origptr = (u8 *)&alt->orig_offset + alt->orig_offset;
-		replptr = (u8 *)&alt->alt_offset + alt->alt_offset;
-		memcpy(origptr, replptr, alt->alt_len);
+		origptr = ALT_ORIG_PTR(alt);
+		replptr = ALT_REPL_PTR(alt);
+		nr_inst = alt->alt_len / sizeof(insn);
+
+		for (i = 0; i < nr_inst; i++) {
+			insn = get_alt_insn(alt, origptr + i, replptr + i);
+			*(origptr + i) = cpu_to_le32(insn);
+		}
+
 		flush_icache_range((uintptr_t)origptr,
-				   (uintptr_t)(origptr + alt->alt_len));
+				   (uintptr_t)(origptr + nr_inst));
 	}

 	return 0;
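The __ALT_PTR macro turns a stored offset back into a pointer: each alt_instr records its original and replacement code as offsets relative to the offset field itself, so the table stays valid wherever the image is loaded. A minimal standalone sketch of the idiom (the rel_ref type is illustrative, not from the kernel):

	#include <stdint.h>

	/* A self-relative reference: stores (target - &offset) instead of an
	 * absolute pointer, so the pair can be relocated as a unit. */
	struct rel_ref {
		int32_t offset;
	};

	static inline void *rel_deref(struct rel_ref *r)
	{
		/* Mirrors __ALT_PTR(a,f): target = &field + field */
		return (char *)&r->offset + r->offset;
	}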
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index da675cc5d..c99701a34 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -127,7 +127,6 @@ int main(void)
   DEFINE(VCPU_VGIC_CPU,		offsetof(struct kvm_vcpu, arch.vgic_cpu));
   DEFINE(VGIC_SAVE_FN,		offsetof(struct vgic_sr_vectors, save_vgic));
   DEFINE(VGIC_RESTORE_FN,	offsetof(struct vgic_sr_vectors, restore_vgic));
-  DEFINE(VGIC_SR_VECTOR_SZ,	sizeof(struct vgic_sr_vectors));
   DEFINE(VGIC_V2_CPU_HCR,	offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
   DEFINE(VGIC_V2_CPU_VMCR,	offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
   DEFINE(VGIC_V2_CPU_MISR,	offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index fb8ff9ba4..5ea337dd2 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -16,11 +16,13 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */

-#include <asm/cpu_ops.h>
-#include <asm/smp_plat.h>
+#include <linux/acpi.h>
 #include <linux/errno.h>
 #include <linux/of.h>
 #include <linux/string.h>
+#include <asm/acpi.h>
+#include <asm/cpu_ops.h>
+#include <asm/smp_plat.h>

 extern const struct cpu_operations smp_spin_table_ops;
 extern const struct cpu_operations cpu_psci_ops;
@@ -35,7 +37,7 @@ static const struct cpu_operations *supported_cpu_ops[] __initconst = {
 	NULL,
 };

-const struct cpu_operations * __init cpu_get_ops(const char *name)
+static const struct cpu_operations * __init cpu_get_ops(const char *name)
 {
 	const struct cpu_operations **ops = supported_cpu_ops;

@@ -49,39 +51,53 @@ const struct cpu_operations * __init cpu_get_ops(const char *name)
 	return NULL;
 }

+static const char *__init cpu_read_enable_method(int cpu)
+{
+	const char *enable_method;
+
+	if (acpi_disabled) {
+		struct device_node *dn = of_get_cpu_node(cpu, NULL);
+
+		if (!dn) {
+			if (!cpu)
+				pr_err("Failed to find device node for boot cpu\n");
+			return NULL;
+		}
+
+		enable_method = of_get_property(dn, "enable-method", NULL);
+		if (!enable_method) {
+			/*
+			 * The boot CPU may not have an enable method (e.g.
+			 * when spin-table is used for secondaries).
+			 * Don't warn spuriously.
+			 */
+			if (cpu != 0)
+				pr_err("%s: missing enable-method property\n",
+				       dn->full_name);
+		}
+	} else {
+		enable_method = acpi_get_enable_method(cpu);
+		if (!enable_method)
+			pr_err("Unsupported ACPI enable-method\n");
+	}
+
+	return enable_method;
+}
+
 /*
- * Read a cpu's enable method from the device tree and record it in cpu_ops.
+ * Read a cpu's enable method and record it in cpu_ops.
 */
-int __init cpu_read_ops(struct device_node *dn, int cpu)
+int __init cpu_read_ops(int cpu)
 {
-	const char *enable_method = of_get_property(dn, "enable-method", NULL);
-	if (!enable_method) {
-		/*
-		 * The boot CPU may not have an enable method (e.g. when
-		 * spin-table is used for secondaries). Don't warn spuriously.
-		 */
-		if (cpu != 0)
-			pr_err("%s: missing enable-method property\n",
-			       dn->full_name);
-		return -ENOENT;
-	}
+	const char *enable_method = cpu_read_enable_method(cpu);
+
+	if (!enable_method)
+		return -ENODEV;

 	cpu_ops[cpu] = cpu_get_ops(enable_method);
 	if (!cpu_ops[cpu]) {
-		pr_warn("%s: unsupported enable-method property: %s\n",
-			dn->full_name, enable_method);
+		pr_warn("Unsupported enable-method: %s\n", enable_method);
 		return -EOPNOTSUPP;
 	}

 	return 0;
 }
-
-void __init cpu_read_bootcpu_ops(void)
-{
-	struct device_node *dn = of_get_cpu_node(0, NULL);
-	if (!dn) {
-		pr_err("Failed to find device node for boot cpu\n");
-		return;
-	}
-	cpu_read_ops(dn, 0);
-}
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 3d9967e43..5ad86ceac 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -22,7 +22,23 @@
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>

+static bool
+has_id_aa64pfr0_feature(const struct arm64_cpu_capabilities *entry)
+{
+	u64 val;
+
+	val = read_cpuid(id_aa64pfr0_el1);
+	return (val & entry->register_mask) == entry->register_value;
+}
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
+	{
+		.desc = "GIC system register CPU interface",
+		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
+		.matches = has_id_aa64pfr0_feature,
+		.register_mask = (0xf << 24),
+		.register_value = (1 << 24),
+	},
 	{},
 };
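The mask/value pair selects the GIC field, bits [27:24], of ID_AA64PFR0_EL1 and requires the value 1 (system-register CPU interface present). The same mask-and-compare pattern works for any 4-bit ID register field; a small sketch with an illustrative helper name:

	#include <stdbool.h>
	#include <stdint.h>

	/* True if the 4-bit ID-register field starting at 'shift' equals 'want'. */
	static bool id_field_is(uint64_t reg, unsigned int shift, uint64_t want)
	{
		return ((reg >> shift) & 0xf) == want;
	}

	/* Equivalent to .register_mask = 0xf << 24, .register_value = 1 << 24:
	 *   id_field_is(id_aa64pfr0, 24, 1)  -- GIC CPU interface present */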
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index a78143a5c..9047cab68 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -15,18 +15,13 @@
 #include <asm/cpuidle.h>
 #include <asm/cpu_ops.h>

-int arm_cpuidle_init(unsigned int cpu)
+int __init arm_cpuidle_init(unsigned int cpu)
 {
 	int ret = -EOPNOTSUPP;
-	struct device_node *cpu_node = of_cpu_device_node_get(cpu);
-
-	if (!cpu_node)
-		return -ENODEV;

 	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_init_idle)
-		ret = cpu_ops[cpu]->cpu_init_idle(cpu_node, cpu);
+		ret = cpu_ops[cpu]->cpu_init_idle(cpu);

-	of_node_put(cpu_node);
 	return ret;
 }

@@ -37,7 +32,7 @@ int arm_cpuidle_init(unsigned int cpu)
 * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
 * operations back-end error code otherwise.
 */
-int cpu_suspend(unsigned long arg)
+int arm_cpuidle_suspend(int index)
 {
 	int cpu = smp_processor_id();

@@ -47,5 +42,5 @@ int cpu_suspend(unsigned long arg)
 	 */
 	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
 		return -EOPNOTSUPP;
-	return cpu_ops[cpu]->cpu_suspend(arg);
+	return cpu_ops[cpu]->cpu_suspend(index);
 }
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 352962bc2..e8ca6eaed 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -158,6 +158,7 @@ static __init int is_reserve_region(efi_memory_desc_t *md)
 	case EFI_BOOT_SERVICES_CODE:
 	case EFI_BOOT_SERVICES_DATA:
 	case EFI_CONVENTIONAL_MEMORY:
+	case EFI_PERSISTENT_MEMORY:
 		return 0;
 	default:
 		break;
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index bddd04d03..e16351819 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -21,7 +21,7 @@

 #include <linux/init.h>
 #include <linux/linkage.h>

-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/cpufeature.h>
@@ -124,21 +124,24 @@
 	msr	sp_el0, x23

 #ifdef CONFIG_ARM64_ERRATUM_845719
-	alternative_insn						\
-	"nop",								\
-	"tbz x22, #4, 1f",						\
-	ARM64_WORKAROUND_845719
+
+#undef SEQUENCE_ORG
+#undef SEQUENCE_ALT
+
 #ifdef CONFIG_PID_IN_CONTEXTIDR
-	alternative_insn						\
-	"nop; nop",							\
-	"mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:",		\
-	ARM64_WORKAROUND_845719
+
+#define SEQUENCE_ORG	"nop ; nop ; nop"
+#define SEQUENCE_ALT	"tbz x22, #4, 1f ; mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:"
+
 #else
-	alternative_insn						\
-	"nop",								\
-	"msr contextidr_el1, xzr; 1:",					\
-	ARM64_WORKAROUND_845719
+
+#define SEQUENCE_ORG	"nop ; nop"
+#define SEQUENCE_ALT	"tbz x22, #4, 1f ; msr contextidr_el1, xzr; 1:"
+
 #endif
+
+	alternative_insn SEQUENCE_ORG, SEQUENCE_ALT, ARM64_WORKAROUND_845719
+
 #endif
 	.endif
 	msr	elr_el1, x21			// set up the return data
@@ -349,8 +352,8 @@ el1_inv:
 	// TODO: add support for undefined instructions in kernel mode
 	enable_dbg
 	mov	x0, sp
+	mov	x2, x1
 	mov	x1, #BAD_SYNC
-	mrs	x2, esr_el1
 	b	bad_mode
ENDPROC(el1_sync)

@@ -550,7 +553,7 @@ el0_inv:
 	ct_user_exit
 	mov	x0, sp
 	mov	x1, #BAD_SYNC
-	mrs	x2, esr_el1
+	mov	x2, x25
 	bl	bad_mode
 	b	ret_to_user
ENDPROC(el0_sync)

@@ -582,7 +585,8 @@ ENDPROC(el0_irq)
 *
 */
ENTRY(cpu_switch_to)
-	add	x8, x0, #THREAD_CPU_CONTEXT
+	mov	x10, #THREAD_CPU_CONTEXT
+	add	x8, x0, x10
 	mov	x9, sp
 	stp	x19, x20, [x8], #16		// store callee-saved registers
 	stp	x21, x22, [x8], #16
@@ -591,7 +595,7 @@ ENTRY(cpu_switch_to)
 	stp	x27, x28, [x8], #16
 	stp	x29, x9, [x8], #16
 	str	lr, [x8]
-	add	x8, x1, #THREAD_CPU_CONTEXT
+	add	x8, x1, x10
 	ldp	x19, x20, [x8], #16		// restore callee-saved registers
 	ldp	x21, x22, [x8], #16
 	ldp	x23, x24, [x8], #16
@@ -609,11 +613,16 @@ ENDPROC(cpu_switch_to)
 */
ret_fast_syscall:
 	disable_irq				// disable interrupts
-	ldr	x1, [tsk, #TI_FLAGS]
+	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
+	and	x2, x1, #_TIF_SYSCALL_WORK
+	cbnz	x2, ret_fast_syscall_trace
 	and	x2, x1, #_TIF_WORK_MASK
 	cbnz	x2, fast_work_pending
 	enable_step_tsk x1, x2
 	kernel_exit 0, ret = 1
+ret_fast_syscall_trace:
+	enable_irq				// enable interrupts
+	b	__sys_trace_return

 /*
 * Ok, we need to do extra processing, enter the slow path.
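The ret_fast_syscall change closes a window where a tracer attaches after syscall entry but before exit: the thread flags are re-read with interrupts disabled, and any syscall-work bit diverts to the traced (slow) return path. A rough C rendering of the new control flow; all helper names here are illustrative, the authoritative code is the assembly above:

	/* Illustrative shape of the patched fast-exit path, not kernel code. */
	void ret_fast_syscall_sketch(void)
	{
		unsigned long flags;

		local_irq_disable();			/* disable interrupts */
		flags = current_thread_info()->flags;	/* re-check for syscall tracing */

		if (flags & _TIF_SYSCALL_WORK) {	/* tracer attached mid-syscall */
			local_irq_enable();
			syscall_trace_return();		/* take the traced exit path */
			return;
		}
		if (flags & _TIF_WORK_MASK) {
			do_fast_work_pending();		/* signals, reschedule, ... */
			return;
		}
		kernel_exit_to_el0();			/* normal fast return */
	}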
diff --git a/arch/arm64/kernel/entry32.S b/arch/arm64/kernel/entry32.S
index bd9bfaa92..f332d5d1f 100644
--- a/arch/arm64/kernel/entry32.S
+++ b/arch/arm64/kernel/entry32.S
@@ -32,13 +32,11 @@

ENTRY(compat_sys_sigreturn_wrapper)
 	mov	x0, sp
-	mov	x27, #0		// prevent syscall restart handling (why)
 	b	compat_sys_sigreturn
ENDPROC(compat_sys_sigreturn_wrapper)

ENTRY(compat_sys_rt_sigreturn_wrapper)
 	mov	x0, sp
-	mov	x27, #0		// prevent syscall restart handling (why)
 	b	compat_sys_rt_sigreturn
ENDPROC(compat_sys_rt_sigreturn_wrapper)
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 3dca15634..44d6f7545 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -17,6 +17,7 @@
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

+#include <linux/cpu.h>
 #include <linux/cpu_pm.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -296,6 +297,35 @@ static void fpsimd_pm_init(void)
static inline void fpsimd_pm_init(void) { }
 #endif /* CONFIG_CPU_PM */

+#ifdef CONFIG_HOTPLUG_CPU
+static int fpsimd_cpu_hotplug_notifier(struct notifier_block *nfb,
+				       unsigned long action,
+				       void *hcpu)
+{
+	unsigned int cpu = (long)hcpu;
+
+	switch (action) {
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+		per_cpu(fpsimd_last_state, cpu) = NULL;
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block fpsimd_cpu_hotplug_notifier_block = {
+	.notifier_call = fpsimd_cpu_hotplug_notifier,
+};
+
+static inline void fpsimd_hotplug_init(void)
+{
+	register_cpu_notifier(&fpsimd_cpu_hotplug_notifier_block);
+}
+
+#else
+static inline void fpsimd_hotplug_init(void) { }
+#endif
+
 /*
 * FP/SIMD support code initialisation.
 */
@@ -315,6 +345,7 @@ static int __init fpsimd_init(void)
 		elf_hwcap |= HWCAP_ASIMD;

 	fpsimd_pm_init();
+	fpsimd_hotplug_init();

 	return 0;
 }
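Clearing fpsimd_last_state for a dead CPU matters because the FPSIMD code skips register reloads when it believes a task's state is still live in the hardware. A simplified sketch of that lazy-restore test, assuming the tracking scheme of this era's fpsimd.c: after an offline/online cycle, the recycled CPU must not claim to still hold anyone's registers, hence the NULL-ing in the notifier above.

	/* Simplified (assumption: mirrors fpsimd_thread_switch's test):
	 * a reload can be skipped only if this CPU still holds st's registers
	 * AND st still remembers being loaded here. */
	static bool fpsimd_still_loaded_sketch(struct fpsimd_state *st,
					       struct fpsimd_state *last_on_cpu,
					       unsigned int this_cpu)
	{
		return last_on_cpu == st && st->cpu == this_cpu;
	}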
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 19f915e8f..c0ff3ce42 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -237,8 +237,6 @@ ENTRY(stext)
 	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
 	adrp	x24, __PHYS_OFFSET
 	bl	set_cpu_boot_mode_flag
-
-	bl	__vet_fdt
 	bl	__create_page_tables		// x25=TTBR0, x26=TTBR1
 	/*
 	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
@@ -270,24 +268,6 @@ preserve_boot_args:
ENDPROC(preserve_boot_args)

/*
- * Determine validity of the x21 FDT pointer.
- * The dtb must be 8-byte aligned and live in the first 512M of memory.
- */
-__vet_fdt:
-	tst	x21, #0x7
-	b.ne	1f
-	cmp	x21, x24
-	b.lt	1f
-	mov	x0, #(1 << 29)
-	add	x0, x0, x24
-	cmp	x21, x0
-	b.ge	1f
-	ret
-1:
-	mov	x21, #0
-	ret
-ENDPROC(__vet_fdt)
-/*
 * Macro to create a table entry to the next page.
 *
 * tbl:	page table address
@@ -348,8 +328,7 @@ ENDPROC(__vet_fdt)
 * required to get the kernel running. The following sections are required:
 *   - identity mapping to enable the MMU (low address, TTBR0)
 *   - first few MB of the kernel linear mapping to jump to once the MMU has
- *     been enabled, including the FDT blob (TTBR1)
- *   - pgd entry for fixed mappings (TTBR1)
+ *     been enabled
 */
__create_page_tables:
 	adrp	x25, idmap_pg_dir
@@ -382,7 +361,7 @@ __create_page_tables:
 	 * Create the identity mapping.
 	 */
 	mov	x0, x25				// idmap_pg_dir
-	adrp	x3, KERNEL_START		// __pa(KERNEL_START)
+	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)

 #ifndef CONFIG_ARM64_VA_BITS_48
 #define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
@@ -405,11 +384,11 @@ __create_page_tables:

 	/*
 	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
-	 * entire kernel image can be ID mapped. As T0SZ == (64 - #bits used),
+	 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
 	 * this number conveniently equals the number of leading zeroes in
-	 * the physical address of KERNEL_END.
+	 * the physical address of __idmap_text_end.
	 */
-	adrp	x5, KERNEL_END
+	adrp	x5, __idmap_text_end
 	clz	x5, x5
 	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
 	b.ge	1f			// .. then skip additional level
@@ -424,8 +403,8 @@ __create_page_tables:
 #endif

 	create_pgd_entry x0, x3, x5, x6
-	mov	x5, x3				// __pa(KERNEL_START)
-	adr_l	x6, KERNEL_END			// __pa(KERNEL_END)
+	mov	x5, x3				// __pa(__idmap_text_start)
+	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)
 	create_block_map x0, x7, x3, x5, x6

 	/*
@@ -439,22 +418,6 @@ __create_page_tables:
 	create_block_map x0, x7, x3, x5, x6

 	/*
-	 * Map the FDT blob (maximum 2MB; must be within 512MB of
-	 * PHYS_OFFSET).
-	 */
-	mov	x3, x21				// FDT phys address
-	and	x3, x3, #~((1 << 21) - 1)	// 2MB aligned
-	mov	x6, #PAGE_OFFSET
-	sub	x5, x3, x24			// subtract PHYS_OFFSET
-	tst	x5, #~((1 << 29) - 1)		// within 512MB?
-	csel	x21, xzr, x21, ne		// zero the FDT pointer
-	b.ne	1f
-	add	x5, x5, x6			// __va(FDT blob)
-	add	x6, x5, #1 << 21		// 2MB for the FDT blob
-	sub	x6, x6, #1			// inclusive range
-	create_block_map x0, x7, x3, x5, x6
-1:
-	/*
 	 * Since the page tables have been populated with non-cacheable
 	 * accesses (MMU disabled), invalidate the idmap and swapper page
 	 * tables again to remove any speculatively loaded cache lines.
@@ -669,6 +632,7 @@ ENDPROC(__secondary_switched)
 *
 * other registers depend on the function called upon completion
 */
+	.section	".idmap.text", "ax"
__enable_mmu:
 	ldr	x5, =vectors
 	msr	vbar_el1, x5
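The clz trick works because T0SZ is "64 minus the number of address bits", and the smallest sufficient address size for the ID map is set by the highest physical address it must cover: that address's count of leading zero bits is exactly the largest legal T0SZ. As a C sketch (assuming a non-zero address):

	#include <stdint.h>

	/* Largest TCR_EL1.T0SZ that still covers highest_pa with the identity
	 * map; mirrors 'adrp x5, __idmap_text_end; clz x5, x5' above. */
	static unsigned int idmap_t0sz_sketch(uint64_t highest_pa)
	{
		return __builtin_clzll(highest_pa);	/* highest_pa != 0 assumed */
	}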
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index e7d934d3a..7a1a5da6c 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -31,7 +31,6 @@
 #include <asm/current.h>
 #include <asm/debug-monitors.h>
 #include <asm/hw_breakpoint.h>
-#include <asm/kdebug.h>
 #include <asm/traps.h>
 #include <asm/cputype.h>
 #include <asm/system_misc.h>
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 924902083..dd9671cd0 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -77,6 +77,14 @@ bool __kprobes aarch64_insn_is_nop(u32 insn)
 	}
 }

+bool aarch64_insn_is_branch_imm(u32 insn)
+{
+	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
+		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
+		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
+		aarch64_insn_is_bcond(insn));
+}
+
 static DEFINE_SPINLOCK(patch_lock);

 static void __kprobes *patch_map(void *addr, int fixmap)
@@ -1057,6 +1065,58 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
 }

+/*
+ * Decode the imm field of a branch, and return the byte offset as a
+ * signed value (so it can be used when computing a new branch
+ * target).
+ */
+s32 aarch64_get_branch_offset(u32 insn)
+{
+	s32 imm;
+
+	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
+		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
+		return (imm << 6) >> 4;
+	}
+
+	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
+	    aarch64_insn_is_bcond(insn)) {
+		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
+		return (imm << 13) >> 11;
+	}
+
+	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
+		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
+		return (imm << 18) >> 16;
+	}
+
+	/* Unhandled instruction */
+	BUG();
+}
+
+/*
+ * Encode the displacement of a branch in the imm field and return the
+ * updated instruction.
+ */
+u32 aarch64_set_branch_offset(u32 insn, s32 offset)
+{
+	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
+		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
+						     offset >> 2);
+
+	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
+	    aarch64_insn_is_bcond(insn))
+		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
+						     offset >> 2);
+
+	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
+		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
+						     offset >> 2);
+
+	/* Unhandled instruction */
+	BUG();
+}
+
 bool aarch32_insn_is_wide(u32 insn)
 {
 	return insn >= 0xe800;
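The shift pairs in aarch64_get_branch_offset() do two jobs at once: sign-extend the N-bit immediate and rescale it from instruction words to bytes. For the 26-bit case, (imm << 6) >> 4 left-aligns the field in a signed 32-bit value, then the arithmetic right shift brings it back down multiplied by 4. A standalone check of the idiom:

	#include <assert.h>
	#include <stdint.h>

	/* imm26 (word offset) -> signed byte offset, as in the kernel code.
	 * Relies on arithmetic right shift of negative values, which is
	 * implementation-defined in ISO C but guaranteed by kernel compilers. */
	static int32_t imm26_to_byte_offset(uint32_t imm26)
	{
		return ((int32_t)(imm26 << 6)) >> 4;
	}

	int main(void)
	{
		assert(imm26_to_byte_offset(0x0000001) == 4);	/* +1 instruction */
		assert(imm26_to_byte_offset(0x3ffffff) == -4);	/* -1 instruction */
		return 0;
	}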
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 240b75c0e..463fa2e7e 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -61,7 +61,7 @@ void __init init_IRQ(void)
static bool migrate_one_irq(struct irq_desc *desc)
 {
 	struct irq_data *d = irq_desc_get_irq_data(desc);
-	const struct cpumask *affinity = d->affinity;
+	const struct cpumask *affinity = irq_data_get_affinity_mask(d);
 	struct irq_chip *c;
 	bool ret = false;

@@ -81,7 +81,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	if (!c->irq_set_affinity)
 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
 	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
-		cpumask_copy(d->affinity, affinity);
+		cpumask_copy(irq_data_get_affinity_mask(d), affinity);

 	return ret;
 }
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index cce18c85d..b31e9a4b6 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -488,7 +488,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
 		}

 		err = request_irq(irq, armpmu->handle_irq,
-				  IRQF_NOBALANCING,
+				  IRQF_NOBALANCING | IRQF_NO_THREAD,
 				  "arm-pmu", armpmu);
 		if (err) {
 			pr_err("unable to request IRQ%d for ARM PMU counters\n",
@@ -1318,7 +1318,7 @@ static int armpmu_device_probe(struct platform_device *pdev)
 	/* Don't bother with PPIs; they're already affine */
 	irq = platform_get_irq(pdev, 0);
 	if (irq >= 0 && irq_is_percpu(irq))
-		return 0;
+		goto out;

 	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
 	if (!irqs)
@@ -1340,12 +1340,13 @@ static int armpmu_device_probe(struct platform_device *pdev)
 			if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
 				break;

-		of_node_put(dn);
 		if (cpu >= nr_cpu_ids) {
 			pr_warn("Failed to find logical CPU for %s\n",
 				dn->name);
+			of_node_put(dn);
 			break;
 		}
+		of_node_put(dn);

 		irqs[i] = cpu;
 	}
@@ -1355,6 +1356,7 @@ static int armpmu_device_probe(struct platform_device *pdev)
 	else
 		kfree(irqs);

+out:
 	cpu_pmu->plat_device = pdev;
 	return 0;
 }
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index c6b1f3b96..223b093c9 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -58,14 +58,6 @@
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
 #endif

-void soft_restart(unsigned long addr)
-{
-	setup_mm_for_reboot();
-	cpu_soft_restart(virt_to_phys(cpu_reset), addr);
-	/* Should never get here */
-	BUG();
-}
-
 /*
 * Function pointers to optional machine specific functions
 */
@@ -136,9 +128,7 @@ void machine_power_off(void)

 /*
 * Restart requires that the secondary CPUs stop performing any activity
- * while the primary CPU resets the system. Systems with a single CPU can
- * use soft_restart() as their machine descriptor's .restart hook, since that
- * will cause the only available CPU to reset. Systems with multiple CPUs must
+ * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
@@ -243,7 +233,8 @@ void release_thread(struct task_struct *dead_task)

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
-	fpsimd_preserve_current_state();
+	if (current->mm)
+		fpsimd_preserve_current_state();
 	*dst = *src;
 	return 0;
 }
@@ -254,35 +245,35 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 		unsigned long stk_sz, struct task_struct *p)
 {
 	struct pt_regs *childregs = task_pt_regs(p);
-	unsigned long tls = p->thread.tp_value;

 	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

 	if (likely(!(p->flags & PF_KTHREAD))) {
 		*childregs = *current_pt_regs();
 		childregs->regs[0] = 0;
-		if (is_compat_thread(task_thread_info(p))) {
-			if (stack_start)
+
+		/*
+		 * Read the current TLS pointer from tpidr_el0 as it may be
+		 * out-of-sync with the saved value.
+		 */
+		asm("mrs %0, tpidr_el0" : "=r" (*task_user_tls(p)));
+
+		if (stack_start) {
+			if (is_compat_thread(task_thread_info(p)))
 				childregs->compat_sp = stack_start;
-		} else {
-			/*
-			 * Read the current TLS pointer from tpidr_el0 as it may be
-			 * out-of-sync with the saved value.
-			 */
-			asm("mrs %0, tpidr_el0" : "=r" (tls));
-			if (stack_start) {
-				/* 16-byte aligned stack mandatory on AArch64 */
-				if (stack_start & 15)
-					return -EINVAL;
+			/* 16-byte aligned stack mandatory on AArch64 */
+			else if (stack_start & 15)
+				return -EINVAL;
+			else
 				childregs->sp = stack_start;
-			}
 		}
+
 		/*
 		 * If a TLS pointer was passed to clone (4th argument), use it
 		 * for the new thread.
 		 */
 		if (clone_flags & CLONE_SETTLS)
-			tls = childregs->regs[3];
+			p->thread.tp_value = childregs->regs[3];
 	} else {
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->pstate = PSR_MODE_EL1h;
@@ -291,7 +282,6 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 	}
 	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
 	p->thread.cpu_context.sp = (unsigned long)childregs;
-	p->thread.tp_value = tls;

 	ptrace_hw_copy_thread(p);

@@ -302,18 +292,12 @@ static void tls_thread_switch(struct task_struct *next)
 {
 	unsigned long tpidr, tpidrro;

-	if (!is_compat_task()) {
-		asm("mrs %0, tpidr_el0" : "=r" (tpidr));
-		current->thread.tp_value = tpidr;
-	}
+	asm("mrs %0, tpidr_el0" : "=r" (tpidr));
+	*task_user_tls(current) = tpidr;

-	if (is_compat_thread(task_thread_info(next))) {
-		tpidr = 0;
-		tpidrro = next->thread.tp_value;
-	} else {
-		tpidr = next->thread.tp_value;
-		tpidrro = 0;
-	}
+	tpidr = *task_user_tls(next);
+	tpidrro = is_compat_thread(task_thread_info(next)) ?
+		  next->thread.tp_value : 0;

 	asm(
 	"	msr	tpidr_el0, %0\n"
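The copy_thread()/tls_thread_switch() rework funnels both ABIs through task_user_tls(): a native task's TLS tracks tpidr_el0, while a compat task's TLS lives in tp_value and is exposed through the read-only tpidrro_el0. The helper itself is defined in <asm/processor.h> and is not part of this diff; a paraphrase, under the assumption that a separate slot (tp2_value in mainline) preserves tpidr_el0 for compat tasks, which can still read that register:

	/* Paraphrase of task_user_tls() as assumed here (not from this diff). */
	static unsigned long *task_user_tls_sketch(struct task_struct *t)
	{
		if (is_compat_thread(task_thread_info(t)))
			return &t->thread.tp2_value;	/* tpidr_el0 shadow       */
		return &t->thread.tp_value;		/* native TLS (tpidr_el0) */
	}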
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index ea18cb539..869f20274 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -15,7 +15,6 @@

 #define pr_fmt(fmt) "psci: " fmt

-#include <linux/acpi.h>
 #include <linux/init.h>
 #include <linux/of.h>
 #include <linux/smp.h>
@@ -25,8 +24,8 @@
 #include <linux/slab.h>
 #include <uapi/linux/psci.h>

-#include <asm/acpi.h>
 #include <asm/compiler.h>
+#include <asm/cputype.h>
 #include <asm/cpu_ops.h>
 #include <asm/errno.h>
 #include <asm/psci.h>
@@ -37,16 +36,31 @@
 #define PSCI_POWER_STATE_TYPE_STANDBY		0
 #define PSCI_POWER_STATE_TYPE_POWER_DOWN	1

-struct psci_power_state {
-	u16	id;
-	u8	type;
-	u8	affinity_level;
-};
+static bool psci_power_state_loses_context(u32 state)
+{
+	return state & PSCI_0_2_POWER_STATE_TYPE_MASK;
+}
+
+static bool psci_power_state_is_valid(u32 state)
+{
+	const u32 valid_mask = PSCI_0_2_POWER_STATE_ID_MASK |
+			       PSCI_0_2_POWER_STATE_TYPE_MASK |
+			       PSCI_0_2_POWER_STATE_AFFL_MASK;
+
+	return !(state & ~valid_mask);
+}
+
+/*
+ * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
+ * calls to its resident CPU, so we must avoid issuing those. We never migrate
+ * a Trusted OS even if it claims to be capable of migration -- doing so will
+ * require cooperation with a Trusted OS driver.
+ */
+static int resident_cpu = -1;

 struct psci_operations {
-	int (*cpu_suspend)(struct psci_power_state state,
-			   unsigned long entry_point);
-	int (*cpu_off)(struct psci_power_state state);
+	int (*cpu_suspend)(u32 state, unsigned long entry_point);
+	int (*cpu_off)(u32 state);
 	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
 	int (*migrate)(unsigned long cpuid);
 	int (*affinity_info)(unsigned long target_affinity,
@@ -56,23 +70,21 @@ struct psci_operations {

 static struct psci_operations psci_ops;

-static int (*invoke_psci_fn)(u64, u64, u64, u64);
-typedef int (*psci_initcall_t)(const struct device_node *);
-
-asmlinkage int __invoke_psci_fn_hvc(u64, u64, u64, u64);
-asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64);
+typedef unsigned long (psci_fn)(unsigned long, unsigned long,
+				unsigned long, unsigned long);
+asmlinkage psci_fn __invoke_psci_fn_hvc;
+asmlinkage psci_fn __invoke_psci_fn_smc;
+static psci_fn *invoke_psci_fn;

 enum psci_function {
 	PSCI_FN_CPU_SUSPEND,
 	PSCI_FN_CPU_ON,
 	PSCI_FN_CPU_OFF,
 	PSCI_FN_MIGRATE,
-	PSCI_FN_AFFINITY_INFO,
-	PSCI_FN_MIGRATE_INFO_TYPE,
 	PSCI_FN_MAX,
 };

-static DEFINE_PER_CPU_READ_MOSTLY(struct psci_power_state *, psci_power_state);
+static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);

 static u32 psci_function_id[PSCI_FN_MAX];

@@ -92,56 +104,28 @@ static int psci_to_linux_errno(int errno)
 	return -EINVAL;
 }

-static u32 psci_power_state_pack(struct psci_power_state state)
-{
-	return ((state.id << PSCI_0_2_POWER_STATE_ID_SHIFT)
-			& PSCI_0_2_POWER_STATE_ID_MASK) |
-		((state.type << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
-			& PSCI_0_2_POWER_STATE_TYPE_MASK) |
-		((state.affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
-			& PSCI_0_2_POWER_STATE_AFFL_MASK);
-}
-
-static void psci_power_state_unpack(u32 power_state,
-				    struct psci_power_state *state)
-{
-	state->id = (power_state & PSCI_0_2_POWER_STATE_ID_MASK) >>
-			PSCI_0_2_POWER_STATE_ID_SHIFT;
-	state->type = (power_state & PSCI_0_2_POWER_STATE_TYPE_MASK) >>
-			PSCI_0_2_POWER_STATE_TYPE_SHIFT;
-	state->affinity_level =
-			(power_state & PSCI_0_2_POWER_STATE_AFFL_MASK) >>
-			PSCI_0_2_POWER_STATE_AFFL_SHIFT;
-}
-
-static int psci_get_version(void)
+static u32 psci_get_version(void)
 {
-	int err;
-
-	err = invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
-	return err;
+	return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
 }

-static int psci_cpu_suspend(struct psci_power_state state,
-			    unsigned long entry_point)
+static int psci_cpu_suspend(u32 state, unsigned long entry_point)
 {
 	int err;
-	u32 fn, power_state;
+	u32 fn;

 	fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
-	power_state = psci_power_state_pack(state);
-	err = invoke_psci_fn(fn, power_state, entry_point, 0);
+	err = invoke_psci_fn(fn, state, entry_point, 0);
 	return psci_to_linux_errno(err);
 }

-static int psci_cpu_off(struct psci_power_state state)
+static int psci_cpu_off(u32 state)
 {
 	int err;
-	u32 fn, power_state;
+	u32 fn;

 	fn = psci_function_id[PSCI_FN_CPU_OFF];
-	power_state = psci_power_state_pack(state);
-	err = invoke_psci_fn(fn, power_state, 0, 0);
+	err = invoke_psci_fn(fn, state, 0, 0);
 	return psci_to_linux_errno(err);
 }

@@ -168,30 +152,29 @@ static int psci_migrate(unsigned long cpuid)
static int psci_affinity_info(unsigned long target_affinity,
 		unsigned long lowest_affinity_level)
 {
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_AFFINITY_INFO];
-	err = invoke_psci_fn(fn, target_affinity, lowest_affinity_level, 0);
-	return err;
+	return invoke_psci_fn(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity,
+			      lowest_affinity_level, 0);
 }

static int psci_migrate_info_type(void)
 {
-	int err;
-	u32 fn;
+	return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
+}

-	fn = psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE];
-	err = invoke_psci_fn(fn, 0, 0, 0);
-	return err;
+static unsigned long psci_migrate_info_up_cpu(void)
+{
+	return invoke_psci_fn(PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU, 0, 0, 0);
 }

-static int __maybe_unused cpu_psci_cpu_init_idle(struct device_node *cpu_node,
-						 unsigned int cpu)
+static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu)
 {
 	int i, ret, count = 0;
-	struct psci_power_state *psci_states;
-	struct device_node *state_node;
+	u32 *psci_states;
+	struct device_node *state_node, *cpu_node;
+
+	cpu_node = of_get_cpu_node(cpu, NULL);
+	if (!cpu_node)
+		return -ENODEV;

 	/*
 	 * If the PSCI cpu_suspend function hook has not been initialized
@@ -215,13 +198,13 @@
 		return -ENOMEM;

 	for (i = 0; i < count; i++) {
-		u32 psci_power_state;
+		u32 state;

 		state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);

 		ret = of_property_read_u32(state_node,
 					   "arm,psci-suspend-param",
-					   &psci_power_state);
+					   &state);
 		if (ret) {
 			pr_warn(" * %s missing arm,psci-suspend-param property\n",
 				state_node->full_name);
@@ -230,9 +213,13 @@
 		}

 		of_node_put(state_node);
-		pr_debug("psci-power-state %#x index %d\n", psci_power_state,
-							    i);
-		psci_power_state_unpack(psci_power_state, &psci_states[i]);
+		pr_debug("psci-power-state %#x index %d\n", state, i);
+		if (!psci_power_state_is_valid(state)) {
+			pr_warn("Invalid PSCI power state %#x\n", state);
+			ret = -EINVAL;
+			goto free_mem;
+		}
+		psci_states[i] = state;
 	}
 	/* Idle states parsed correctly, initialize per-cpu pointer */
 	per_cpu(psci_power_state, cpu) = psci_states;
@@ -275,6 +262,46 @@ static void psci_sys_poweroff(void)
 	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
 }

+/*
+ * Detect the presence of a resident Trusted OS which may cause CPU_OFF to
+ * return DENIED (which would be fatal).
+ */
+static void __init psci_init_migrate(void)
+{
+	unsigned long cpuid;
+	int type, cpu;
+
+	type = psci_ops.migrate_info_type();
+
+	if (type == PSCI_0_2_TOS_MP) {
+		pr_info("Trusted OS migration not required\n");
+		return;
+	}
+
+	if (type == PSCI_RET_NOT_SUPPORTED) {
+		pr_info("MIGRATE_INFO_TYPE not supported.\n");
+		return;
+	}
+
+	if (type != PSCI_0_2_TOS_UP_MIGRATE &&
+	    type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
+		pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
+		return;
+	}
+
+	cpuid = psci_migrate_info_up_cpu();
+	if (cpuid & ~MPIDR_HWID_BITMASK) {
+		pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
+			cpuid);
+		return;
+	}
+
+	cpu = get_logical_index(cpuid);
+	resident_cpu = cpu >= 0 ? cpu : -1;
+
+	pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
+}
+
 static void __init psci_0_2_set_functions(void)
 {
 	pr_info("Using standard PSCI v0.2 function IDs\n");
@@ -290,11 +317,8 @@ static void __init psci_0_2_set_functions(void)
 	psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE;
 	psci_ops.migrate = psci_migrate;

-	psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN64_AFFINITY_INFO;
 	psci_ops.affinity_info = psci_affinity_info;

-	psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] =
-		PSCI_0_2_FN_MIGRATE_INFO_TYPE;
 	psci_ops.migrate_info_type = psci_migrate_info_type;

 	arm_pm_restart = psci_sys_reset;
@@ -307,32 +331,26 @@
 */
static int __init psci_probe(void)
 {
-	int ver = psci_get_version();
-
-	if (ver == PSCI_RET_NOT_SUPPORTED) {
-		/*
-		 * PSCI versions >=0.2 mandates implementation of
-		 * PSCI_VERSION.
-		 */
-		pr_err("PSCI firmware does not comply with the v0.2 spec.\n");
-		return -EOPNOTSUPP;
-	} else {
-		pr_info("PSCIv%d.%d detected in firmware.\n",
-				PSCI_VERSION_MAJOR(ver),
-				PSCI_VERSION_MINOR(ver));
-
-		if (PSCI_VERSION_MAJOR(ver) == 0 &&
-				PSCI_VERSION_MINOR(ver) < 2) {
-			pr_err("Conflicting PSCI version detected.\n");
-			return -EINVAL;
-		}
+	u32 ver = psci_get_version();
+
+	pr_info("PSCIv%d.%d detected in firmware.\n",
+		PSCI_VERSION_MAJOR(ver),
+		PSCI_VERSION_MINOR(ver));
+
+	if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
+		pr_err("Conflicting PSCI version detected.\n");
+		return -EINVAL;
 	}

 	psci_0_2_set_functions();

+	psci_init_migrate();
+
 	return 0;
 }

+typedef int (*psci_initcall_t)(const struct device_node *);
+
 /*
 * PSCI init function for PSCI versions >=0.2
 *
@@ -421,6 +439,7 @@ int __init psci_dt_init(void)
 	return init_fn(np);
 }

+#ifdef CONFIG_ACPI
 /*
 * We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's
 * explicitly clarified in SBBR
@@ -441,10 +460,11 @@ int __init psci_acpi_init(void)

 	return psci_probe();
 }
+#endif

 #ifdef CONFIG_SMP

-static int __init cpu_psci_cpu_init(struct device_node *dn, unsigned int cpu)
+static int __init cpu_psci_cpu_init(unsigned int cpu)
 {
 	return 0;
 }
@@ -469,11 +489,21 @@ static int cpu_psci_cpu_boot(unsigned int cpu)
 }

 #ifdef CONFIG_HOTPLUG_CPU
+static bool psci_tos_resident_on(int cpu)
+{
+	return cpu == resident_cpu;
+}
+
static int cpu_psci_cpu_disable(unsigned int cpu)
 {
 	/* Fail early if we don't have CPU_OFF support */
 	if (!psci_ops.cpu_off)
 		return -EOPNOTSUPP;
+
+	/* Trusted OS will deny CPU_OFF */
+	if (psci_tos_resident_on(cpu))
+		return -EPERM;
+
 	return 0;
 }

@@ -484,9 +514,8 @@ static void cpu_psci_cpu_die(unsigned int cpu)
 	 * There are no known implementations of PSCI actually using the
 	 * power state field, pass a sensible default for now.
 	 */
-	struct psci_power_state state = {
-		.type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
-	};
+	u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
+		    PSCI_0_2_POWER_STATE_TYPE_SHIFT;

 	ret = psci_ops.cpu_off(state);

@@ -498,7 +527,7 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
 	int err, i;

 	if (!psci_ops.affinity_info)
-		return 1;
+		return 0;
 	/*
 	 * cpu_kill could race with cpu_die and we can
 	 * potentially end up declaring this cpu undead
@@ -509,7 +538,7 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
 		err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
 		if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
 			pr_info("CPU%d killed.\n", cpu);
-			return 1;
+			return 0;
 		}

 		msleep(10);
@@ -518,15 +547,14 @@
 	pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
 			cpu, err);

-	/* Make op_cpu_kill() fail. */
-	return 0;
+	return -ETIMEDOUT;
 }
 #endif
 #endif

static int psci_suspend_finisher(unsigned long index)
 {
-	struct psci_power_state *state = __this_cpu_read(psci_power_state);
+	u32 *state = __this_cpu_read(psci_power_state);

 	return psci_ops.cpu_suspend(state[index - 1],
 				    virt_to_phys(cpu_resume));
@@ -535,7 +563,7 @@ static int psci_suspend_finisher(unsigned long index)
static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
 {
 	int ret;
-	struct psci_power_state *state = __this_cpu_read(psci_power_state);
+	u32 *state = __this_cpu_read(psci_power_state);
 	/*
 	 * idle state index 0 corresponds to wfi, should never be called
 	 * from the cpu_suspend operations
 	 */
 	if (WARN_ON_ONCE(!index))
 		return -EINVAL;

-	if (state[index - 1].type == PSCI_POWER_STATE_TYPE_STANDBY)
+	if (!psci_power_state_loses_context(state[index - 1]))
 		ret = psci_ops.cpu_suspend(state[index - 1], 0);
 	else
-		ret = __cpu_suspend(index, psci_suspend_finisher);
+		ret = cpu_suspend(index, psci_suspend_finisher);

 	return ret;
 }
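With struct psci_power_state gone, callers build the packed PSCI v0.2 power_state word directly, and validity becomes a single mask test as in psci_power_state_is_valid() above. A sketch of constructing such a value from its three fields, equivalent to the deleted psci_power_state_pack() and using the mask/shift names from <uapi/linux/psci.h>:

	#include <uapi/linux/psci.h>

	/* Pack id/type/affinity_level into a v0.2 power_state word; the
	 * inverse of the deleted psci_power_state_unpack(). */
	static u32 psci_pack_power_state_sketch(u16 id, u8 type, u8 affl)
	{
		return (((u32)id << PSCI_0_2_POWER_STATE_ID_SHIFT) &
			PSCI_0_2_POWER_STATE_ID_MASK) |
		       (((u32)type << PSCI_0_2_POWER_STATE_TYPE_SHIFT) &
			PSCI_0_2_POWER_STATE_TYPE_MASK) |
		       (((u32)affl << PSCI_0_2_POWER_STATE_AFFL_SHIFT) &
			PSCI_0_2_POWER_STATE_AFFL_MASK);
	}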
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 74753132c..f3067d4d4 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -64,6 +64,7 @@
 #include <asm/psci.h>
 #include <asm/efi.h>
 #include <asm/virt.h>
+#include <asm/xen/hypervisor.h>

unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);
@@ -105,18 +106,6 @@ static struct resource mem_res[] = {
 #define kernel_code mem_res[0]
 #define kernel_data mem_res[1]

-void __init early_print(const char *str, ...)
-{
-	char buf[256];
-	va_list ap;
-
-	va_start(ap, str);
-	vsnprintf(buf, sizeof(buf), str, ap);
-	va_end(ap);
-
-	printk("%s", buf);
-}
-
 /*
 * The recorded values of x0 .. x3 upon kernel entry.
 */
@@ -326,12 +315,14 @@ static void __init setup_processor(void)

static void __init setup_machine_fdt(phys_addr_t dt_phys)
 {
-	if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) {
-		early_print("\n"
-			"Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
-			"The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
-			"\nPlease check your bootloader.\n",
-			dt_phys, phys_to_virt(dt_phys));
+	void *dt_virt = fixmap_remap_fdt(dt_phys);
+
+	if (!dt_virt || !early_init_dt_scan(dt_virt)) {
+		pr_crit("\n"
+			"Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
+			"The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
+			"\nPlease check your bootloader.",
+			&dt_phys, dt_virt);

 		while (true)
 			cpu_relax();
@@ -374,8 +365,6 @@ void __init setup_arch(char **cmdline_p)
 {
 	setup_processor();

-	setup_machine_fdt(__fdt_pointer);
-
 	init_mm.start_code = (unsigned long) _text;
 	init_mm.end_code   = (unsigned long) _etext;
 	init_mm.end_data   = (unsigned long) _edata;
@@ -386,6 +375,8 @@ void __init setup_arch(char **cmdline_p)
 	early_fixmap_init();
 	early_ioremap_init();

+	setup_machine_fdt(__fdt_pointer);
+
 	parse_early_param();

 	/*
@@ -408,16 +399,14 @@ void __init setup_arch(char **cmdline_p)
 	if (acpi_disabled) {
 		unflatten_device_tree();
 		psci_dt_init();
-		cpu_read_bootcpu_ops();
-#ifdef CONFIG_SMP
-		of_smp_init_cpus();
-#endif
 	} else {
 		psci_acpi_init();
-		acpi_init_cpus();
 	}
+	xen_early_init();

+	cpu_read_bootcpu_ops();
 #ifdef CONFIG_SMP
+	smp_init_cpus();
 	smp_build_mpidr_hash();
 #endif
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index c0cff3410..948f0ad2d 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -369,7 +369,7 @@ badframe:
 	if (show_unhandled_signals)
 		pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
 				    current->comm, task_pid_nr(current), __func__,
-				    regs->pc, regs->sp);
+				    regs->pc, regs->compat_sp);
 	force_sig(SIGSEGV, current);
 	return 0;
 }
@@ -406,7 +406,7 @@ badframe:
 	if (show_unhandled_signals)
 		pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
 				    current->comm, task_pid_nr(current), __func__,
-				    regs->pc, regs->sp);
+				    regs->pc, regs->compat_sp);
 	force_sig(SIGSEGV, current);
 	return 0;
 }
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index ede186cdd..803cfea41 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -130,12 +130,14 @@ ENDPROC(__cpu_suspend_enter)
 /*
 * x0 must contain the sctlr value retrieved from restored context
 */
+	.pushsection	".idmap.text", "ax"
ENTRY(cpu_resume_mmu)
 	ldr	x3, =cpu_resume_after_mmu
 	msr	sctlr_el1, x0		// restore sctlr_el1
 	isb
 	br	x3			// global jump to virtual address
ENDPROC(cpu_resume_mmu)
+	.popsection
cpu_resume_after_mmu:
 	mov	x0, #0			// return zero on success
 	ldp	x19, x20, [sp, #16]
@@ -162,15 +164,12 @@ ENTRY(cpu_resume)
 #else
 	mov	x7, xzr
 #endif
-	adrp	x0, sleep_save_sp
-	add	x0, x0, #:lo12:sleep_save_sp
-	ldr	x0, [x0, #SLEEP_SAVE_SP_PHYS]
+	ldr_l	x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
 	ldr	x0, [x0, x7, lsl #3]
 	/* load sp from context */
 	ldr	x2, [x0, #CPU_CTX_SP]
-	adrp	x1, sleep_idmap_phys
 	/* load physical address of identity map page table in x1 */
-	ldr	x1, [x1, #:lo12:sleep_idmap_phys]
+	adrp	x1, idmap_pg_dir
 	mov	sp, x2
 	/*
 	 * cpu_do_resume expects x0 to contain context physical address
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index d3a202b85..50fb46966 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -17,6 +17,7 @@
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

+#include <linux/acpi.h>
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
@@ -248,20 +249,20 @@ static int op_cpu_kill(unsigned int cpu)
 	 * time and hope that it's dead, so let's skip the wait and just hope.
 	 */
 	if (!cpu_ops[cpu]->cpu_kill)
-		return 1;
+		return 0;

 	return cpu_ops[cpu]->cpu_kill(cpu);
 }

-static DECLARE_COMPLETION(cpu_died);
-
 /*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
 {
-	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+	int err;
+
+	if (!cpu_wait_death(cpu, 5)) {
 		pr_crit("CPU%u: cpu didn't die\n", cpu);
 		return;
 	}
@@ -273,8 +274,10 @@ void __cpu_die(unsigned int cpu)
 	 * verify that it has really left the kernel before we consider
 	 * clobbering anything it might still be using.
 	 */
-	if (!op_cpu_kill(cpu))
-		pr_warn("CPU%d may not have shut down cleanly\n", cpu);
+	err = op_cpu_kill(cpu);
+	if (err)
+		pr_warn("CPU%d may not have shut down cleanly: %d\n",
+			cpu, err);
 }

 /*
@@ -294,7 +297,7 @@ void cpu_die(void)
 	local_irq_disable();

 	/* Tell __cpu_die() that this CPU is now safe to dispose of */
-	complete(&cpu_died);
+	(void)cpu_report_death();

 	/*
	 * Actually shutdown the CPU. This must never fail. The specific hotplug
@@ -318,57 +321,158 @@ void __init smp_prepare_boot_cpu(void)
 	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 }

+static u64 __init of_get_cpu_mpidr(struct device_node *dn)
+{
+	const __be32 *cell;
+	u64 hwid;
+
+	/*
+	 * A cpu node with missing "reg" property is
+	 * considered invalid to build a cpu_logical_map
+	 * entry.
+	 */
+	cell = of_get_property(dn, "reg", NULL);
+	if (!cell) {
+		pr_err("%s: missing reg property\n", dn->full_name);
+		return INVALID_HWID;
+	}
+
+	hwid = of_read_number(cell, of_n_addr_cells(dn));
+	/*
+	 * Non affinity bits must be set to 0 in the DT
+	 */
+	if (hwid & ~MPIDR_HWID_BITMASK) {
+		pr_err("%s: invalid reg property\n", dn->full_name);
+		return INVALID_HWID;
+	}
+	return hwid;
+}
+
+/*
+ * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
+ * entries and check for duplicates. If any is found just ignore the
+ * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
+ * matching valid MPIDR values.
+ */
+static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
+{
+	unsigned int i;
+
+	for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
+		if (cpu_logical_map(i) == hwid)
+			return true;
+	return false;
+}
+
+/*
+ * Initialize cpu operations for a logical cpu and
+ * set it in the possible mask on success
+ */
+static int __init smp_cpu_setup(int cpu)
+{
+	if (cpu_read_ops(cpu))
+		return -ENODEV;
+
+	if (cpu_ops[cpu]->cpu_init(cpu))
+		return -ENODEV;
+
+	set_cpu_possible(cpu, true);
+
+	return 0;
+}
+
+static bool bootcpu_valid __initdata;
+static unsigned int cpu_count = 1;
+
+#ifdef CONFIG_ACPI
+/*
+ * acpi_map_gic_cpu_interface - parse processor MADT entry
+ *
+ * Carry out sanity checks on MADT processor entry and initialize
+ * cpu_logical_map on success
+ */
+static void __init
+acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
+{
+	u64 hwid = processor->arm_mpidr;
+
+	if (!(processor->flags & ACPI_MADT_ENABLED)) {
+		pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
+		return;
+	}
+
+	if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
+		pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
+		return;
+	}
+
+	if (is_mpidr_duplicate(cpu_count, hwid)) {
+		pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
+		return;
+	}
+
+	/* Check if GICC structure of boot CPU is available in the MADT */
+	if (cpu_logical_map(0) == hwid) {
+		if (bootcpu_valid) {
+			pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
+			       hwid);
+			return;
+		}
+
+		bootcpu_valid = true;
+		return;
+	}
+
+	if (cpu_count >= NR_CPUS)
+		return;
+
+	/* map the logical cpu id to cpu MPIDR */
+	cpu_logical_map(cpu_count) = hwid;
+
+	cpu_count++;
+}
+
+static int __init
+acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
+			     const unsigned long end)
+{
+	struct acpi_madt_generic_interrupt *processor;
+
+	processor = (struct acpi_madt_generic_interrupt *)header;
+	if (BAD_MADT_GICC_ENTRY(processor, end))
+		return -EINVAL;
+
+	acpi_table_print_madt_entry(header);
+
+	acpi_map_gic_cpu_interface(processor);
+
+	return 0;
+}
+#else
+#define acpi_table_parse_madt(...)	do { } while (0)
+#endif
+
 /*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
-void __init of_smp_init_cpus(void)
+void __init of_parse_and_init_cpus(void)
 {
 	struct device_node *dn = NULL;
-	unsigned int i, cpu = 1;
-	bool bootcpu_valid = false;

 	while ((dn = of_find_node_by_type(dn, "cpu"))) {
-		const u32 *cell;
-		u64 hwid;
+		u64 hwid = of_get_cpu_mpidr(dn);

-		/*
-		 * A cpu node with missing "reg" property is
-		 * considered invalid to build a cpu_logical_map
-		 * entry.
-		 */
-		cell = of_get_property(dn, "reg", NULL);
-		if (!cell) {
-			pr_err("%s: missing reg property\n", dn->full_name);
+		if (hwid == INVALID_HWID)
 			goto next;
-		}
-		hwid = of_read_number(cell, of_n_addr_cells(dn));

-		/*
-		 * Non affinity bits must be set to 0 in the DT
-		 */
-		if (hwid & ~MPIDR_HWID_BITMASK) {
-			pr_err("%s: invalid reg property\n", dn->full_name);
+		if (is_mpidr_duplicate(cpu_count, hwid)) {
+			pr_err("%s: duplicate cpu reg properties in the DT\n",
+			       dn->full_name);
 			goto next;
 		}

 		/*
-		 * Duplicate MPIDRs are a recipe for disaster. Scan
-		 * all initialized entries and check for
-		 * duplicates. If any is found just ignore the cpu.
-		 * cpu_logical_map was initialized to INVALID_HWID to
-		 * avoid matching valid MPIDR values.
-		 */
-		for (i = 1; (i < cpu) && (i < NR_CPUS); i++) {
-			if (cpu_logical_map(i) == hwid) {
-				pr_err("%s: duplicate cpu reg properties in the DT\n",
-					dn->full_name);
-				goto next;
-			}
-		}
-
-		/*
 		 * The numbering scheme requires that the boot CPU
 		 * must be assigned logical id 0. Record it so that
 		 * the logical map built from DT is validated and can
@@ -392,38 +496,58 @@ void __init of_smp_init_cpus(void)
 			continue;
 		}

-		if (cpu >= NR_CPUS)
-			goto next;
-
-		if (cpu_read_ops(dn, cpu) != 0)
-			goto next;
-
-		if (cpu_ops[cpu]->cpu_init(dn, cpu))
+		if (cpu_count >= NR_CPUS)
 			goto next;

 		pr_debug("cpu logical map 0x%llx\n", hwid);
-		cpu_logical_map(cpu) = hwid;
+		cpu_logical_map(cpu_count) = hwid;
next:
-		cpu++;
+		cpu_count++;
 	}
+}
+
+/*
+ * Enumerate the possible CPU set from the device tree or ACPI and build the
+ * cpu logical map array containing MPIDR values related to logical
+ * cpus. Assumes that cpu_logical_map(0) has already been initialized.
+ */
+void __init smp_init_cpus(void)
+{
+	int i;

-	/* sanity check */
-	if (cpu > NR_CPUS)
-		pr_warning("no. of cores (%d) greater than configured maximum of %d - clipping\n",
-			   cpu, NR_CPUS);
+	if (acpi_disabled)
+		of_parse_and_init_cpus();
+	else
+		/*
+		 * do a walk of MADT to determine how many CPUs
+		 * we have including disabled CPUs, and get information
+		 * we need for SMP init
+		 */
+		acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+				      acpi_parse_gic_cpu_interface, 0);
+
+	if (cpu_count > NR_CPUS)
+		pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
+			cpu_count, NR_CPUS);

 	if (!bootcpu_valid) {
-		pr_err("DT missing boot CPU MPIDR, not enabling secondaries\n");
+		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
 		return;
 	}

 	/*
-	 * All the cpus that made it to the cpu_logical_map have been
-	 * validated so set them as possible cpus.
+	 * We need to set the cpu_logical_map entries before enabling
+	 * the cpus so that cpu processor description entries (DT cpu nodes
+	 * and ACPI MADT entries) can be retrieved by matching the cpu hwid
+	 * with entries in cpu_logical_map while initializing the cpus.
+	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
	 */
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_logical_map(i) != INVALID_HWID)
-			set_cpu_possible(i, true);
+	for (i = 1; i < NR_CPUS; i++) {
+		if (cpu_logical_map(i) != INVALID_HWID) {
+			if (smp_cpu_setup(i))
+				cpu_logical_map(i) = INVALID_HWID;
+		}
+	}
 }

void __init smp_prepare_cpus(unsigned int max_cpus)
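Both the DT and ACPI enumeration paths above now share is_mpidr_duplicate() and commit a hwid to cpu_logical_map only once. The invariant, reduced to a sketch (illustrative wrapper; cpu_count and the helpers are the ones introduced in this hunk):

	/* Claim the next logical slot for 'hwid', or reject it. Slot 0 is
	 * the boot CPU, pre-filled before enumeration runs. */
	static bool __init claim_hwid_sketch(u64 hwid)
	{
		if (hwid == INVALID_HWID)
			return false;		/* unparsable entry */
		if (is_mpidr_duplicate(cpu_count, hwid))
			return false;		/* firmware listed it twice */
		if (cpu_count >= NR_CPUS)
			return false;		/* beyond configured maximum */
		cpu_logical_map(cpu_count++) = hwid;
		return true;
	}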
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 14944e5b2..aef3605a8 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -49,8 +49,14 @@ static void write_pen_release(u64 val)
 }

-static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu)
+static int smp_spin_table_cpu_init(unsigned int cpu)
 {
+	struct device_node *dn;
+
+	dn = of_get_cpu_node(cpu, NULL);
+	if (!dn)
+		return -ENODEV;
+
 	/*
 	 * Determine the address from which the CPU is polling.
 	 */
 }
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index d7daf45ae..8297d5022 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -51,13 +51,13 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
 }

 /*
- * __cpu_suspend
+ * cpu_suspend
 *
 * arg: argument to pass to the finisher function
 * fn: finisher function pointer
 *
 */
-int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 {
 	struct mm_struct *mm = current->active_mm;
 	int ret;
@@ -82,7 +82,7 @@
 	 * We are resuming from reset with TTBR0_EL1 set to the
 	 * idmap to enable the MMU; restore the active_mm mappings in
 	 * TTBR0_EL1 unless the active_mm == &init_mm, in which case
-	 * the thread entered __cpu_suspend with TTBR0_EL1 set to
+	 * the thread entered cpu_suspend with TTBR0_EL1 set to
 	 * reserved TTBR0 page tables and should be restored as such.
 	 */
 	if (mm == &init_mm)
@@ -118,7 +118,6 @@
 }

struct sleep_save_sp sleep_save_sp;
-phys_addr_t sleep_idmap_phys;

static int __init cpu_suspend_init(void)
 {
@@ -132,9 +131,7 @@ static int __init cpu_suspend_init(void)
 	sleep_save_sp.save_ptr_stash = ctx_ptr;
 	sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
-	sleep_idmap_phys = virt_to_phys(idmap_pg_dir);
 	__flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp));
-	__flush_dcache_area(&sleep_idmap_phys, sizeof(sleep_idmap_phys));

 	return 0;
 }
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 1ef2940df..566bc4c35 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -335,8 +335,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 	if (call_undef_hook(regs) == 0)
 		return;

-	if (show_unhandled_signals && unhandled_signal(current, SIGILL) &&
-	    printk_ratelimit()) {
+	if (unhandled_signal(current, SIGILL) && show_unhandled_signals_ratelimited()) {
 		pr_info("%s[%d]: undefined instruction: pc=%p\n",
 			current->comm, task_pid_nr(current), pc);
 		dump_instr(KERN_INFO, regs);
@@ -363,7 +362,7 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
 	}
 #endif

-	if (show_unhandled_signals && printk_ratelimit()) {
+	if (show_unhandled_signals_ratelimited()) {
 		pr_info("%s[%d]: syscall %d\n", current->comm,
 			task_pid_nr(current), (int)regs->syscallno);
 		dump_instr("", regs);
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index ec37ab3f5..97bc68f4c 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -199,16 +199,15 @@ up_fail:
 */
void update_vsyscall(struct timekeeper *tk)
 {
-	struct timespec xtime_coarse;
 	u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter");

 	++vdso_data->tb_seq_count;
 	smp_wmb();

-	xtime_coarse = __current_kernel_time();
 	vdso_data->use_syscall			= use_syscall;
-	vdso_data->xtime_coarse_sec		= xtime_coarse.tv_sec;
-	vdso_data->xtime_coarse_nsec		= xtime_coarse.tv_nsec;
+	vdso_data->xtime_coarse_sec		= tk->xtime_sec;
+	vdso_data->xtime_coarse_nsec		= tk->tkr_mono.xtime_nsec >>
+						  tk->tkr_mono.shift;
 	vdso_data->wtm_clock_sec		= tk->wall_to_monotonic.tv_sec;
 	vdso_data->wtm_clock_nsec		= tk->wall_to_monotonic.tv_nsec;
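Sourcing the coarse fields from the timekeeper passed to update_vsyscall() keeps them coherent with the rest of the vDSO data page, which userspace reads under the tb_seq_count protocol: the writer bumps the counter with barriers around the update, and readers retry on an odd or changed count. An illustrative reader, with structure and names sketched rather than taken from the real vdso assembly:

	#include <stdint.h>

	/* Illustrative seqcount reader for a vDSO-style data page. */
	struct vdso_data_sketch {
		uint32_t tb_seq_count;		/* odd while an update is in flight */
		uint64_t xtime_coarse_sec;
		uint64_t xtime_coarse_nsec;	/* already scaled down by tkr shift */
	};

	static uint64_t read_coarse_sec_sketch(const volatile struct vdso_data_sketch *vd)
	{
		uint32_t seq;
		uint64_t sec;

		do {
			seq = vd->tb_seq_count;
			/* read barrier here, pairing with the writer's smp_wmb() */
			sec = vd->xtime_coarse_sec;
		} while ((seq & 1) || vd->tb_seq_count != seq);

		return sec;
	}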
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index a2c29865c..98073332e 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -38,6 +38,12 @@ jiffies = jiffies_64;
 	*(.hyp.text)					\
 	VMLINUX_SYMBOL(__hyp_text_end) = .;

+#define IDMAP_TEXT					\
+	. = ALIGN(SZ_4K);				\
+	VMLINUX_SYMBOL(__idmap_text_start) = .;		\
+	*(.idmap.text)					\
+	VMLINUX_SYMBOL(__idmap_text_end) = .;
+
 /*
 * The size of the PE/COFF section that covers the kernel image, which
 * runs from stext to _edata, must be a round multiple of the PE/COFF
@@ -95,6 +101,7 @@ SECTIONS
 			SCHED_TEXT
 			LOCK_TEXT
 			HYPERVISOR_TEXT
+			IDMAP_TEXT
 			*(.fixup)
 			*(.gnu.warning)
 		. = ALIGN(16);
@@ -167,11 +174,13 @@ SECTIONS
 }

 /*
- * The HYP init code can't be more than a page long,
+ * The HYP init code and ID map text can't be longer than a page each,
 * and should not cross a page boundary.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
	"HYP init code too big or misaligned")
+ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
+	"ID map text too big or misaligned")

 /*
 * If padding is applied before .head.text, virt<->phys conversions will fail.