Diffstat (limited to 'arch/arc/kernel')
-rw-r--r-- | arch/arc/kernel/Makefile        |   6
-rw-r--r-- | arch/arc/kernel/asm-offsets.c   |   5
-rw-r--r-- | arch/arc/kernel/devtree.c       |   2
-rw-r--r-- | arch/arc/kernel/entry-arcv2.S   | 239
-rw-r--r-- | arch/arc/kernel/entry-compact.S | 393
-rw-r--r-- | arch/arc/kernel/entry.S         | 527
-rw-r--r-- | arch/arc/kernel/head.S          |   4
-rw-r--r-- | arch/arc/kernel/intc-arcv2.c    | 142
-rw-r--r-- | arch/arc/kernel/intc-compact.c  | 225
-rw-r--r-- | arch/arc/kernel/irq.c           | 210
-rw-r--r-- | arch/arc/kernel/mcip.c          | 356
-rw-r--r-- | arch/arc/kernel/perf_event.c    |   6
-rw-r--r-- | arch/arc/kernel/process.c       |  14
-rw-r--r-- | arch/arc/kernel/ptrace.c        |  92
-rw-r--r-- | arch/arc/kernel/setup.c         |  84
-rw-r--r-- | arch/arc/kernel/signal.c        |  62
-rw-r--r-- | arch/arc/kernel/smp.c           |  24
-rw-r--r-- | arch/arc/kernel/stacktrace.c    |  18
-rw-r--r-- | arch/arc/kernel/time.c          | 128
-rw-r--r-- | arch/arc/kernel/troubleshoot.c  |  44
20 files changed, 1797 insertions, 784 deletions
diff --git a/arch/arc/kernel/Makefile b/arch/arc/kernel/Makefile index 113f2033d..e7f3625a1 100644 --- a/arch/arc/kernel/Makefile +++ b/arch/arc/kernel/Makefile @@ -8,12 +8,14 @@ # Pass UTS_MACHINE for user_regset definition CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' -obj-y := arcksyms.o setup.o irq.o time.o reset.o ptrace.o entry.o process.o +obj-y := arcksyms.o setup.o irq.o time.o reset.o ptrace.o process.o devtree.o obj-y += signal.o traps.o sys.o troubleshoot.o stacktrace.o disasm.o clk.o -obj-y += devtree.o +obj-$(CONFIG_ISA_ARCOMPACT) += entry-compact.o intc-compact.o +obj-$(CONFIG_ISA_ARCV2) += entry-arcv2.o intc-arcv2.o obj-$(CONFIG_MODULES) += arcksyms.o module.o obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_ARC_MCIP) += mcip.o obj-$(CONFIG_ARC_DW2_UNWIND) += unwind.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_ARC_EMUL_UNALIGNED) += unaligned.o diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c index 6c3aa0edb..ecaf34e92 100644 --- a/arch/arc/kernel/asm-offsets.c +++ b/arch/arc/kernel/asm-offsets.c @@ -37,6 +37,8 @@ int main(void) DEFINE(TASK_ACT_MM, offsetof(struct task_struct, active_mm)); DEFINE(TASK_TGID, offsetof(struct task_struct, tgid)); + DEFINE(TASK_PID, offsetof(struct task_struct, pid)); + DEFINE(TASK_COMM, offsetof(struct task_struct, comm)); DEFINE(MM_CTXT, offsetof(struct mm_struct, context)); DEFINE(MM_PGD, offsetof(struct mm_struct, pgd)); @@ -56,8 +58,11 @@ int main(void) DEFINE(PT_r5, offsetof(struct pt_regs, r5)); DEFINE(PT_r6, offsetof(struct pt_regs, r6)); DEFINE(PT_r7, offsetof(struct pt_regs, r7)); + DEFINE(PT_ret, offsetof(struct pt_regs, ret)); DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs)); DEFINE(SZ_PT_REGS, sizeof(struct pt_regs)); + DEFINE(PT_user_r25, offsetof(struct pt_regs, user_r25)); + return 0; } diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c index e32b54abf..7e844fd82 100644 --- a/arch/arc/kernel/devtree.c +++ b/arch/arc/kernel/devtree.c @@ -32,6 +32,8 @@ static void __init arc_set_early_base_baud(unsigned long dt_root) if (of_flat_dt_is_compatible(dt_root, "abilis,arc-tb10x")) arc_base_baud = core_clk/3; + else if (of_flat_dt_is_compatible(dt_root, "snps,arc-sdp")) + arc_base_baud = 33333333; /* Fixed 33MHz clk (AXS10x) */ else arc_base_baud = core_clk; } diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S new file mode 100644 index 000000000..bd7105d31 --- /dev/null +++ b/arch/arc/kernel/entry-arcv2.S @@ -0,0 +1,239 @@ +/* + * ARCv2 ISA based core Low Level Intr/Traps/Exceptions(non-TLB) Handling + * + * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/linkage.h> /* ARC_{EXTRY,EXIT} */ +#include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,TRAP...} */ +#include <asm/errno.h> +#include <asm/arcregs.h> +#include <asm/irqflags.h> + + .cpu HS + +#define VECTOR .word + +;############################ Vector Table ################################# + + .section .vector,"a",@progbits + .align 4 + +# Initial 16 slots are Exception Vectors +VECTOR stext ; Restart Vector (jump to entry point) +VECTOR mem_service ; Mem exception +VECTOR instr_service ; Instrn Error +VECTOR EV_MachineCheck ; Fatal Machine check +VECTOR EV_TLBMissI ; Intruction TLB miss +VECTOR EV_TLBMissD ; Data TLB miss +VECTOR EV_TLBProtV ; Protection Violation +VECTOR EV_PrivilegeV ; Privilege Violation +VECTOR EV_SWI ; Software Breakpoint +VECTOR EV_Trap ; Trap exception +VECTOR EV_Extension ; Extn Instruction Exception +VECTOR EV_DivZero ; Divide by Zero +VECTOR EV_DCError ; Data Cache Error +VECTOR EV_Misaligned ; Misaligned Data Access +VECTOR reserved ; Reserved slots +VECTOR reserved ; Reserved slots + +# Begin Interrupt Vectors +VECTOR handle_interrupt ; (16) Timer0 +VECTOR handle_interrupt ; unused (Timer1) +VECTOR handle_interrupt ; unused (WDT) +VECTOR handle_interrupt ; (19) ICI (inter core interrupt) +VECTOR handle_interrupt +VECTOR handle_interrupt +VECTOR handle_interrupt +VECTOR handle_interrupt ; (23) End of fixed IRQs + +.rept CONFIG_ARC_NUMBER_OF_INTERRUPTS - 8 + VECTOR handle_interrupt +.endr + + .section .text, "ax",@progbits + +res_service: ; processor restart + flag 0x1 ; not implemented + nop + nop + +reserved: ; processor restart + rtie ; jump to processor initializations + +;##################### Interrupt Handling ############################## + +ENTRY(handle_interrupt) + + INTERRUPT_PROLOGUE irq + + clri ; To make status32.IE agree with CPU internal state + + lr r0, [ICAUSE] + + mov blink, ret_from_exception + + b.d arch_do_IRQ + mov r1, sp + +END(handle_interrupt) + +;################### Non TLB Exception Handling ############################# + +ENTRY(EV_SWI) + flag 1 +END(EV_SWI) + +ENTRY(EV_DivZero) + flag 1 +END(EV_DivZero) + +ENTRY(EV_DCError) + flag 1 +END(EV_DCError) + +ENTRY(EV_Misaligned) + + EXCEPTION_PROLOGUE + + lr r0, [efa] ; Faulting Data address + mov r1, sp + + FAKE_RET_FROM_EXCPN + + SAVE_CALLEE_SAVED_USER + mov r2, sp ; callee_regs + + bl do_misaligned_access + + ; TBD: optimize - do this only if a callee reg was involved + ; either a dst of emulated LD/ST or src with address-writeback + RESTORE_CALLEE_SAVED_USER + + b ret_from_exception +END(EV_Misaligned) + +; --------------------------------------------- +; Protection Violation Exception Handler +; --------------------------------------------- + +ENTRY(EV_TLBProtV) + + EXCEPTION_PROLOGUE + + lr r0, [efa] ; Faulting Data address + mov r1, sp ; pt_regs + + FAKE_RET_FROM_EXCPN + + mov blink, ret_from_exception + b do_page_fault + +END(EV_TLBProtV) + +; From Linux standpoint Slow Path I/D TLB Miss is same a ProtV as they +; need to call do_page_fault(). 
+; ECR in pt_regs provides whether access was R/W/X + +.global call_do_page_fault +.set call_do_page_fault, EV_TLBProtV + +;############# Common Handlers for ARCompact and ARCv2 ############## + +#include "entry.S" + +;############# Return from Intr/Excp/Trap (ARCv2 ISA Specifics) ############## +; +; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap) +; IRQ shd definitely not happen between now and rtie +; All 2 entry points to here already disable interrupts + +.Lrestore_regs: + + ld r0, [sp, PT_status32] ; U/K mode at time of entry + lr r10, [AUX_IRQ_ACT] + + bmsk r11, r10, 15 ; AUX_IRQ_ACT.ACTIVE + breq r11, 0, .Lexcept_ret ; No intr active, ret from Exception + +;####### Return from Intr ####### + +debug_marker_l1: + bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot + +.Lisr_ret_fast_path: + ; Handle special case #1: (Entry via Exception, Return via IRQ) + ; + ; Exception in U mode, preempted in kernel, Intr taken (K mode), orig + ; task now returning to U mode (riding the Intr) + ; AUX_IRQ_ACTIVE won't have U bit set (since intr in K mode), hence SP + ; won't be switched to correct U mode value (from AUX_SP) + ; So force AUX_IRQ_ACT.U for such a case + + btst r0, STATUS_U_BIT ; Z flag set if K (Z clear for U) + bset.nz r11, r11, AUX_IRQ_ACT_BIT_U ; NZ means U + sr r11, [AUX_IRQ_ACT] + + INTERRUPT_EPILOGUE irq + rtie + +;####### Return from Exception / pure kernel mode ####### + +.Lexcept_ret: ; Expects r0 has PT_status32 + +debug_marker_syscall: + EXCEPTION_EPILOGUE + rtie + +;####### Return from Intr to insn in delay slot ####### + +; Handle special case #2: (Entry via Exception in Delay Slot, Return via IRQ) +; +; Intr returning to a Delay Slot (DS) insn +; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig +; entry was via Exception in DS which got preempted in kernel). +; +; IRQ RTIE won't reliably restore DE bit and/or BTA, needs handling +.Lintr_ret_to_delay_slot: +debug_marker_ds: + + ld r2, [@intr_to_DE_cnt] + add r2, r2, 1 + st r2, [@intr_to_DE_cnt] + + ld r2, [sp, PT_ret] + ld r3, [sp, PT_status32] + + bic r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK + st r0, [sp, PT_status32] + + mov r1, .Lintr_ret_to_delay_slot_2 + st r1, [sp, PT_ret] + + st r2, [sp, 0] + st r3, [sp, 4] + + b .Lisr_ret_fast_path + +.Lintr_ret_to_delay_slot_2: + sub sp, sp, SZ_PT_REGS + st r9, [sp, -4] + + ld r9, [sp, 0] + sr r9, [eret] + + ld r9, [sp, 4] + sr r9, [erstatus] + + ld r9, [sp, 8] + sr r9, [erbta] + + ld r9, [sp, -4] + add sp, sp, SZ_PT_REGS + rtie + +END(ret_from_exception) diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S new file mode 100644 index 000000000..15d457b44 --- /dev/null +++ b/arch/arc/kernel/entry-compact.S @@ -0,0 +1,393 @@ +/* + * Low Level Interrupts/Traps/Exceptions(non-TLB) Handling for ARCompact ISA + * + * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com) + * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * vineetg: May 2011 + * -Userspace unaligned access emulation + * + * vineetg: Feb 2011 (ptrace low level code fixes) + * -traced syscall return code (r0) was not saved into pt_regs for restoring + * into user reg-file when traded task rets to user space. 
+ * -syscalls needing arch-wrappers (mainly for passing sp as pt_regs) + * were not invoking post-syscall trace hook (jumping directly into + * ret_from_system_call) + * + * vineetg: Nov 2010: + * -Vector table jumps (@8 bytes) converted into branches (@4 bytes) + * -To maintain the slot size of 8 bytes/vector, added nop, which is + * not executed at runtime. + * + * vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK) + * -do_signal()invoked upon TIF_RESTORE_SIGMASK as well + * -Wrappers for sys_{,rt_}sigsuspend() nolonger needed as they don't + * need ptregs anymore + * + * Vineetg: Oct 2009 + * -In a rare scenario, Process gets a Priv-V exception and gets scheduled + * out. Since we don't do FAKE RTIE for Priv-V, CPU excpetion state remains + * active (AE bit enabled). This causes a double fault for a subseq valid + * exception. Thus FAKE RTIE needed in low level Priv-Violation handler. + * Instr Error could also cause similar scenario, so same there as well. + * + * Vineetg: March 2009 (Supporting 2 levels of Interrupts) + * + * Vineetg: Aug 28th 2008: Bug #94984 + * -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap + * Normally CPU does this automatically, however when doing FAKE rtie, + * we need to explicitly do this. The problem in macros + * FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit + * was being "CLEARED" rather then "SET". Since it is Loop INHIBIT Bit, + * setting it and not clearing it clears ZOL context + * + * Vineetg: May 16th, 2008 + * - r25 now contains the Current Task when in kernel + * + * Vineetg: Dec 22, 2007 + * Minor Surgery of Low Level ISR to make it SMP safe + * - MMU_SCRATCH0 Reg used for freeing up r9 in Level 1 ISR + * - _current_task is made an array of NR_CPUS + * - Access of _current_task wrapped inside a macro so that if hardware + * team agrees for a dedicated reg, no other code is touched + * + * Amit Bhor, Rahul Trivedi, Kanika Nema, Sameer Dhavale : Codito Tech 2004 + */ + +#include <linux/errno.h> +#include <linux/linkage.h> /* {EXTRY,EXIT} */ +#include <asm/entry.h> +#include <asm/irqflags.h> + + .cpu A7 + +;############################ Vector Table ################################# + +.macro VECTOR lbl +#if 1 /* Just in case, build breaks */ + j \lbl +#else + b \lbl + nop +#endif +.endm + + .section .vector, "ax",@progbits + .align 4 + +/* Each entry in the vector table must occupy 2 words. Since it is a jump + * across sections (.vector to .text) we are gauranteed that 'j somewhere' + * will use the 'j limm' form of the intrsuction as long as somewhere is in + * a section other than .vector. 
+ */ + +; ********* Critical System Events ********************** +VECTOR res_service ; 0x0, Restart Vector (0x0) +VECTOR mem_service ; 0x8, Mem exception (0x1) +VECTOR instr_service ; 0x10, Instrn Error (0x2) + +; ******************** Device ISRs ********************** +#ifdef CONFIG_ARC_IRQ3_LV2 +VECTOR handle_interrupt_level2 +#else +VECTOR handle_interrupt_level1 +#endif + +VECTOR handle_interrupt_level1 + +#ifdef CONFIG_ARC_IRQ5_LV2 +VECTOR handle_interrupt_level2 +#else +VECTOR handle_interrupt_level1 +#endif + +#ifdef CONFIG_ARC_IRQ6_LV2 +VECTOR handle_interrupt_level2 +#else +VECTOR handle_interrupt_level1 +#endif + +.rept 25 +VECTOR handle_interrupt_level1 ; Other devices +.endr + +/* FOR ARC600: timer = 0x3, uart = 0x8, emac = 0x10 */ + +; ******************** Exceptions ********************** +VECTOR EV_MachineCheck ; 0x100, Fatal Machine check (0x20) +VECTOR EV_TLBMissI ; 0x108, Intruction TLB miss (0x21) +VECTOR EV_TLBMissD ; 0x110, Data TLB miss (0x22) +VECTOR EV_TLBProtV ; 0x118, Protection Violation (0x23) + ; or Misaligned Access +VECTOR EV_PrivilegeV ; 0x120, Privilege Violation (0x24) +VECTOR EV_Trap ; 0x128, Trap exception (0x25) +VECTOR EV_Extension ; 0x130, Extn Intruction Excp (0x26) + +.rept 24 +VECTOR reserved ; Reserved Exceptions +.endr + + +;##################### Scratch Mem for IRQ stack switching ############# + +ARCFP_DATA int1_saved_reg + .align 32 + .type int1_saved_reg, @object + .size int1_saved_reg, 4 +int1_saved_reg: + .zero 4 + +/* Each Interrupt level needs its own scratch */ +#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS + +ARCFP_DATA int2_saved_reg + .type int2_saved_reg, @object + .size int2_saved_reg, 4 +int2_saved_reg: + .zero 4 + +#endif + +; --------------------------------------------- + .section .text, "ax",@progbits + +res_service: ; processor restart + flag 0x1 ; not implemented + nop + nop + +reserved: ; processor restart + rtie ; jump to processor initializations + +;##################### Interrupt Handling ############################## + +#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS +; --------------------------------------------- +; Level 2 ISR: Can interrupt a Level 1 ISR +; --------------------------------------------- +ENTRY(handle_interrupt_level2) + + INTERRUPT_PROLOGUE 2 + + ;------------------------------------------------------ + ; if L2 IRQ interrupted a L1 ISR, disable preemption + ;------------------------------------------------------ + + ld r9, [sp, PT_status32] ; get statu32_l2 (saved in pt_regs) + bbit0 r9, STATUS_A1_BIT, 1f ; L1 not active when L2 IRQ, so normal + + ; A1 is set in status32_l2 + ; bump thread_info->preempt_count (Disable preemption) + GET_CURR_THR_INFO_FROM_SP r10 + ld r9, [r10, THREAD_INFO_PREEMPT_COUNT] + add r9, r9, 1 + st r9, [r10, THREAD_INFO_PREEMPT_COUNT] + +1: + ;------------------------------------------------------ + ; setup params for Linux common ISR and invoke it + ;------------------------------------------------------ + lr r0, [icause2] + and r0, r0, 0x1f + + bl.d @arch_do_IRQ + mov r1, sp + + mov r8,0x2 + sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg + + b ret_from_exception + +END(handle_interrupt_level2) + +#endif + +; --------------------------------------------- +; Level 1 ISR +; --------------------------------------------- +ENTRY(handle_interrupt_level1) + + INTERRUPT_PROLOGUE 1 + + lr r0, [icause1] + and r0, r0, 0x1f + +#ifdef CONFIG_TRACE_IRQFLAGS + ; icause1 needs to be read early, before calling tracing, which + ; can clobber scratch regs, hence use of stack to stash it + push r0 + 
TRACE_ASM_IRQ_DISABLE + pop r0 +#endif + + bl.d @arch_do_IRQ + mov r1, sp + + mov r8,0x1 + sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg + + b ret_from_exception +END(handle_interrupt_level1) + +;################### Non TLB Exception Handling ############################# + +; --------------------------------------------- +; Protection Violation Exception Handler +; --------------------------------------------- + +ENTRY(EV_TLBProtV) + + EXCEPTION_PROLOGUE + + lr r2, [ecr] + lr r0, [efa] ; Faulting Data address (not part of pt_regs saved above) + + ; Exception auto-disables further Intr/exceptions. + ; Re-enable them by pretending to return from exception + ; (so rest of handler executes in pure K mode) + + FAKE_RET_FROM_EXCPN + + mov r1, sp ; Handle to pt_regs + + ;------ (5) Type of Protection Violation? ---------- + ; + ; ProtV Hardware Exception is triggered for Access Faults of 2 types + ; -Access Violaton : 00_23_(00|01|02|03)_00 + ; x r w r+w + ; -Unaligned Access : 00_23_04_00 + ; + bbit1 r2, ECR_C_BIT_PROTV_MISALIG_DATA, 4f + + ;========= (6a) Access Violation Processing ======== + bl do_page_fault + b ret_from_exception + + ;========== (6b) Non aligned access ============ +4: + + SAVE_CALLEE_SAVED_USER + mov r2, sp ; callee_regs + + bl do_misaligned_access + + ; TBD: optimize - do this only if a callee reg was involved + ; either a dst of emulated LD/ST or src with address-writeback + RESTORE_CALLEE_SAVED_USER + + b ret_from_exception + +END(EV_TLBProtV) + +; Wrapper for Linux page fault handler called from EV_TLBMiss* +; Very similar to ProtV handler case (6a) above, but avoids the extra checks +; for Misaligned access +; +ENTRY(call_do_page_fault) + + EXCEPTION_PROLOGUE + lr r0, [efa] ; Faulting Data address + mov r1, sp + FAKE_RET_FROM_EXCPN + + mov blink, ret_from_exception + b do_page_fault + +END(call_do_page_fault) + +;############# Common Handlers for ARCompact and ARCv2 ############## + +#include "entry.S" + +;############# Return from Intr/Excp/Trap (ARC Specifics) ############## +; +; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap) +; IRQ shd definitely not happen between now and rtie +; All 2 entry points to here already disable interrupts + +.Lrestore_regs: + + TRACE_ASM_IRQ_ENABLE + + lr r10, [status32] + + ; Restore REG File. In case multiple Events outstanding, + ; use the same priorty as rtie: EXCPN, L2 IRQ, L1 IRQ, None + ; Note that we use realtime STATUS32 (not pt_regs->status32) to + ; decide that. + + ; if Returning from Exception + btst r10, STATUS_AE_BIT + bnz .Lexcep_ret + + ; Not Exception so maybe Interrupts (Level 1 or 2) + +#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS + + ; Level 2 interrupt return Path - from hardware standpoint + bbit0 r10, STATUS_A2_BIT, not_level2_interrupt + + ;------------------------------------------------------------------ + ; However the context returning might not have taken L2 intr itself + ; e.g. Task'A' user-code -> L2 intr -> schedule -> 'B' user-code ret + ; Special considerations needed for the context which took L2 intr + + ld r9, [sp, PT_event] ; Ensure this is L2 intr context + brne r9, event_IRQ2, 149f + + ;------------------------------------------------------------------ + ; if L2 IRQ interrupted an L1 ISR, we'd disabled preemption earlier + ; so that sched doesn't move to new task, causing L1 to be delayed + ; undeterministically. 
Now that we've achieved that, let's reset + ; things to what they were, before returning from L2 context + ;---------------------------------------------------------------- + + ld r9, [sp, PT_status32] ; get statu32_l2 (saved in pt_regs) + bbit0 r9, STATUS_A1_BIT, 149f ; L1 not active when L2 IRQ, so normal + + ; decrement thread_info->preempt_count (re-enable preemption) + GET_CURR_THR_INFO_FROM_SP r10 + ld r9, [r10, THREAD_INFO_PREEMPT_COUNT] + + ; paranoid check, given A1 was active when A2 happened, preempt count + ; must not be 0 because we would have incremented it. + ; If this does happen we simply HALT as it means a BUG !!! + cmp r9, 0 + bnz 2f + flag 1 + +2: + sub r9, r9, 1 + st r9, [r10, THREAD_INFO_PREEMPT_COUNT] + +149: + ;return from level 2 + INTERRUPT_EPILOGUE 2 +debug_marker_l2: + rtie + +not_level2_interrupt: + +#endif + + bbit0 r10, STATUS_A1_BIT, .Lpure_k_mode_ret + + ;return from level 1 + INTERRUPT_EPILOGUE 1 +debug_marker_l1: + rtie + +.Lexcep_ret: +.Lpure_k_mode_ret: + + ;this case is for syscalls or Exceptions or pure kernel mode + + EXCEPTION_EPILOGUE +debug_marker_syscall: + rtie + +END(ret_from_exception) diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S index d868289c5..f7a82fd4d 100644 --- a/arch/arc/kernel/entry.S +++ b/arch/arc/kernel/entry.S @@ -1,60 +1,13 @@ /* - * Low Level Interrupts/Traps/Exceptions(non-TLB) Handling for ARC + * Common Low Level Interrupts/Traps/Exceptions(non-TLB) Handling for ARC + * (included from entry-<isa>.S * + * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com) * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. - * - * vineetg: May 2011 - * -Userspace unaligned access emulation - * - * vineetg: Feb 2011 (ptrace low level code fixes) - * -traced syscall return code (r0) was not saved into pt_regs for restoring - * into user reg-file when traded task rets to user space. - * -syscalls needing arch-wrappers (mainly for passing sp as pt_regs) - * were not invoking post-syscall trace hook (jumping directly into - * ret_from_system_call) - * - * vineetg: Nov 2010: - * -Vector table jumps (@8 bytes) converted into branches (@4 bytes) - * -To maintain the slot size of 8 bytes/vector, added nop, which is - * not executed at runtime. - * - * vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK) - * -do_signal()invoked upon TIF_RESTORE_SIGMASK as well - * -Wrappers for sys_{,rt_}sigsuspend() nolonger needed as they don't - * need ptregs anymore - * - * Vineetg: Oct 2009 - * -In a rare scenario, Process gets a Priv-V exception and gets scheduled - * out. Since we don't do FAKE RTIE for Priv-V, CPU excpetion state remains - * active (AE bit enabled). This causes a double fault for a subseq valid - * exception. Thus FAKE RTIE needed in low level Priv-Violation handler. - * Instr Error could also cause similar scenario, so same there as well. - * - * Vineetg: March 2009 (Supporting 2 levels of Interrupts) - * - * Vineetg: Aug 28th 2008: Bug #94984 - * -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap - * Normally CPU does this automatically, however when doing FAKE rtie, - * we need to explicitly do this. The problem in macros - * FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit - * was being "CLEARED" rather then "SET". 
Since it is Loop INHIBIT Bit, - * setting it and not clearing it clears ZOL context - * - * Vineetg: May 16th, 2008 - * - r25 now contains the Current Task when in kernel - * - * Vineetg: Dec 22, 2007 - * Minor Surgery of Low Level ISR to make it SMP safe - * - MMU_SCRATCH0 Reg used for freeing up r9 in Level 1 ISR - * - _current_task is made an array of NR_CPUS - * - Access of _current_task wrapped inside a macro so that if hardware - * team agrees for a dedicated reg, no other code is touched - * - * Amit Bhor, Rahul Trivedi, Kanika Nema, Sameer Dhavale : Codito Tech 2004 */ /*------------------------------------------------------------------ @@ -67,206 +20,59 @@ * Global Pointer (gp) r26 * Frame Pointer (fp) r27 * Stack Pointer (sp) r28 - * Interrupt link register (ilink1) r29 - * Interrupt link register (ilink2) r30 * Branch link register (blink) r31 *------------------------------------------------------------------ */ - .cpu A7 - -;############################ Vector Table ################################# - -.macro VECTOR lbl -#if 1 /* Just in case, build breaks */ - j \lbl -#else - b \lbl - nop -#endif -.endm - - .section .vector, "ax",@progbits - .align 4 - -/* Each entry in the vector table must occupy 2 words. Since it is a jump - * across sections (.vector to .text) we are gauranteed that 'j somewhere' - * will use the 'j limm' form of the intrsuction as long as somewhere is in - * a section other than .vector. - */ - -; ********* Critical System Events ********************** -VECTOR res_service ; 0x0, Restart Vector (0x0) -VECTOR mem_service ; 0x8, Mem exception (0x1) -VECTOR instr_service ; 0x10, Instrn Error (0x2) - -; ******************** Device ISRs ********************** -#ifdef CONFIG_ARC_IRQ3_LV2 -VECTOR handle_interrupt_level2 -#else -VECTOR handle_interrupt_level1 -#endif - -VECTOR handle_interrupt_level1 - -#ifdef CONFIG_ARC_IRQ5_LV2 -VECTOR handle_interrupt_level2 -#else -VECTOR handle_interrupt_level1 -#endif - -#ifdef CONFIG_ARC_IRQ6_LV2 -VECTOR handle_interrupt_level2 -#else -VECTOR handle_interrupt_level1 -#endif - -.rept 25 -VECTOR handle_interrupt_level1 ; Other devices -.endr - -/* FOR ARC600: timer = 0x3, uart = 0x8, emac = 0x10 */ - -; ******************** Exceptions ********************** -VECTOR EV_MachineCheck ; 0x100, Fatal Machine check (0x20) -VECTOR EV_TLBMissI ; 0x108, Intruction TLB miss (0x21) -VECTOR EV_TLBMissD ; 0x110, Data TLB miss (0x22) -VECTOR EV_TLBProtV ; 0x118, Protection Violation (0x23) - ; or Misaligned Access -VECTOR EV_PrivilegeV ; 0x120, Privilege Violation (0x24) -VECTOR EV_Trap ; 0x128, Trap exception (0x25) -VECTOR EV_Extension ; 0x130, Extn Intruction Excp (0x26) - -.rept 24 -VECTOR reserved ; Reserved Exceptions -.endr - -#include <linux/linkage.h> /* {EXTRY,EXIT} */ -#include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,SYS...} */ -#include <asm/errno.h> -#include <asm/arcregs.h> -#include <asm/irqflags.h> - -;##################### Scratch Mem for IRQ stack switching ############# - -ARCFP_DATA int1_saved_reg - .align 32 - .type int1_saved_reg, @object - .size int1_saved_reg, 4 -int1_saved_reg: - .zero 4 - -/* Each Interrupt level needs its own scratch */ -#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS - -ARCFP_DATA int2_saved_reg - .type int2_saved_reg, @object - .size int2_saved_reg, 4 -int2_saved_reg: - .zero 4 - -#endif - -; --------------------------------------------- - .section .text, "ax",@progbits - -res_service: ; processor restart - flag 0x1 ; not implemented - nop - nop - -reserved: ; processor restart - rtie ; jump to processor 
initializations - -;##################### Interrupt Handling ############################## - -#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS -; --------------------------------------------- -; Level 2 ISR: Can interrupt a Level 1 ISR -; --------------------------------------------- -ENTRY(handle_interrupt_level2) +;################### Special Sys Call Wrappers ########################## - ; TODO-vineetg for SMP this wont work - ; free up r9 as scratchpad - st r9, [@int2_saved_reg] +ENTRY(sys_clone_wrapper) + SAVE_CALLEE_SAVED_USER + bl @sys_clone + DISCARD_CALLEE_SAVED_USER - ;Which mode (user/kernel) was the system in when intr occured - lr r9, [status32_l2] + GET_CURR_THR_INFO_FLAGS r10 + btst r10, TIF_SYSCALL_TRACE + bnz tracesys_exit - SWITCH_TO_KERNEL_STK - SAVE_ALL_INT2 + b ret_from_system_call +END(sys_clone_wrapper) - ;------------------------------------------------------ - ; if L2 IRQ interrupted a L1 ISR, disable preemption - ;------------------------------------------------------ +ENTRY(ret_from_fork) + ; when the forked child comes here from the __switch_to function + ; r0 has the last task pointer. + ; put last task in scheduler queue + bl @schedule_tail - ld r9, [sp, PT_status32] ; get statu32_l2 (saved in pt_regs) - bbit0 r9, STATUS_A1_BIT, 1f ; L1 not active when L2 IRQ, so normal + ld r9, [sp, PT_status32] + brne r9, 0, 1f - ; A1 is set in status32_l2 - ; bump thread_info->preempt_count (Disable preemption) - GET_CURR_THR_INFO_FROM_SP r10 - ld r9, [r10, THREAD_INFO_PREEMPT_COUNT] - add r9, r9, 1 - st r9, [r10, THREAD_INFO_PREEMPT_COUNT] + jl.d [r14] ; kernel thread entry point + mov r0, r13 ; (see PF_KTHREAD block in copy_thread) 1: - ;------------------------------------------------------ - ; setup params for Linux common ISR and invoke it - ;------------------------------------------------------ - lr r0, [icause2] - and r0, r0, 0x1f - - bl.d @arch_do_IRQ - mov r1, sp - - mov r8,0x2 - sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg - - b ret_from_exception - -END(handle_interrupt_level2) - -#endif - -; --------------------------------------------- -; Level 1 ISR -; --------------------------------------------- -ENTRY(handle_interrupt_level1) - - /* free up r9 as scratchpad */ -#ifdef CONFIG_SMP - sr r9, [ARC_REG_SCRATCH_DATA0] -#else - st r9, [@int1_saved_reg] -#endif - - ;Which mode (user/kernel) was the system in when intr occured - lr r9, [status32_l1] - - SWITCH_TO_KERNEL_STK - SAVE_ALL_INT1 + ; Return to user space + ; 1. Any forked task (Reach here via BRne above) + ; 2. First ever init task (Reach here via return from JL above) + ; This is the historic "kernel_execve" use-case, to return to init + ; user mode, in a round about way since that is always done from + ; a kernel thread which is executed via JL above but always returns + ; out whenever kernel_execve (now inline do_fork()) is involved + b ret_from_exception +END(ret_from_fork) - lr r0, [icause1] - and r0, r0, 0x1f +#ifdef CONFIG_ARC_DW2_UNWIND +; Workaround for bug 94179 (STAR ): +; Despite -fasynchronous-unwind-tables, linker is not making dwarf2 unwinder +; section (.debug_frame) as loadable. So we force it here. +; This also fixes STAR 9000487933 where the prev-workaround (objcopy --setflag) +; would not work after a clean build due to kernel build system dependencies. 
+.section .debug_frame, "wa",@progbits -#ifdef CONFIG_TRACE_IRQFLAGS - ; icause1 needs to be read early, before calling tracing, which - ; can clobber scratch regs, hence use of stack to stash it - push r0 - TRACE_ASM_IRQ_DISABLE - pop r0 +; Reset to .text as this file is included in entry-<isa>.S +.section .text, "ax",@progbits #endif - bl.d @arch_do_IRQ - mov r1, sp - - mov r8,0x1 - sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg - - b ret_from_exception -END(handle_interrupt_level1) - ;################### Non TLB Exception Handling ############################# ; --------------------------------------------- @@ -280,7 +86,7 @@ ENTRY(instr_service) lr r0, [efa] mov r1, sp - FAKE_RET_FROM_EXCPN r9 + FAKE_RET_FROM_EXCPN bl do_insterror_or_kprobe b ret_from_exception @@ -297,7 +103,7 @@ ENTRY(mem_service) lr r0, [efa] mov r1, sp - FAKE_RET_FROM_EXCPN r9 + FAKE_RET_FROM_EXCPN bl do_memory_error b ret_from_exception @@ -334,60 +140,6 @@ ENTRY(EV_MachineCheck) END(EV_MachineCheck) ; --------------------------------------------- -; Protection Violation Exception Handler -; --------------------------------------------- - -ENTRY(EV_TLBProtV) - - EXCEPTION_PROLOGUE - - ;---------(3) Save some more regs----------------- - ; vineetg: Mar 6th: Random Seg Fault issue #1 - ; ecr and efa were not saved in case an Intr sneaks in - ; after fake rtie - - lr r2, [ecr] - lr r0, [efa] ; Faulting Data address - - ; --------(4) Return from CPU Exception Mode --------- - ; Fake a rtie, but rtie to next label - ; That way, subsequently, do_page_fault ( ) executes in pure kernel - ; mode with further Exceptions enabled - - FAKE_RET_FROM_EXCPN r9 - - mov r1, sp - - ;------ (5) Type of Protection Violation? ---------- - ; - ; ProtV Hardware Exception is triggered for Access Faults of 2 types - ; -Access Violaton : 00_23_(00|01|02|03)_00 - ; x r w r+w - ; -Unaligned Access : 00_23_04_00 - ; - bbit1 r2, ECR_C_BIT_PROTV_MISALIG_DATA, 4f - - ;========= (6a) Access Violation Processing ======== - bl do_page_fault - b ret_from_exception - - ;========== (6b) Non aligned access ============ -4: - - SAVE_CALLEE_SAVED_USER - mov r2, sp ; callee_regs - - bl do_misaligned_access - - ; TBD: optimize - do this only if a callee reg was involved - ; either a dst of emulated LD/ST or src with address-writeback - RESTORE_CALLEE_SAVED_USER - - b ret_from_exception - -END(EV_TLBProtV) - -; --------------------------------------------- ; Privilege Violation Exception Handler ; --------------------------------------------- ENTRY(EV_PrivilegeV) @@ -397,7 +149,7 @@ ENTRY(EV_PrivilegeV) lr r0, [efa] mov r1, sp - FAKE_RET_FROM_EXCPN r9 + FAKE_RET_FROM_EXCPN bl do_privilege_fault b ret_from_exception @@ -413,14 +165,17 @@ ENTRY(EV_Extension) lr r0, [efa] mov r1, sp - FAKE_RET_FROM_EXCPN r9 + FAKE_RET_FROM_EXCPN bl do_extension_fault b ret_from_exception END(EV_Extension) -;######################### System Call Tracing ######################### +;################ Trap Handling (Syscall, Breakpoint) ################## +; --------------------------------------------- +; syscall Tracing +; --------------------------------------------- tracesys: ; save EFA in case tracer wants the PC of traced task ; using ERET won't work since next-PC has already committed @@ -463,10 +218,9 @@ tracesys_exit: b ret_from_exception ; NOT ret_from_system_call at is saves r0 which ; we'd done before calling post hook above -;################### Break Point TRAP ########################## - - ; ======= (5b) Trap is due to Break-Point ========= - +; 
--------------------------------------------- +; Breakpoint TRAP +; --------------------------------------------- trap_with_param: ; stop_pc info by gdb needs this info @@ -475,7 +229,7 @@ trap_with_param: ; Now that we have read EFA, it is safe to do "fake" rtie ; and get out of CPU exception mode - FAKE_RET_FROM_EXCPN r11 + FAKE_RET_FROM_EXCPN ; Save callee regs in case gdb wants to have a look ; SP will grow up by size of CALLEE Reg-File @@ -494,37 +248,33 @@ trap_with_param: b ret_from_exception -;##################### Trap Handling ############################## -; -; EV_Trap caused by TRAP_S and TRAP0 instructions. -;------------------------------------------------------------------ -; (1) System Calls -; :parameters in r0-r7. -; :r8 has the system call number -; (2) Break Points -;------------------------------------------------------------------ +; --------------------------------------------- +; syscall TRAP +; ABI: (r0-r7) upto 8 args, (r8) syscall number +; --------------------------------------------- ENTRY(EV_Trap) EXCEPTION_PROLOGUE - ;------- (4) What caused the Trap -------------- - lr r12, [ecr] - bmsk.f 0, r12, 7 + ;============ TRAP 1 :breakpoints + ; Check ECR for trap with arg (PROLOGUE ensures r9 has ECR) + bmsk.f 0, r9, 7 bnz trap_with_param - ; ======= (5a) Trap is due to System Call ======== + ;============ TRAP (no param): syscall top level - ; Before doing anything, return from CPU Exception Mode - FAKE_RET_FROM_EXCPN r11 + ; First return from Exception to pure K mode (Exception/IRQs renabled) + FAKE_RET_FROM_EXCPN - ; If syscall tracing ongoing, invoke pre-pos-hooks + ; If syscall tracing ongoing, invoke pre-post-hooks GET_CURR_THR_INFO_FLAGS r10 btst r10, TIF_SYSCALL_TRACE bnz tracesys ; this never comes back - ;============ This is normal System Call case ========== - ; Sys-call num shd not exceed the total system calls avail + ;============ Normal syscall case + + ; syscall num shd not exceed the total system calls avail cmp r8, NR_syscalls mov.hi r0, -ENOSYS bhi ret_from_system_call @@ -565,7 +315,7 @@ resume_user_mode_begin: ; Fast Path return to user mode if no pending work GET_CURR_THR_INFO_FLAGS r9 and.f 0, r9, _TIF_WORK_MASK - bz restore_regs + bz .Lrestore_regs ; --- (Slow Path #1) task preemption --- bbit0 r9, TIF_NEED_RESCHED, .Lchk_pend_signals @@ -624,11 +374,11 @@ resume_kernel_mode: ; Can't preempt if preemption disabled GET_CURR_THR_INFO_FROM_SP r10 ld r8, [r10, THREAD_INFO_PREEMPT_COUNT] - brne r8, 0, restore_regs + brne r8, 0, .Lrestore_regs ; check if this task's NEED_RESCHED flag set ld r9, [r10, THREAD_INFO_FLAGS] - bbit0 r9, TIF_NEED_RESCHED, restore_regs + bbit0 r9, TIF_NEED_RESCHED, .Lrestore_regs ; Invoke PREEMPTION bl preempt_schedule_irq @@ -636,142 +386,7 @@ resume_kernel_mode: ; preempt_schedule_irq() always returns with IRQ disabled #endif - ; fall through - -;############# Return from Intr/Excp/Trap (ARC Specifics) ############## -; -; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap) -; IRQ shd definitely not happen between now and rtie -; All 2 entry points to here already disable interrupts - -restore_regs : - - TRACE_ASM_IRQ_ENABLE - - lr r10, [status32] - - ; Restore REG File. In case multiple Events outstanding, - ; use the same priorty as rtie: EXCPN, L2 IRQ, L1 IRQ, None - ; Note that we use realtime STATUS32 (not pt_regs->status32) to - ; decide that. 
- - ; if Returning from Exception - bbit0 r10, STATUS_AE_BIT, not_exception - RESTORE_ALL_SYS - rtie - - ; Not Exception so maybe Interrupts (Level 1 or 2) - -not_exception: - -#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS - - ; Level 2 interrupt return Path - from hardware standpoint - bbit0 r10, STATUS_A2_BIT, not_level2_interrupt - - ;------------------------------------------------------------------ - ; However the context returning might not have taken L2 intr itself - ; e.g. Task'A' user-code -> L2 intr -> schedule -> 'B' user-code ret - ; Special considerations needed for the context which took L2 intr - - ld r9, [sp, PT_event] ; Ensure this is L2 intr context - brne r9, event_IRQ2, 149f - - ;------------------------------------------------------------------ - ; if L2 IRQ interrupted an L1 ISR, we'd disabled preemption earlier - ; so that sched doesn't move to new task, causing L1 to be delayed - ; undeterministically. Now that we've achieved that, let's reset - ; things to what they were, before returning from L2 context - ;---------------------------------------------------------------- - - ld r9, [sp, PT_status32] ; get statu32_l2 (saved in pt_regs) - bbit0 r9, STATUS_A1_BIT, 149f ; L1 not active when L2 IRQ, so normal - - ; decrement thread_info->preempt_count (re-enable preemption) - GET_CURR_THR_INFO_FROM_SP r10 - ld r9, [r10, THREAD_INFO_PREEMPT_COUNT] - - ; paranoid check, given A1 was active when A2 happened, preempt count - ; must not be 0 because we would have incremented it. - ; If this does happen we simply HALT as it means a BUG !!! - cmp r9, 0 - bnz 2f - flag 1 - -2: - sub r9, r9, 1 - st r9, [r10, THREAD_INFO_PREEMPT_COUNT] - -149: - ;return from level 2 - RESTORE_ALL_INT2 -debug_marker_l2: - rtie - -not_level2_interrupt: - -#endif - - bbit0 r10, STATUS_A1_BIT, not_level1_interrupt + b .Lrestore_regs - ;return from level 1 +##### DONT ADD CODE HERE - .Lrestore_regs actually follows in entry-<isa>.S - RESTORE_ALL_INT1 -debug_marker_l1: - rtie - -not_level1_interrupt: - - ;this case is for syscalls or Exceptions (with fake rtie) - - RESTORE_ALL_SYS -debug_marker_syscall: - rtie - -END(ret_from_exception) - -ENTRY(ret_from_fork) - ; when the forked child comes here from the __switch_to function - ; r0 has the last task pointer. - ; put last task in scheduler queue - bl @schedule_tail - - ld r9, [sp, PT_status32] - brne r9, 0, 1f - - jl.d [r14] ; kernel thread entry point - mov r0, r13 ; (see PF_KTHREAD block in copy_thread) - -1: - ; Return to user space - ; 1. Any forked task (Reach here via BRne above) - ; 2. First ever init task (Reach here via return from JL above) - ; This is the historic "kernel_execve" use-case, to return to init - ; user mode, in a round about way since that is always done from - ; a kernel thread which is executed via JL above but always returns - ; out whenever kernel_execve (now inline do_fork()) is involved - b ret_from_exception -END(ret_from_fork) - -;################### Special Sys Call Wrappers ########################## - -ENTRY(sys_clone_wrapper) - SAVE_CALLEE_SAVED_USER - bl @sys_clone - DISCARD_CALLEE_SAVED_USER - - GET_CURR_THR_INFO_FLAGS r10 - btst r10, TIF_SYSCALL_TRACE - bnz tracesys_exit - - b ret_from_system_call -END(sys_clone_wrapper) - -#ifdef CONFIG_ARC_DW2_UNWIND -; Workaround for bug 94179 (STAR ): -; Despite -fasynchronous-unwind-tables, linker is not making dwarf2 unwinder -; section (.debug_frame) as loadable. So we force it here. 
-; This also fixes STAR 9000487933 where the prev-workaround (objcopy --setflag) -; would not work after a clean build due to kernel build system dependencies. -.section .debug_frame, "wa",@progbits -#endif diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S index b0e8666fd..812f95e6a 100644 --- a/arch/arc/kernel/head.S +++ b/arch/arc/kernel/head.S @@ -49,8 +49,6 @@ 1: .endm - .cpu A7 - .section .init.text, "ax",@progbits .type stext, @function .globl stext @@ -83,6 +81,7 @@ stext: st.ab 0, [r5, 4] 1: +#ifdef CONFIG_ARC_UBOOT_SUPPORT ; Uboot - kernel ABI ; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2 ; r1 = magic number (board identity, unused as of now @@ -90,6 +89,7 @@ stext: ; These are handled later in setup_arch() st r0, [@uboot_tag] st r2, [@uboot_arg] +#endif ; setup "current" tsk and optionally cache it in dedicated r25 mov r9, @init_task diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c new file mode 100644 index 000000000..26c156827 --- /dev/null +++ b/arch/arc/kernel/intc-arcv2.c @@ -0,0 +1,142 @@ +/* + * Copyright (C) 2014 Synopsys, Inc. (www.synopsys.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/irqdomain.h> +#include <linux/irqchip.h> +#include <asm/irq.h> + +/* + * Early Hardware specific Interrupt setup + * -Called very early (start_kernel -> setup_arch -> setup_processor) + * -Platform Independent (must for any ARC Core) + * -Needed for each CPU (hence not foldable into init_IRQ) + */ +void arc_init_IRQ(void) +{ + unsigned int tmp; + + struct aux_irq_ctrl { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int res3:18, save_idx_regs:1, res2:1, + save_u_to_u:1, save_lp_regs:1, save_blink:1, + res:4, save_nr_gpr_pairs:5; +#else + unsigned int save_nr_gpr_pairs:5, res:4, + save_blink:1, save_lp_regs:1, save_u_to_u:1, + res2:1, save_idx_regs:1, res3:18; +#endif + } ictrl; + + *(unsigned int *)&ictrl = 0; + + ictrl.save_nr_gpr_pairs = 6; /* r0 to r11 (r12 saved manually) */ + ictrl.save_blink = 1; + ictrl.save_lp_regs = 1; /* LP_COUNT, LP_START, LP_END */ + ictrl.save_u_to_u = 0; /* user ctxt saved on kernel stack */ + ictrl.save_idx_regs = 1; /* JLI, LDI, EI */ + + WRITE_AUX(AUX_IRQ_CTRL, ictrl); + + /* setup status32, don't enable intr yet as kernel doesn't want */ + tmp = read_aux_reg(0xa); + tmp |= ISA_INIT_STATUS_BITS; + tmp &= ~STATUS_IE_MASK; + asm volatile("flag %0 \n"::"r"(tmp)); + + /* + * ARCv2 core intc provides multiple interrupt priorities (upto 16). + * Typical builds though have only two levels (0-high, 1-low) + * Linux by default uses lower prio 1 for most irqs, reserving 0 for + * NMI style interrupts in future (say perf) + * + * Read the intc BCR to confirm that Linux default priority is avail + * in h/w + * + * Note: + * IRQ_BCR[27..24] contains N-1 (for N priority levels) and prio level + * is 0 based. 
+ */ + tmp = (read_aux_reg(ARC_REG_IRQ_BCR) >> 24 ) & 0xF; + if (ARCV2_IRQ_DEF_PRIO > tmp) + panic("Linux default irq prio incorrect\n"); +} + +static void arcv2_irq_mask(struct irq_data *data) +{ + write_aux_reg(AUX_IRQ_SELECT, data->irq); + write_aux_reg(AUX_IRQ_ENABLE, 0); +} + +static void arcv2_irq_unmask(struct irq_data *data) +{ + write_aux_reg(AUX_IRQ_SELECT, data->irq); + write_aux_reg(AUX_IRQ_ENABLE, 1); +} + +void arcv2_irq_enable(struct irq_data *data) +{ + /* set default priority */ + write_aux_reg(AUX_IRQ_SELECT, data->irq); + write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO); + + /* + * hw auto enables (linux unmask) all by default + * So no need to do IRQ_ENABLE here + * XXX: However OSCI LAN need it + */ + write_aux_reg(AUX_IRQ_ENABLE, 1); +} + +static struct irq_chip arcv2_irq_chip = { + .name = "ARCv2 core Intc", + .irq_mask = arcv2_irq_mask, + .irq_unmask = arcv2_irq_unmask, + .irq_enable = arcv2_irq_enable +}; + +static int arcv2_irq_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + if (irq == TIMER0_IRQ || irq == IPI_IRQ) + irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq); + else + irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq); + + return 0; +} + +static const struct irq_domain_ops arcv2_irq_ops = { + .xlate = irq_domain_xlate_onecell, + .map = arcv2_irq_map, +}; + +static struct irq_domain *root_domain; + +static int __init +init_onchip_IRQ(struct device_node *intc, struct device_node *parent) +{ + if (parent) + panic("DeviceTree incore intc not a root irq controller\n"); + + root_domain = irq_domain_add_legacy(intc, NR_CPU_IRQS, 0, 0, + &arcv2_irq_ops, NULL); + + if (!root_domain) + panic("root irq domain not avail\n"); + + /* with this we don't need to export root_domain */ + irq_set_default_host(root_domain); + + return 0; +} + +IRQCHIP_DECLARE(arc_intc, "snps,archs-intc", init_onchip_IRQ); diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c new file mode 100644 index 000000000..039fac30b --- /dev/null +++ b/arch/arc/kernel/intc-compact.c @@ -0,0 +1,225 @@ +/* + * Copyright (C) 2011-12 Synopsys, Inc. (www.synopsys.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/irqdomain.h> +#include <linux/irqchip.h> +#include <asm/irq.h> + +/* + * Early Hardware specific Interrupt setup + * -Platform independent, needed for each CPU (not foldable into init_IRQ) + * -Called very early (start_kernel -> setup_arch -> setup_processor) + * + * what it does ? 
+ * -Optionally, setup the High priority Interrupts as Level 2 IRQs + */ +void arc_init_IRQ(void) +{ + int level_mask = 0; + + /* setup any high priority Interrupts (Level2 in ARCompact jargon) */ + level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3; + level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5; + level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6; + + /* + * Write to register, even if no LV2 IRQs configured to reset it + * in case bootloader had mucked with it + */ + write_aux_reg(AUX_IRQ_LEV, level_mask); + + if (level_mask) + pr_info("Level-2 interrupts bitset %x\n", level_mask); +} + +/* + * ARC700 core includes a simple on-chip intc supporting + * -per IRQ enable/disable + * -2 levels of interrupts (high/low) + * -all interrupts being level triggered + * + * To reduce platform code, we assume all IRQs directly hooked-up into intc. + * Platforms with external intc, hence cascaded IRQs, are free to over-ride + * below, per IRQ. + */ + +static void arc_irq_mask(struct irq_data *data) +{ + unsigned int ienb; + + ienb = read_aux_reg(AUX_IENABLE); + ienb &= ~(1 << data->irq); + write_aux_reg(AUX_IENABLE, ienb); +} + +static void arc_irq_unmask(struct irq_data *data) +{ + unsigned int ienb; + + ienb = read_aux_reg(AUX_IENABLE); + ienb |= (1 << data->irq); + write_aux_reg(AUX_IENABLE, ienb); +} + +static struct irq_chip onchip_intc = { + .name = "ARC In-core Intc", + .irq_mask = arc_irq_mask, + .irq_unmask = arc_irq_unmask, +}; + +static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + /* + * XXX: the IPI IRQ needs to be handled like TIMER too. However ARC core + * code doesn't own it (like TIMER0). ISS IDU / ezchip define it + * in platform header which can't be included here as it goes + * against multi-platform image philisophy + */ + if (irq == TIMER0_IRQ) + irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq); + else + irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq); + + return 0; +} + +static const struct irq_domain_ops arc_intc_domain_ops = { + .xlate = irq_domain_xlate_onecell, + .map = arc_intc_domain_map, +}; + +static struct irq_domain *root_domain; + +static int __init +init_onchip_IRQ(struct device_node *intc, struct device_node *parent) +{ + if (parent) + panic("DeviceTree incore intc not a root irq controller\n"); + + root_domain = irq_domain_add_legacy(intc, NR_CPU_IRQS, 0, 0, + &arc_intc_domain_ops, NULL); + + if (!root_domain) + panic("root irq domain not avail\n"); + + /* with this we don't need to export root_domain */ + irq_set_default_host(root_domain); + + return 0; +} + +IRQCHIP_DECLARE(arc_intc, "snps,arc700-intc", init_onchip_IRQ); + +/* + * arch_local_irq_enable - Enable interrupts. + * + * 1. Explicitly called to re-enable interrupts + * 2. Implicitly called from spin_unlock_irq, write_unlock_irq etc + * which maybe in hard ISR itself + * + * Semantics of this function change depending on where it is called from: + * + * -If called from hard-ISR, it must not invert interrupt priorities + * e.g. suppose TIMER is high priority (Level 2) IRQ + * Time hard-ISR, timer_interrupt( ) calls spin_unlock_irq several times. + * Here local_irq_enable( ) shd not re-enable lower priority interrupts + * -If called from soft-ISR, it must re-enable all interrupts + * soft ISR are low prioity jobs which can be very slow, thus all IRQs + * must be enabled while they run. 
+ * Now hardware context wise we may still be in L2 ISR (not done rtie) + * still we must re-enable both L1 and L2 IRQs + * Another twist is prev scenario with flow being + * L1 ISR ==> interrupted by L2 ISR ==> L2 soft ISR + * here we must not re-enable Ll as prev Ll Interrupt's h/w context will get + * over-written (this is deficiency in ARC700 Interrupt mechanism) + */ + +#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS /* Complex version for 2 IRQ levels */ + +void arch_local_irq_enable(void) +{ + + unsigned long flags = arch_local_save_flags(); + + /* Allow both L1 and L2 at the onset */ + flags |= (STATUS_E1_MASK | STATUS_E2_MASK); + + /* Called from hard ISR (between irq_enter and irq_exit) */ + if (in_irq()) { + + /* If in L2 ISR, don't re-enable any further IRQs as this can + * cause IRQ priorities to get upside down. e.g. it could allow + * L1 be taken while in L2 hard ISR which is wrong not only in + * theory, it can also cause the dreaded L1-L2-L1 scenario + */ + if (flags & STATUS_A2_MASK) + flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); + + /* Even if in L1 ISR, allowe Higher prio L2 IRQs */ + else if (flags & STATUS_A1_MASK) + flags &= ~(STATUS_E1_MASK); + } + + /* called from soft IRQ, ideally we want to re-enable all levels */ + + else if (in_softirq()) { + + /* However if this is case of L1 interrupted by L2, + * re-enabling both may cause whaco L1-L2-L1 scenario + * because ARC700 allows level 1 to interrupt an active L2 ISR + * Thus we disable both + * However some code, executing in soft ISR wants some IRQs + * to be enabled so we re-enable L2 only + * + * How do we determine L1 intr by L2 + * -A2 is set (means in L2 ISR) + * -E1 is set in this ISR's pt_regs->status32 which is + * saved copy of status32_l2 when l2 ISR happened + */ + struct pt_regs *pt = get_irq_regs(); + + if ((flags & STATUS_A2_MASK) && pt && + (pt->status32 & STATUS_A1_MASK)) { + /*flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); */ + flags &= ~(STATUS_E1_MASK); + } + } + + arch_local_irq_restore(flags); +} + +#else /* ! CONFIG_ARC_COMPACT_IRQ_LEVELS */ + +/* + * Simpler version for only 1 level of interrupt + * Here we only Worry about Level 1 Bits + */ +void arch_local_irq_enable(void) +{ + unsigned long flags; + + /* + * ARC IDE Drivers tries to re-enable interrupts from hard-isr + * context which is simply wrong + */ + if (in_irq()) { + WARN_ONCE(1, "IRQ enabled from hard-isr"); + return; + } + + flags = arch_local_save_flags(); + flags |= (STATUS_E1_MASK | STATUS_E2_MASK); + arch_local_irq_restore(flags); +} +#endif +EXPORT_SYMBOL(arch_local_irq_enable); diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c index 620ec2fe3..2989a7bcf 100644 --- a/arch/arc/kernel/irq.c +++ b/arch/arc/kernel/irq.c @@ -8,116 +8,10 @@ */ #include <linux/interrupt.h> -#include <linux/module.h> -#include <linux/of.h> -#include <linux/irqdomain.h> #include <linux/irqchip.h> -#include "../../drivers/irqchip/irqchip.h" -#include <asm/sections.h> -#include <asm/irq.h> #include <asm/mach_desc.h> /* - * Early Hardware specific Interrupt setup - * -Platform independent, needed for each CPU (not foldable into init_IRQ) - * -Called very early (start_kernel -> setup_arch -> setup_processor) - * - * what it does ? 
- * -Optionally, setup the High priority Interrupts as Level 2 IRQs - */ -void arc_init_IRQ(void) -{ - int level_mask = 0; - - /* setup any high priority Interrupts (Level2 in ARCompact jargon) */ - level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3; - level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5; - level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6; - - /* - * Write to register, even if no LV2 IRQs configured to reset it - * in case bootloader had mucked with it - */ - write_aux_reg(AUX_IRQ_LEV, level_mask); - - if (level_mask) - pr_info("Level-2 interrupts bitset %x\n", level_mask); -} - -/* - * ARC700 core includes a simple on-chip intc supporting - * -per IRQ enable/disable - * -2 levels of interrupts (high/low) - * -all interrupts being level triggered - * - * To reduce platform code, we assume all IRQs directly hooked-up into intc. - * Platforms with external intc, hence cascaded IRQs, are free to over-ride - * below, per IRQ. - */ - -static void arc_irq_mask(struct irq_data *data) -{ - unsigned int ienb; - - ienb = read_aux_reg(AUX_IENABLE); - ienb &= ~(1 << data->irq); - write_aux_reg(AUX_IENABLE, ienb); -} - -static void arc_irq_unmask(struct irq_data *data) -{ - unsigned int ienb; - - ienb = read_aux_reg(AUX_IENABLE); - ienb |= (1 << data->irq); - write_aux_reg(AUX_IENABLE, ienb); -} - -static struct irq_chip onchip_intc = { - .name = "ARC In-core Intc", - .irq_mask = arc_irq_mask, - .irq_unmask = arc_irq_unmask, -}; - -static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq, - irq_hw_number_t hw) -{ - if (irq == TIMER0_IRQ) - irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq); - else - irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq); - - return 0; -} - -static const struct irq_domain_ops arc_intc_domain_ops = { - .xlate = irq_domain_xlate_onecell, - .map = arc_intc_domain_map, -}; - -static struct irq_domain *root_domain; - -static int __init -init_onchip_IRQ(struct device_node *intc, struct device_node *parent) -{ - if (parent) - panic("DeviceTree incore intc not a root irq controller\n"); - - root_domain = irq_domain_add_legacy(intc, NR_CPU_IRQS, 0, 0, - &arc_intc_domain_ops, NULL); - - if (!root_domain) - panic("root irq domain not avail\n"); - - /* with this we don't need to export root_domain */ - irq_set_default_host(root_domain); - - return 0; -} - -IRQCHIP_DECLARE(arc_intc, "snps,arc700-intc", init_onchip_IRQ); - -/* * Late Interrupt system init called from start_kernel for Boot CPU only * * Since slab must already be initialized, platforms can start doing any @@ -178,107 +72,3 @@ void arc_request_percpu_irq(int irq, int cpu, enable_percpu_irq(irq, 0); } - -/* - * arch_local_irq_enable - Enable interrupts. - * - * 1. Explicitly called to re-enable interrupts - * 2. Implicitly called from spin_unlock_irq, write_unlock_irq etc - * which maybe in hard ISR itself - * - * Semantics of this function change depending on where it is called from: - * - * -If called from hard-ISR, it must not invert interrupt priorities - * e.g. suppose TIMER is high priority (Level 2) IRQ - * Time hard-ISR, timer_interrupt( ) calls spin_unlock_irq several times. - * Here local_irq_enable( ) shd not re-enable lower priority interrupts - * -If called from soft-ISR, it must re-enable all interrupts - * soft ISR are low prioity jobs which can be very slow, thus all IRQs - * must be enabled while they run. 
- * Now hardware context wise we may still be in L2 ISR (not done rtie) - * still we must re-enable both L1 and L2 IRQs - * Another twist is prev scenario with flow being - * L1 ISR ==> interrupted by L2 ISR ==> L2 soft ISR - * here we must not re-enable Ll as prev Ll Interrupt's h/w context will get - * over-written (this is deficiency in ARC700 Interrupt mechanism) - */ - -#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS /* Complex version for 2 IRQ levels */ - -void arch_local_irq_enable(void) -{ - - unsigned long flags; - flags = arch_local_save_flags(); - - /* Allow both L1 and L2 at the onset */ - flags |= (STATUS_E1_MASK | STATUS_E2_MASK); - - /* Called from hard ISR (between irq_enter and irq_exit) */ - if (in_irq()) { - - /* If in L2 ISR, don't re-enable any further IRQs as this can - * cause IRQ priorities to get upside down. e.g. it could allow - * L1 be taken while in L2 hard ISR which is wrong not only in - * theory, it can also cause the dreaded L1-L2-L1 scenario - */ - if (flags & STATUS_A2_MASK) - flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); - - /* Even if in L1 ISR, allowe Higher prio L2 IRQs */ - else if (flags & STATUS_A1_MASK) - flags &= ~(STATUS_E1_MASK); - } - - /* called from soft IRQ, ideally we want to re-enable all levels */ - - else if (in_softirq()) { - - /* However if this is case of L1 interrupted by L2, - * re-enabling both may cause whaco L1-L2-L1 scenario - * because ARC700 allows level 1 to interrupt an active L2 ISR - * Thus we disable both - * However some code, executing in soft ISR wants some IRQs - * to be enabled so we re-enable L2 only - * - * How do we determine L1 intr by L2 - * -A2 is set (means in L2 ISR) - * -E1 is set in this ISR's pt_regs->status32 which is - * saved copy of status32_l2 when l2 ISR happened - */ - struct pt_regs *pt = get_irq_regs(); - if ((flags & STATUS_A2_MASK) && pt && - (pt->status32 & STATUS_A1_MASK)) { - /*flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); */ - flags &= ~(STATUS_E1_MASK); - } - } - - arch_local_irq_restore(flags); -} - -#else /* ! CONFIG_ARC_COMPACT_IRQ_LEVELS */ - -/* - * Simpler version for only 1 level of interrupt - * Here we only Worry about Level 1 Bits - */ -void arch_local_irq_enable(void) -{ - unsigned long flags; - - /* - * ARC IDE Drivers tries to re-enable interrupts from hard-isr - * context which is simply wrong - */ - if (in_irq()) { - WARN_ONCE(1, "IRQ enabled from hard-isr"); - return; - } - - flags = arch_local_save_flags(); - flags |= (STATUS_E1_MASK | STATUS_E2_MASK); - arch_local_irq_restore(flags); -} -#endif -EXPORT_SYMBOL(arch_local_irq_enable); diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c new file mode 100644 index 000000000..2fb865890 --- /dev/null +++ b/arch/arc/kernel/mcip.c @@ -0,0 +1,356 @@ +/* + * ARC ARConnect (MultiCore IP) support (formerly known as MCIP) + * + * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/smp.h> +#include <linux/irq.h> +#include <linux/spinlock.h> +#include <asm/mcip.h> + +static char smp_cpuinfo_buf[128]; +static int idu_detected; + +static DEFINE_RAW_SPINLOCK(mcip_lock); + +/* + * Any SMP specific init any CPU does when it comes up. 
+ * Here we set up the CPU to enable Inter-Processor-Interrupts
+ * Called for each CPU
+ * -Master       : init_IRQ()
+ * -Other(s)     : start_kernel_secondary()
+ */
+void mcip_init_smp(unsigned int cpu)
+{
+	smp_ipi_irq_setup(cpu, IPI_IRQ);
+}
+
+static void mcip_ipi_send(int cpu)
+{
+	unsigned long flags;
+	int ipi_was_pending;
+
+	/*
+	 * NOTE: We must spin here if the other cpu hasn't yet
+	 * serviced a previous message. This can burn lots
+	 * of time, but we MUST follow this protocol or
+	 * IPI messages can be lost.
+	 * Also, we must release the lock in this loop because
+	 * the other side may get to this same loop and not
+	 * be able to ack -- thus causing deadlock.
+	 */
+
+	do {
+		raw_spin_lock_irqsave(&mcip_lock, flags);
+		__mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
+		ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
+		if (ipi_was_pending == 0)
+			break; /* break out but keep lock */
+		raw_spin_unlock_irqrestore(&mcip_lock, flags);
+	} while (1);
+
+	__mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
+	raw_spin_unlock_irqrestore(&mcip_lock, flags);
+
+#ifdef CONFIG_ARC_IPI_DBG
+	if (ipi_was_pending)
+		pr_info("IPI ACK delayed from cpu %d\n", cpu);
+#endif
+}
+
+static void mcip_ipi_clear(int irq)
+{
+	unsigned int cpu, c;
+	unsigned long flags;
+	unsigned int __maybe_unused copy;
+
+	raw_spin_lock_irqsave(&mcip_lock, flags);
+
+	/* Who sent the IPI */
+	__mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);
+
+	copy = cpu = read_aux_reg(ARC_REG_MCIP_READBACK);	/* 1,2,4,8... */
+
+	/*
+	 * In a rare case, multiple concurrent IPIs sent to the same target
+	 * can be coalesced by MCIP into 1 asserted IRQ, so @cpu can be
+	 * "vectored" (multiple bits set) as opposed to the typical single bit
+	 */
+	do {
+		c = __ffs(cpu);			/* 0,1,2,3 */
+		__mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
+		cpu &= ~(1U << c);
+	} while (cpu);
+
+	raw_spin_unlock_irqrestore(&mcip_lock, flags);
+
+#ifdef CONFIG_ARC_IPI_DBG
+	if (c != __ffs(copy))
+		pr_info("IPIs from %x coalesced to %x\n",
+			copy, raw_smp_processor_id());
+#endif
+}
+
+volatile int wake_flag;
+
+static void mcip_wakeup_cpu(int cpu, unsigned long pc)
+{
+	BUG_ON(cpu == 0);
+	wake_flag = cpu;
+}
+
+void arc_platform_smp_wait_to_boot(int cpu)
+{
+	while (wake_flag != cpu)
+		;
+
+	wake_flag = 0;
+	__asm__ __volatile__("j @first_lines_of_secondary	\n");
+}
+
+struct plat_smp_ops plat_smp_ops = {
+	.info		= smp_cpuinfo_buf,
+	.cpu_kick	= mcip_wakeup_cpu,
+	.ipi_send	= mcip_ipi_send,
+	.ipi_clear	= mcip_ipi_clear,
+};
+
+void mcip_init_early_smp(void)
+{
+#define IS_AVAIL1(var, str)    ((var) ?
str : "") + + struct mcip_bcr { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int pad3:8, + idu:1, llm:1, num_cores:6, + iocoh:1, grtc:1, dbg:1, pad2:1, + msg:1, sem:1, ipi:1, pad:1, + ver:8; +#else + unsigned int ver:8, + pad:1, ipi:1, sem:1, msg:1, + pad2:1, dbg:1, grtc:1, iocoh:1, + num_cores:6, llm:1, idu:1, + pad3:8; +#endif + } mp; + + READ_BCR(ARC_REG_MCIP_BCR, mp); + + sprintf(smp_cpuinfo_buf, + "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n", + mp.ver, mp.num_cores, + IS_AVAIL1(mp.ipi, "IPI "), + IS_AVAIL1(mp.idu, "IDU "), + IS_AVAIL1(mp.dbg, "DEBUG "), + IS_AVAIL1(mp.grtc, "GRTC")); + + idu_detected = mp.idu; + + if (mp.dbg) { + __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf); + __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf); + } + + if (IS_ENABLED(CONFIG_ARC_HAS_GRTC) && !mp.grtc) + panic("kernel trying to use non-existent GRTC\n"); +} + +/*************************************************************************** + * ARCv2 Interrupt Distribution Unit (IDU) + * + * Connects external "COMMON" IRQs to core intc, providing: + * -dynamic routing (IRQ affinity) + * -load balancing (Round Robin interrupt distribution) + * -1:N distribution + * + * It physically resides in the MCIP hw block + */ + +#include <linux/irqchip.h> +#include <linux/of.h> +#include <linux/of_irq.h> + +/* + * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core) + */ +static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask) +{ + __mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask); +} + +static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl, + unsigned int distr) +{ + union { + unsigned int word; + struct { + unsigned int distr:2, pad:2, lvl:1, pad2:27; + }; + } data; + + data.distr = distr; + data.lvl = lvl; + __mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word); +} + +static void idu_irq_mask(struct irq_data *data) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&mcip_lock, flags); + __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1); + raw_spin_unlock_irqrestore(&mcip_lock, flags); +} + +static void idu_irq_unmask(struct irq_data *data) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&mcip_lock, flags); + __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0); + raw_spin_unlock_irqrestore(&mcip_lock, flags); +} + +#ifdef CONFIG_SMP +static int +idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask, + bool force) +{ + unsigned long flags; + cpumask_t online; + + /* errout if no online cpu per @cpumask */ + if (!cpumask_and(&online, cpumask, cpu_online_mask)) + return -EINVAL; + + raw_spin_lock_irqsave(&mcip_lock, flags); + + idu_set_dest(data->hwirq, cpumask_bits(&online)[0]); + idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR); + + raw_spin_unlock_irqrestore(&mcip_lock, flags); + + return IRQ_SET_MASK_OK; +} +#endif + +static struct irq_chip idu_irq_chip = { + .name = "MCIP IDU Intc", + .irq_mask = idu_irq_mask, + .irq_unmask = idu_irq_unmask, +#ifdef CONFIG_SMP + .irq_set_affinity = idu_irq_set_affinity, +#endif + +}; + +static int idu_first_irq; + +static void idu_cascade_isr(unsigned int core_irq, struct irq_desc *desc) +{ + struct irq_domain *domain = irq_desc_get_handler_data(desc); + unsigned int idu_irq; + + idu_irq = core_irq - idu_first_irq; + generic_handle_irq(irq_find_mapping(domain, idu_irq)); +} + +static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq); + irq_set_status_flags(virq, IRQ_MOVE_PCNTXT); + + return 0; +} + +static int 
idu_irq_xlate(struct irq_domain *d, struct device_node *n,
+	      const u32 *intspec, unsigned int intsize,
+	      irq_hw_number_t *out_hwirq, unsigned int *out_type)
+{
+	irq_hw_number_t hwirq = *out_hwirq = intspec[0];
+	int distri = intspec[1];
+	unsigned long flags;
+
+	*out_type = IRQ_TYPE_NONE;
+
+	/* XXX: validate distribution scheme against online cpu mask */
+	if (distri == 0) {
+		/* 0 - Round Robin to all cpus, otherwise 1 bit per core */
+		raw_spin_lock_irqsave(&mcip_lock, flags);
+		idu_set_dest(hwirq, BIT(num_online_cpus()) - 1);
+		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
+		raw_spin_unlock_irqrestore(&mcip_lock, flags);
+	} else {
+		/*
+		 * DEST-based distribution for level-triggered intr can only
+		 * target 1 CPU, so pick the first cpu in the given mask
+		 */
+		int cpu = ffs(distri);
+
+		if (cpu != fls(distri))
+			pr_warn("IDU irq %lx distribution mode set to cpu %x\n",
+				hwirq, cpu);
+
+		raw_spin_lock_irqsave(&mcip_lock, flags);
+		idu_set_dest(hwirq, cpu);
+		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST);
+		raw_spin_unlock_irqrestore(&mcip_lock, flags);
+	}
+
+	return 0;
+}
+
+static const struct irq_domain_ops idu_irq_ops = {
+	.xlate	= idu_irq_xlate,
+	.map	= idu_irq_map,
+};
+
+/*
+ * [16, 23]:   Statically assigned always private-per-core (Timers, WDT, IPI)
+ * [24, 23+C]: If C > 0 then "C" common IRQs
+ * [24+C, N]:  Not statically assigned, private-per-core
+ */
+
+static int __init
+idu_of_init(struct device_node *intc, struct device_node *parent)
+{
+	struct irq_domain *domain;
+	/* DT gives nr_irqs; could be cross-checked against the IDU BCR */
+	int nr_irqs = of_irq_count(intc);
+	int i, irq;
+
+	if (!idu_detected)
+		panic("IDU not detected, but DeviceTree using it");
+
+	pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs);
+
+	domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);
+
+	/* Parent interrupts (core-intc) are already mapped */
+
+	for (i = 0; i < nr_irqs; i++) {
+		/*
+		 * Parent uplink IRQs (towards core intc) are 24, 25, ...
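+		 * following the hwirq layout sketched in the block comment above.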
+		 * The core intc has already mapped them; re-parse here only to
+		 * learn the parent virq, so the IDU cascade handler can be
+		 * installed as the first-level ISR.
+		 */
+		irq = irq_of_parse_and_map(intc, i);
+		if (!i)
+			idu_first_irq = irq;
+
+		irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain);
+	}
+
+	__mcip_cmd(CMD_IDU_ENABLE, 0);
+
+	return 0;
+}
+IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 57b58f52d..1287388c2 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -268,7 +268,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 {
 	struct arc_reg_pct_build pct_bcr;
 	struct arc_reg_cc_build cc_bcr;
-	int i, j, ret;
+	int i, j;
 
 	union cc_name {
 		struct {
@@ -335,9 +335,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 	/* ARC 700 PMU does not support sampling events */
 	arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 
-	ret = perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW);
-
-	return ret;
+	return perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW);
 }
 
 #ifdef CONFIG_OF
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index e095c557a..440924567 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -44,7 +44,11 @@ SYSCALL_DEFINE0(arc_gettls)
 void arch_cpu_idle(void)
 {
 	/* sleep, but enable all interrupts before committing */
-	__asm__("sleep 0x3");
+	if (is_isa_arcompact()) {
+		__asm__("sleep 0x3");
+	} else {
+		__asm__("sleep 0x10");
+	}
 }
 
 asmlinkage void ret_from_fork(void);
@@ -166,8 +170,7 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long usp)
 	 * [L] ZOL loop inhibited to begin with - cleared by a LP insn
 	 * Interrupts enabled
 	 */
-	regs->status32 = STATUS_U_MASK | STATUS_L_MASK |
-			 STATUS_E1_MASK | STATUS_E2_MASK;
+	regs->status32 = STATUS_U_MASK | STATUS_L_MASK | ISA_INIT_STATUS_BITS;
 
 	/* bogus seed values for debugging */
 	regs->lp_start = 0x10;
@@ -197,8 +200,11 @@ int elf_check_arch(const struct elf32_hdr *x)
 {
 	unsigned int eflags;
 
-	if (x->e_machine != EM_ARCOMPACT)
+	if (x->e_machine != EM_ARC_INUSE) {
+		pr_err("ELF not built for %s ISA\n",
+			is_isa_arcompact() ?
"ARCompact":"ARCv2"); return 0; + } eflags = x->e_flags; if ((eflags & EF_ARC_OSABI_MSK) < EF_ARC_OSABI_CURRENT) { diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c index 13b3ffb27..4442204fe 100644 --- a/arch/arc/kernel/ptrace.c +++ b/arch/arc/kernel/ptrace.c @@ -47,10 +47,47 @@ static int genregs_get(struct task_struct *target, offsetof(struct user_regs_struct, LOC) + 4); REG_O_ZERO(pad); - REG_O_CHUNK(scratch, callee, ptregs); + REG_O_ONE(scratch.bta, &ptregs->bta); + REG_O_ONE(scratch.lp_start, &ptregs->lp_start); + REG_O_ONE(scratch.lp_end, &ptregs->lp_end); + REG_O_ONE(scratch.lp_count, &ptregs->lp_count); + REG_O_ONE(scratch.status32, &ptregs->status32); + REG_O_ONE(scratch.ret, &ptregs->ret); + REG_O_ONE(scratch.blink, &ptregs->blink); + REG_O_ONE(scratch.fp, &ptregs->fp); + REG_O_ONE(scratch.gp, &ptregs->r26); + REG_O_ONE(scratch.r12, &ptregs->r12); + REG_O_ONE(scratch.r11, &ptregs->r11); + REG_O_ONE(scratch.r10, &ptregs->r10); + REG_O_ONE(scratch.r9, &ptregs->r9); + REG_O_ONE(scratch.r8, &ptregs->r8); + REG_O_ONE(scratch.r7, &ptregs->r7); + REG_O_ONE(scratch.r6, &ptregs->r6); + REG_O_ONE(scratch.r5, &ptregs->r5); + REG_O_ONE(scratch.r4, &ptregs->r4); + REG_O_ONE(scratch.r3, &ptregs->r3); + REG_O_ONE(scratch.r2, &ptregs->r2); + REG_O_ONE(scratch.r1, &ptregs->r1); + REG_O_ONE(scratch.r0, &ptregs->r0); + REG_O_ONE(scratch.sp, &ptregs->sp); + REG_O_ZERO(pad2); - REG_O_CHUNK(callee, efa, cregs); - REG_O_CHUNK(efa, stop_pc, &target->thread.fault_address); + + REG_O_ONE(callee.r25, &cregs->r25); + REG_O_ONE(callee.r24, &cregs->r24); + REG_O_ONE(callee.r23, &cregs->r23); + REG_O_ONE(callee.r22, &cregs->r22); + REG_O_ONE(callee.r21, &cregs->r21); + REG_O_ONE(callee.r20, &cregs->r20); + REG_O_ONE(callee.r19, &cregs->r19); + REG_O_ONE(callee.r18, &cregs->r18); + REG_O_ONE(callee.r17, &cregs->r17); + REG_O_ONE(callee.r16, &cregs->r16); + REG_O_ONE(callee.r15, &cregs->r15); + REG_O_ONE(callee.r14, &cregs->r14); + REG_O_ONE(callee.r13, &cregs->r13); + + REG_O_ONE(efa, &target->thread.fault_address); if (!ret) { if (in_brkpt_trap(ptregs)) { @@ -97,12 +134,51 @@ static int genregs_set(struct task_struct *target, offsetof(struct user_regs_struct, LOC) + 4); REG_IGNORE_ONE(pad); - /* TBD: disallow updates to STATUS32 etc*/ - REG_IN_CHUNK(scratch, pad2, ptregs); /* pt_regs[bta..sp] */ + + REG_IN_ONE(scratch.bta, &ptregs->bta); + REG_IN_ONE(scratch.lp_start, &ptregs->lp_start); + REG_IN_ONE(scratch.lp_end, &ptregs->lp_end); + REG_IN_ONE(scratch.lp_count, &ptregs->lp_count); + + REG_IGNORE_ONE(scratch.status32); + + REG_IN_ONE(scratch.ret, &ptregs->ret); + REG_IN_ONE(scratch.blink, &ptregs->blink); + REG_IN_ONE(scratch.fp, &ptregs->fp); + REG_IN_ONE(scratch.gp, &ptregs->r26); + REG_IN_ONE(scratch.r12, &ptregs->r12); + REG_IN_ONE(scratch.r11, &ptregs->r11); + REG_IN_ONE(scratch.r10, &ptregs->r10); + REG_IN_ONE(scratch.r9, &ptregs->r9); + REG_IN_ONE(scratch.r8, &ptregs->r8); + REG_IN_ONE(scratch.r7, &ptregs->r7); + REG_IN_ONE(scratch.r6, &ptregs->r6); + REG_IN_ONE(scratch.r5, &ptregs->r5); + REG_IN_ONE(scratch.r4, &ptregs->r4); + REG_IN_ONE(scratch.r3, &ptregs->r3); + REG_IN_ONE(scratch.r2, &ptregs->r2); + REG_IN_ONE(scratch.r1, &ptregs->r1); + REG_IN_ONE(scratch.r0, &ptregs->r0); + REG_IN_ONE(scratch.sp, &ptregs->sp); + REG_IGNORE_ONE(pad2); - REG_IN_CHUNK(callee, efa, cregs); /* callee_regs[r25..r13] */ + + REG_IN_ONE(callee.r25, &cregs->r25); + REG_IN_ONE(callee.r24, &cregs->r24); + REG_IN_ONE(callee.r23, &cregs->r23); + REG_IN_ONE(callee.r22, &cregs->r22); + REG_IN_ONE(callee.r21, 
&cregs->r21); + REG_IN_ONE(callee.r20, &cregs->r20); + REG_IN_ONE(callee.r19, &cregs->r19); + REG_IN_ONE(callee.r18, &cregs->r18); + REG_IN_ONE(callee.r17, &cregs->r17); + REG_IN_ONE(callee.r16, &cregs->r16); + REG_IN_ONE(callee.r15, &cregs->r15); + REG_IN_ONE(callee.r14, &cregs->r14); + REG_IN_ONE(callee.r13, &cregs->r13); + REG_IGNORE_ONE(efa); /* efa update invalid */ - REG_IGNORE_ONE(stop_pc); /* PC updated via @ret */ + REG_IGNORE_ONE(stop_pc); /* PC updated via @ret */ return ret; } @@ -124,7 +200,7 @@ static const struct user_regset arc_regsets[] = { static const struct user_regset_view user_arc_view = { .name = UTS_MACHINE, - .e_machine = EM_ARCOMPACT, + .e_machine = EM_ARC_INUSE, .regsets = arc_regsets, .n = ARRAY_SIZE(arc_regsets) }; diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index 1d167c6df..cabde9dc0 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -30,6 +30,8 @@ #define FIX_PTR(x) __asm__ __volatile__(";" : "+r"(x)) +unsigned int intr_to_DE_cnt; + /* Part of U-boot ABI: see head.S */ int __initdata uboot_tag; char __initdata *uboot_arg; @@ -45,6 +47,7 @@ static void read_arc_build_cfg_regs(void) struct bcr_perip uncached_space; struct bcr_generic bcr; struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; + unsigned long perip_space; FIX_PTR(cpu); READ_BCR(AUX_IDENTITY, cpu->core); @@ -54,7 +57,12 @@ static void read_arc_build_cfg_regs(void) cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE); READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space); - cpu->uncached_base = uncached_space.start << 24; + if (uncached_space.ver < 3) + perip_space = uncached_space.start << 24; + else + perip_space = read_aux_reg(AUX_NON_VOL) & 0xF0000000; + + BUG_ON(perip_space != ARC_UNCACHED_ADDR_SPACE); READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy); @@ -96,7 +104,7 @@ static void read_arc_build_cfg_regs(void) read_decode_mmu_bcr(); read_decode_cache_bcr(); - { + if (is_isa_arcompact()) { struct bcr_fp_arcompact sp, dp; struct bcr_bpu_arcompact bpu; @@ -112,6 +120,19 @@ static void read_arc_build_cfg_regs(void) cpu->bpu.num_cache = 256 << (bpu.ent - 1); cpu->bpu.num_pred = 256 << (bpu.ent - 1); } + } else { + struct bcr_fp_arcv2 spdp; + struct bcr_bpu_arcv2 bpu; + + READ_BCR(ARC_REG_FP_V2_BCR, spdp); + cpu->extn.fpu_sp = spdp.sp ? 1 : 0; + cpu->extn.fpu_dp = spdp.dp ? 1 : 0; + + READ_BCR(ARC_REG_BPU_BCR, bpu); + cpu->bpu.ver = bpu.ver; + cpu->bpu.full = bpu.ft; + cpu->bpu.num_cache = 256 << bpu.bce; + cpu->bpu.num_pred = 2048 << bpu.pte; } READ_BCR(ARC_REG_AP_BCR, bcr); @@ -127,16 +148,22 @@ static void read_arc_build_cfg_regs(void) } static const struct cpuinfo_data arc_cpu_tbl[] = { +#ifdef CONFIG_ISA_ARCOMPACT { {0x20, "ARC 600" }, 0x2F}, { {0x30, "ARC 700" }, 0x33}, { {0x34, "ARC 700 R4.10"}, 0x34}, { {0x35, "ARC 700 R4.11"}, 0x35}, +#else + { {0x50, "ARC HS38 R2.0"}, 0x51}, + { {0x52, "ARC HS38 R2.1"}, 0x52}, +#endif { {0x00, NULL } } }; -#define IS_AVAIL1(v, str) ((v) ? str : "") -#define IS_USED(cfg) (IS_ENABLED(cfg) ? "" : "(not used) ") -#define IS_AVAIL2(v, str, cfg) IS_AVAIL1(v, str), IS_AVAIL1(v, IS_USED(cfg)) +#define IS_AVAIL1(v, s) ((v) ? s : "") +#define IS_USED_RUN(v) ((v) ? 
"" : "(not used) ") +#define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg)) +#define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg)) static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) { @@ -149,13 +176,17 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) FIX_PTR(cpu); - { + if (is_isa_arcompact()) { isa_nm = "ARCompact"; be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); atomic = cpu->isa.atomic1; if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */ atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC); + } else { + isa_nm = "ARCv2"; + be = cpu->isa.be; + atomic = cpu->isa.atomic; } n += scnprintf(buf + n, len - n, @@ -183,16 +214,34 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ", IS_AVAIL1(cpu->timers.t0, "Timer0 "), IS_AVAIL1(cpu->timers.t1, "Timer1 "), - IS_AVAIL2(cpu->timers.rtsc, "64-bit RTSC ", CONFIG_ARC_HAS_RTSC)); + IS_AVAIL2(cpu->timers.rtc, "64-bit RTC ", + CONFIG_ARC_HAS_RTC)); - n += i = scnprintf(buf + n, len - n, "%s%s", - IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC)); + n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s", + IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC), + IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64), + IS_AVAIL1(cpu->isa.unalign, "unalign (not used)")); if (i) n += scnprintf(buf + n, len - n, "\n\t\t: "); + if (cpu->extn_mpy.ver) { + if (cpu->extn_mpy.ver <= 0x2) { /* ARCompact */ + n += scnprintf(buf + n, len - n, "mpy "); + } else { + int opt = 2; /* stock MPY/MPYH */ + + if (cpu->extn_mpy.dsp) /* OPT 7-9 */ + opt = cpu->extn_mpy.dsp + 6; + + n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt); + } + n += scnprintf(buf + n, len - n, "%s", + IS_USED_CFG(CONFIG_ARC_HAS_HW_MPY)); + } + n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n", - IS_AVAIL1(cpu->extn_mpy.ver, "mpy "), + IS_AVAIL1(cpu->isa.div_rem, "div_rem "), IS_AVAIL1(cpu->extn.norm, "norm "), IS_AVAIL1(cpu->extn.barrel, "barrel-shift "), IS_AVAIL1(cpu->extn.swap, "swap "), @@ -219,7 +268,7 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len) n += scnprintf(buf + n, len - n, "Vector Table\t: %#x\nUncached Base\t: %#x\n", - cpu->vec_base, cpu->uncached_base); + cpu->vec_base, ARC_UNCACHED_ADDR_SPACE); if (cpu->extn.fpu_sp || cpu->extn.fpu_dp) n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n", @@ -254,8 +303,8 @@ static void arc_chk_core_config(void) if (!cpu->timers.t1) panic("Timer1 is not present!\n"); - if (IS_ENABLED(CONFIG_ARC_HAS_RTSC) && !cpu->timers.rtsc) - panic("RTSC is not present\n"); + if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->timers.rtc) + panic("RTC is not present\n"); #ifdef CONFIG_ARC_HAS_DCCM /* @@ -287,6 +336,10 @@ static void arc_chk_core_config(void) pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n"); else if (!cpu->extn.fpu_dp && fpu_enabled) panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n"); + + if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic && + !IS_ENABLED(CONFIG_ARC_STAR_9000923308)) + panic("llock/scond livelock workaround missing\n"); } /* @@ -323,13 +376,16 @@ static inline int is_kernel(unsigned long addr) void __init setup_arch(char **cmdline_p) { +#ifdef CONFIG_ARC_UBOOT_SUPPORT /* make sure that uboot passed pointer to cmdline/dtb is valid */ if (uboot_tag && is_kernel((unsigned long)uboot_arg)) panic("Invalid uboot arg\n"); /* See if u-boot passed an external Device Tree blob */ machine_desc = setup_machine_fdt(uboot_arg); /* uboot_tag == 2 */ - if (!machine_desc) 
{ + if (!machine_desc) +#endif + { /* No, so try the embedded one */ machine_desc = setup_machine_fdt(__dtb_start); if (!machine_desc) diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index 2251fb4bb..004b7f0bc 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c @@ -67,7 +67,33 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs, sigset_t *set) { int err; - err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs, + struct user_regs_struct uregs; + + uregs.scratch.bta = regs->bta; + uregs.scratch.lp_start = regs->lp_start; + uregs.scratch.lp_end = regs->lp_end; + uregs.scratch.lp_count = regs->lp_count; + uregs.scratch.status32 = regs->status32; + uregs.scratch.ret = regs->ret; + uregs.scratch.blink = regs->blink; + uregs.scratch.fp = regs->fp; + uregs.scratch.gp = regs->r26; + uregs.scratch.r12 = regs->r12; + uregs.scratch.r11 = regs->r11; + uregs.scratch.r10 = regs->r10; + uregs.scratch.r9 = regs->r9; + uregs.scratch.r8 = regs->r8; + uregs.scratch.r7 = regs->r7; + uregs.scratch.r6 = regs->r6; + uregs.scratch.r5 = regs->r5; + uregs.scratch.r4 = regs->r4; + uregs.scratch.r3 = regs->r3; + uregs.scratch.r2 = regs->r2; + uregs.scratch.r1 = regs->r1; + uregs.scratch.r0 = regs->r0; + uregs.scratch.sp = regs->sp; + + err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), &uregs.scratch, sizeof(sf->uc.uc_mcontext.regs.scratch)); err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t)); @@ -78,14 +104,40 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf) { sigset_t set; int err; + struct user_regs_struct uregs; err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)); if (!err) set_current_blocked(&set); - err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch), + err |= __copy_from_user(&uregs.scratch, + &(sf->uc.uc_mcontext.regs.scratch), sizeof(sf->uc.uc_mcontext.regs.scratch)); + regs->bta = uregs.scratch.bta; + regs->lp_start = uregs.scratch.lp_start; + regs->lp_end = uregs.scratch.lp_end; + regs->lp_count = uregs.scratch.lp_count; + regs->status32 = uregs.scratch.status32; + regs->ret = uregs.scratch.ret; + regs->blink = uregs.scratch.blink; + regs->fp = uregs.scratch.fp; + regs->r26 = uregs.scratch.gp; + regs->r12 = uregs.scratch.r12; + regs->r11 = uregs.scratch.r11; + regs->r10 = uregs.scratch.r10; + regs->r9 = uregs.scratch.r9; + regs->r8 = uregs.scratch.r8; + regs->r7 = uregs.scratch.r7; + regs->r6 = uregs.scratch.r6; + regs->r5 = uregs.scratch.r5; + regs->r4 = uregs.scratch.r4; + regs->r3 = uregs.scratch.r3; + regs->r2 = uregs.scratch.r2; + regs->r1 = uregs.scratch.r1; + regs->r0 = uregs.scratch.r0; + regs->sp = uregs.scratch.sp; + return err; } @@ -284,7 +336,7 @@ static void arc_restart_syscall(struct k_sigaction *ka, struct pt_regs *regs) * their orig user space value when we ret from kernel */ regs->r0 = regs->orig_r0; - regs->ret -= 4; + regs->ret -= is_isa_arcv2() ? 2 : 4; break; } } @@ -325,10 +377,10 @@ void do_signal(struct pt_regs *regs) if (regs->r0 == -ERESTARTNOHAND || regs->r0 == -ERESTARTSYS || regs->r0 == -ERESTARTNOINTR) { regs->r0 = regs->orig_r0; - regs->ret -= 4; + regs->ret -= is_isa_arcv2() ? 2 : 4; } else if (regs->r0 == -ERESTART_RESTARTBLOCK) { regs->r8 = __NR_restart_syscall; - regs->ret -= 4; + regs->ret -= is_isa_arcv2() ? 
2 : 4; } syscall_wont_restart(regs); /* No more restarts */ } diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c index 6a400b1b0..be13d1242 100644 --- a/arch/arc/kernel/smp.c +++ b/arch/arc/kernel/smp.c @@ -31,7 +31,7 @@ arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED; arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED; #endif -struct plat_smp_ops plat_smp_ops; +struct plat_smp_ops __weak plat_smp_ops; /* XXX: per cpu ? Only needed once in early seconday boot */ struct task_struct *secondary_idle_tsk; @@ -182,7 +182,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) /* * not supported here */ -int __init setup_profiling_timer(unsigned int multiplier) +int setup_profiling_timer(unsigned int multiplier) { return -EINVAL; } @@ -278,8 +278,10 @@ static void ipi_cpu_stop(void) machine_halt(); } -static inline void __do_IPI(unsigned long msg) +static inline int __do_IPI(unsigned long msg) { + int rc = 0; + switch (msg) { case IPI_RESCHEDULE: scheduler_ipi(); @@ -294,8 +296,10 @@ static inline void __do_IPI(unsigned long msg) break; default: - pr_warn("IPI with unexpected msg %ld\n", msg); + rc = 1; } + + return rc; } /* @@ -305,6 +309,7 @@ static inline void __do_IPI(unsigned long msg) irqreturn_t do_IPI(int irq, void *dev_id) { unsigned long pending; + unsigned long __maybe_unused copy; pr_debug("IPI [%ld] received on cpu %d\n", *this_cpu_ptr(&ipi_data), smp_processor_id()); @@ -316,11 +321,18 @@ irqreturn_t do_IPI(int irq, void *dev_id) * "dequeue" the msg corresponding to this IPI (and possibly other * piggybacked msg from elided IPIs: see ipi_send_msg_one() above) */ - pending = xchg(this_cpu_ptr(&ipi_data), 0); + copy = pending = xchg(this_cpu_ptr(&ipi_data), 0); do { unsigned long msg = __ffs(pending); - __do_IPI(msg); + int rc; + + rc = __do_IPI(msg); +#ifdef CONFIG_ARC_IPI_DBG + /* IPI received but no valid @msg */ + if (rc) + pr_info("IPI with bogus msg %ld in %ld\n", msg, copy); +#endif pending &= ~(1U << msg); } while (pending); diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c index 92320d6f7..001de4ce7 100644 --- a/arch/arc/kernel/stacktrace.c +++ b/arch/arc/kernel/stacktrace.c @@ -122,19 +122,17 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs, while (1) { address = UNW_PC(&frame_info); - if (address && __kernel_text_address(address)) { - if (consumer_fn(address, arg) == -1) - break; - } + if (!address || !__kernel_text_address(address)) + break; - ret = arc_unwind(&frame_info); + if (consumer_fn(address, arg) == -1) + break; - if (ret == 0) { - frame_info.regs.r63 = frame_info.regs.r31; - continue; - } else { + ret = arc_unwind(&frame_info); + if (ret) break; - } + + frame_info.regs.r63 = frame_info.regs.r31; } return address; /* return the last address it saw */ diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c index dbe74f418..4294761a2 100644 --- a/arch/arc/kernel/time.c +++ b/arch/arc/kernel/time.c @@ -26,6 +26,7 @@ * while TIMER1 for free running (clocksource) * * Newer ARC700 cores have 64bit clk fetching RTSC insn, preferred over TIMER1 + * which however is currently broken */ #include <linux/spinlock.h> @@ -44,6 +45,8 @@ #include <asm/clk.h> #include <asm/mach_desc.h> +#include <asm/mcip.h> + /* Timer related Aux registers */ #define ARC_REG_TIMER0_LIMIT 0x23 /* timer 0 limit */ #define ARC_REG_TIMER0_CTRL 0x22 /* timer 0 control */ @@ -59,14 +62,10 @@ /********** Clock Source Device *********/ -#ifdef CONFIG_ARC_HAS_RTSC +#ifdef CONFIG_ARC_HAS_GRTC -int arc_counter_setup(void) 
+static int arc_counter_setup(void) { - /* - * For SMP this needs to be 0. However Kconfig glue doesn't - * enable this option for SMP configs - */ return 1; } @@ -75,45 +74,84 @@ static cycle_t arc_counter_read(struct clocksource *cs) unsigned long flags; union { #ifdef CONFIG_CPU_BIG_ENDIAN - struct { u32 high, low; }; + struct { u32 h, l; }; #else - struct { u32 low, high; }; + struct { u32 l, h; }; #endif cycle_t full; } stamp; - flags = arch_local_irq_save(); + local_irq_save(flags); - __asm__ __volatile( - " .extCoreRegister tsch, 58, r, cannot_shortcut \n" - " rtsc %0, 0 \n" - " mov %1, 0 \n" - : "=r" (stamp.low), "=r" (stamp.high)); + __mcip_cmd(CMD_GRTC_READ_LO, 0); + stamp.l = read_aux_reg(ARC_REG_MCIP_READBACK); + + __mcip_cmd(CMD_GRTC_READ_HI, 0); + stamp.h = read_aux_reg(ARC_REG_MCIP_READBACK); - arch_local_irq_restore(flags); + local_irq_restore(flags); return stamp.full; } static struct clocksource arc_counter = { - .name = "ARC RTSC", - .rating = 300, + .name = "ARConnect GRTC", + .rating = 400, .read = arc_counter_read, - .mask = CLOCKSOURCE_MASK(32), + .mask = CLOCKSOURCE_MASK(64), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; -#else /* !CONFIG_ARC_HAS_RTSC */ +#else -static bool is_usable_as_clocksource(void) +#ifdef CONFIG_ARC_HAS_RTC + +#define AUX_RTC_CTRL 0x103 +#define AUX_RTC_LOW 0x104 +#define AUX_RTC_HIGH 0x105 + +int arc_counter_setup(void) { -#ifdef CONFIG_SMP - return 0; + write_aux_reg(AUX_RTC_CTRL, 1); + + /* Not usable in SMP */ + return !IS_ENABLED(CONFIG_SMP); +} + +static cycle_t arc_counter_read(struct clocksource *cs) +{ + unsigned long status; + union { +#ifdef CONFIG_CPU_BIG_ENDIAN + struct { u32 high, low; }; #else - return 1; + struct { u32 low, high; }; #endif + cycle_t full; + } stamp; + + + __asm__ __volatile( + "1: \n" + " lr %0, [AUX_RTC_LOW] \n" + " lr %1, [AUX_RTC_HIGH] \n" + " lr %2, [AUX_RTC_CTRL] \n" + " bbit0.nt %2, 31, 1b \n" + : "=r" (stamp.low), "=r" (stamp.high), "=r" (status)); + + return stamp.full; } +static struct clocksource arc_counter = { + .name = "ARCv2 RTC", + .rating = 350, + .read = arc_counter_read, + .mask = CLOCKSOURCE_MASK(64), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +#else /* !CONFIG_ARC_HAS_RTC */ + /* * set 32bit TIMER1 to keep counting monotonically and wraparound */ @@ -123,7 +161,8 @@ int arc_counter_setup(void) write_aux_reg(ARC_REG_TIMER1_CNT, 0); write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH); - return is_usable_as_clocksource(); + /* Not usable in SMP */ + return !IS_ENABLED(CONFIG_SMP); } static cycle_t arc_counter_read(struct clocksource *cs) @@ -140,6 +179,7 @@ static struct clocksource arc_counter = { }; #endif +#endif /********** Clock Event Device *********/ @@ -163,34 +203,24 @@ static int arc_clkevent_set_next_event(unsigned long delta, return 0; } -static void arc_clkevent_set_mode(enum clock_event_mode mode, - struct clock_event_device *dev) +static int arc_clkevent_set_periodic(struct clock_event_device *dev) { - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - /* - * At X Hz, 1 sec = 1000ms -> X cycles; - * 10ms -> X / 100 cycles - */ - arc_timer_event_setup(arc_get_core_freq() / HZ); - break; - case CLOCK_EVT_MODE_ONESHOT: - break; - default: - break; - } - - return; + /* + * At X Hz, 1 sec = 1000ms -> X cycles; + * 10ms -> X / 100 cycles + */ + arc_timer_event_setup(arc_get_core_freq() / HZ); + return 0; } static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = { - .name = "ARC Timer0", - .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, - .mode = 
CLOCK_EVT_MODE_UNUSED, - .rating = 300, - .irq = TIMER0_IRQ, /* hardwired, no need for resources */ - .set_next_event = arc_clkevent_set_next_event, - .set_mode = arc_clkevent_set_mode, + .name = "ARC Timer0", + .features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_PERIODIC, + .rating = 300, + .irq = TIMER0_IRQ, /* hardwired, no need for resources */ + .set_next_event = arc_clkevent_set_next_event, + .set_state_periodic = arc_clkevent_set_periodic, }; static irqreturn_t timer_irq_handler(int irq, void *dev_id) @@ -200,7 +230,7 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id) * irq_set_chip_and_handler() asked for handle_percpu_devid_irq() */ struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); - int irq_reenable = evt->mode == CLOCK_EVT_MODE_PERIODIC; + int irq_reenable = clockevent_state_periodic(evt); /* * Any write to CTRL reg ACks the interrupt, we rewrite the diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index e00a01879..a6f91e88c 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c @@ -14,6 +14,7 @@ #include <linux/proc_fs.h> #include <linux/file.h> #include <asm/arcregs.h> +#include <asm/irqflags.h> /* * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25) @@ -34,7 +35,10 @@ static noinline void print_reg_file(long *reg_rev, int start_num) n += scnprintf(buf + n, len - n, "\n"); /* because pt_regs has regs reversed: r12..r0, r25..r13 */ - reg_rev--; + if (is_isa_arcv2() && start_num == 0) + reg_rev++; + else + reg_rev--; } if (start_num != 0) @@ -54,7 +58,6 @@ static void show_callee_regs(struct callee_regs *cregs) static void print_task_path_n_nm(struct task_struct *tsk, char *buf) { - struct path path; char *path_nm = NULL; struct mm_struct *mm; struct file *exe_file; @@ -67,15 +70,12 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf) mmput(mm); if (exe_file) { - path = exe_file->f_path; - path_get(&exe_file->f_path); + path_nm = file_path(exe_file, buf, 255); fput(exe_file); - path_nm = d_path(&path, buf, 255); - path_put(&path); } done: - pr_info("Path: %s\n", path_nm); + pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?"); } static void show_faulting_vma(unsigned long address, char *buf) @@ -99,8 +99,7 @@ static void show_faulting_vma(unsigned long address, char *buf) if (vma && (vma->vm_start <= address)) { struct file *file = vma->vm_file; if (file) { - struct path *path = &file->f_path; - nm = d_path(path, buf, PAGE_SIZE - 1); + nm = file_path(file, buf, PAGE_SIZE - 1); inode = file_inode(vma->vm_file); dev = inode->i_sb->s_dev; ino = inode->i_ino; @@ -152,6 +151,15 @@ static void show_ecr_verbose(struct pt_regs *regs) ((cause_code == 0x02) ? "Write" : "EX")); } else if (vec == ECR_V_INSN_ERR) { pr_cont("Illegal Insn\n"); +#ifdef CONFIG_ISA_ARCV2 + } else if (vec == ECR_V_MEM_ERR) { + if (cause_code == 0x00) + pr_cont("Bus Error from Insn Mem\n"); + else if (cause_code == 0x10) + pr_cont("Bus Error from Data Mem\n"); + else + pr_cont("Bus Error, check PRM\n"); +#endif } else { pr_cont("Check Programmer's Manual\n"); } @@ -185,12 +193,20 @@ void show_regs(struct pt_regs *regs) pr_info("[STAT32]: 0x%08lx", regs->status32); -#define STS_BIT(r, bit) r->status32 & STATUS_##bit##_MASK ? #bit : "" - if (!user_mode(regs)) - pr_cont(" : %2s %2s %2s %2s %2s\n", - STS_BIT(regs, AE), STS_BIT(regs, A2), STS_BIT(regs, A1), - STS_BIT(regs, E2), STS_BIT(regs, E1)); +#define STS_BIT(r, bit) r->status32 & STATUS_##bit##_MASK ? 
#bit" " : "" +#ifdef CONFIG_ISA_ARCOMPACT + pr_cont(" : %2s%2s%2s%2s%2s%2s%2s\n", + (regs->status32 & STATUS_U_MASK) ? "U " : "K ", + STS_BIT(regs, DE), STS_BIT(regs, AE), + STS_BIT(regs, A2), STS_BIT(regs, A1), + STS_BIT(regs, E2), STS_BIT(regs, E1)); +#else + pr_cont(" : %2s%2s%2s%2s\n", + STS_BIT(regs, IE), + (regs->status32 & STATUS_U_MASK) ? "U " : "K ", + STS_BIT(regs, DE), STS_BIT(regs, AE)); +#endif pr_info("BTA: 0x%08lx\t SP: 0x%08lx\t FP: 0x%08lx\n", regs->bta, regs->sp, regs->fp); pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n", |