Diffstat (limited to 'arch/arc/kernel')
 arch/arc/kernel/ctx_sw.c        |  2
 arch/arc/kernel/ctx_sw_asm.S    |  3
 arch/arc/kernel/entry-arcv2.S   | 21
 arch/arc/kernel/entry-compact.S | 72
 arch/arc/kernel/entry.S         | 17
 arch/arc/kernel/head.S          | 49
 arch/arc/kernel/intc-arcv2.c    | 15
 arch/arc/kernel/intc-compact.c  | 90
 arch/arc/kernel/irq.c           | 45
 arch/arc/kernel/mcip.c          | 46
 arch/arc/kernel/perf_event.c    | 32
 arch/arc/kernel/process.c       |  9
 arch/arc/kernel/setup.c         |  8
 arch/arc/kernel/smp.c           | 66
 arch/arc/kernel/time.c          |  3
 arch/arc/kernel/unwind.c        | 94
 arch/arc/kernel/vmlinux.lds.S   |  2
17 files changed, 255 insertions(+), 319 deletions(-)
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c
index c14a5bea0..5d446df2c 100644
--- a/arch/arc/kernel/ctx_sw.c
+++ b/arch/arc/kernel/ctx_sw.c
@@ -58,8 +58,6 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
 	"st	sp, [r24]	\n\t"
 #endif
 
-	"sync   \n\t"
-
 	/*
 	 * setup _current_task with incoming tsk.
 	 * optionally, set r25 to that as well
diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S
index e24859409..e6890b1f8 100644
--- a/arch/arc/kernel/ctx_sw_asm.S
+++ b/arch/arc/kernel/ctx_sw_asm.S
@@ -44,9 +44,6 @@ __switch_to:
 	 * don't need to do anything special to return it
 	 */
 
-	/* hardware memory barrier */
-	sync
-
 	/*
 	 * switch to new task, contained in r1
 	 * Temp reg r3 is required to get the ptr to store val
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index 8fa76567e..cbfec7913 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -24,7 +24,7 @@
 	.align 4
 
 # Initial 16 slots are Exception Vectors
-VECTOR	stext			; Restart Vector (jump to entry point)
+VECTOR	res_service		; Reset Vector
 VECTOR	mem_service		; Mem exception
 VECTOR	instr_service		; Instrn Error
 VECTOR	EV_MachineCheck		; Fatal Machine check
@@ -91,6 +91,25 @@ ENTRY(EV_DCError)
 	flag 1
 END(EV_DCError)
 
+; ---------------------------------------------
+; Memory Error Exception Handler
+; - Unlike ARCompact, handles Bus errors for both User/Kernel mode,
+;   Instruction fetch or Data access, under a single Exception Vector
+; ---------------------------------------------
+
+ENTRY(mem_service)
+
+	EXCEPTION_PROLOGUE
+
+	lr  r0, [efa]
+	mov r1, sp
+
+	FAKE_RET_FROM_EXCPN
+
+	bl  do_memory_error
+	b   ret_from_exception
+END(mem_service)
+
 ENTRY(EV_Misaligned)
 
 	EXCEPTION_PROLOGUE
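The new ARCv2 vector and the ARCompact handlers further down all funnel into the same C routine, do_memory_error(). For orientation, a minimal sketch of what such a handler amounts to; the in-tree version is macro-generated by DO_ERROR_INFO() in arch/arc/kernel/traps.c, so the signal plumbing below is illustrative (4.x-era APIs assumed), not the kernel's literal code:

#include <linux/sched.h>
#include <linux/signal.h>
#include <asm/ptrace.h>
#include <asm/bug.h>		/* die() */

int do_memory_error(unsigned long address, struct pt_regs *regs)
{
	siginfo_t info;

	if (user_mode(regs)) {
		/* user fault: queue SIGBUS against the offending task */
		info.si_signo = SIGBUS;
		info.si_errno = 0;
		info.si_code  = BUS_ADRERR;
		info.si_addr  = (void __user *)address;
		force_sig_info(SIGBUS, &info, current);
		return 0;
	}

	/* a bus error in kernel context is fatal */
	die("Invalid Mem Access", regs, address);
	return 1;
}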
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S
index 15d457b44..431433929 100644
--- a/arch/arc/kernel/entry-compact.S
+++ b/arch/arc/kernel/entry-compact.S
@@ -86,7 +86,7 @@
  */
 
 ; ********* Critical System Events **********************
-VECTOR   res_service             ; 0x0, Restart Vector  (0x0)
+VECTOR   res_service             ; 0x0, Reset Vector    (0x0)
 VECTOR   mem_service             ; 0x8, Mem exception   (0x1)
 VECTOR   instr_service           ; 0x10, Instrn Error   (0x2)
 
@@ -142,26 +142,18 @@ int1_saved_reg:
 	.zero 4
 
 /* Each Interrupt level needs its own scratch */
-#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
-
 ARCFP_DATA int2_saved_reg
 	.type   int2_saved_reg, @object
 	.size   int2_saved_reg, 4
 int2_saved_reg:
 	.zero 4
 
-#endif
-
 ; ---------------------------------------------
 	.section .text, "ax",@progbits
 
-res_service:		; processor restart
-	flag    0x1     ; not implemented
-	nop
-	nop
-
-reserved:		; processor restart
-	rtie            ; jump to processor initializations
+reserved:
+	flag 1		; Unexpected event, halt
 
 ;##################### Interrupt Handling ##############################
 
@@ -175,12 +167,25 @@ ENTRY(handle_interrupt_level2)
 
 	;------------------------------------------------------
 	; if L2 IRQ interrupted an L1 ISR, disable preemption
+	;
+	; This is to avoid a potential L1-L2-L1 scenario
+	;  -L1 IRQ taken
+	;  -L2 interrupts L1 (before L1 ISR could run)
+	;  -preemption off IRQ, user task in syscall picked to run
+	;  -RTIE to userspace
+	;     Returns from L2 context fine
+	;     But both L1 and L2 re-enabled, so another L1 can be taken
+	;     while prev L1 is still unserviced
+	;
 	;------------------------------------------------------
 
+	; L2 interrupting L1 implies both L2 and L1 active
+	; However both A2 and A1 are NOT set in STATUS32, thus
+	; need to check STATUS32_L2 to determine if L1 was active
+
 	ld r9, [sp, PT_status32]	; get status32_l2 (saved in pt_regs)
 	bbit0 r9, STATUS_A1_BIT, 1f	; L1 not active when L2 IRQ, so normal
 
-	; A1 is set in status32_l2
 	; bump thread_info->preempt_count (Disable preemption)
 	GET_CURR_THR_INFO_FROM_SP   r10
 	ld r9, [r10, THREAD_INFO_PREEMPT_COUNT]
@@ -207,6 +212,31 @@ END(handle_interrupt_level2)
 #endif
 
 ; ---------------------------------------------
+; User Mode Memory Bus Error Interrupt Handler
+; (Kernel mode memory errors handled via separate exception vectors)
+; ---------------------------------------------
+ENTRY(mem_service)
+
+	INTERRUPT_PROLOGUE 2
+
+	mov r0, ilink2
+	mov r1, sp
+
+	; User process needs to be killed with SIGBUS, but first need to get
+	; out of the L2 interrupt context (drop to pure kernel mode) and jump
+	; off to "C" code where SIGBUS is enqueued
+	lr  r3, [status32]
+	bclr r3, r3, STATUS_A2_BIT
+	or  r3, r3, (STATUS_E1_MASK|STATUS_E2_MASK)
+	sr  r3, [status32_l2]
+	mov ilink2, 1f
+	rtie
+1:
+	bl  do_memory_error
+	b   ret_from_exception
+END(mem_service)
+
+; ---------------------------------------------
 ; Level 1 ISR
 ; ---------------------------------------------
 ENTRY(handle_interrupt_level1)
@@ -320,11 +350,10 @@ END(call_do_page_fault)
 	; Note that we use realtime STATUS32 (not pt_regs->status32) to
 	; decide that.
 
-	; if Returning from Exception
-	btst	r10, STATUS_AE_BIT
-	bnz	.Lexcep_ret
+	and.f	0, r10, (STATUS_A1_MASK|STATUS_A2_MASK)
+	bz	.Lexcep_or_pure_K_ret
 
-	; Not Exception so maybe Interrupts (Level 1 or 2)
+	; Returning from Interrupts (Level 1 or 2)
 
 #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
 
@@ -365,8 +394,7 @@ END(call_do_page_fault)
 	st	r9, [r10, THREAD_INFO_PREEMPT_COUNT]
 
 149:
-	;return from level 2
-	INTERRUPT_EPILOGUE 2
+	INTERRUPT_EPILOGUE 2	; return from level 2 interrupt
 debug_marker_l2:
 	rtie
 
@@ -374,15 +402,11 @@ not_level2_interrupt:
 
 #endif
 
-	bbit0	r10, STATUS_A1_BIT, .Lpure_k_mode_ret
-
-	;return from level 1
-	INTERRUPT_EPILOGUE 1
+	INTERRUPT_EPILOGUE 1	; return from level 1 interrupt
 debug_marker_l1:
 	rtie
 
-.Lexcep_ret:
-.Lpure_k_mode_ret:
+.Lexcep_or_pure_K_ret:
 
 	;this case is for syscalls or Exceptions or pure kernel mode
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 589abf517..2efb06253 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -93,23 +93,6 @@ ENTRY(instr_service)
 END(instr_service)
 
 ; ---------------------------------------------
-; Memory Error Exception Handler
-; ---------------------------------------------
-
-ENTRY(mem_service)
-
-	EXCEPTION_PROLOGUE
-
-	lr  r0, [efa]
-	mov r1, sp
-
-	FAKE_RET_FROM_EXCPN
-
-	bl  do_memory_error
-	b   ret_from_exception
-END(mem_service)
-
-; ---------------------------------------------
 ; Machine Check Exception Handler
 ; ---------------------------------------------
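The preempt_count manipulation in handle_interrupt_level2 above is easier to follow in C terms. A rough model of what the prologue and epilogue do around the count (helper names are made up; the kernel only has the assembly):

#include <linux/preempt.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>	/* STATUS_A1_MASK, per irqflags-compact.h */

/* hypothetical helpers modelling the asm above, not in-tree functions */
static inline void l2_isr_entry(struct pt_regs *regs)
{
	/* A1 set in saved STATUS32_L2 => this L2 IRQ interrupted an L1 ISR */
	if (regs->status32 & STATUS_A1_MASK)
		preempt_count_add(1);	/* keep scheduler out until L2 RTIEs */
}

static inline void l2_isr_exit(struct pt_regs *regs)
{
	if (regs->status32 & STATUS_A1_MASK)
		preempt_count_sub(1);	/* matching decrement in the epilogue */
}

The point of the bump: an RTIE back from L2 must not let the scheduler run a user task while the interrupted L1 ISR is still half-serviced, which is exactly the L1-L2-L1 scenario the comment block describes.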
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 812f95e6a..689dd867f 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -50,28 +50,37 @@
 .endm
 
 	.section .init.text, "ax",@progbits
-	.type stext, @function
-	.globl stext
-stext:
-	;-------------------------------------------------------------------
-	; Don't clobber r0-r2 yet. It might have bootloader provided info
-	;-------------------------------------------------------------------
+
+;----------------------------------------------------------------
+; Default Reset Handler (jumped into from Reset vector)
+; - Don't clobber r0,r1,r2 as they might have u-boot provided args
+; - Platforms can override this weak version if needed
+;----------------------------------------------------------------
+WEAK(res_service)
+	j	stext
+END(res_service)
+
+;----------------------------------------------------------------
+; Kernel Entry point
+;----------------------------------------------------------------
+ENTRY(stext)
 
 	CPU_EARLY_SETUP
 
 #ifdef CONFIG_SMP
-	; Ensure Boot (Master) proceeds. Others wait in platform dependent way
-	;	IDENTITY Reg [ 3  2  1  0 ]
-	;	(cpu-id)             ^^^	=> Zero for UP ARC700
-	;					=> #Core-ID if SMP (Master 0)
-	; Note that non-boot CPUs might not land here if halt-on-reset and
-	; instead breath life from @first_lines_of_secondary, but we still
-	; need to make sure only boot cpu takes this path.
 	GET_CPU_ID  r5
 	cmp	r5, 0
-	mov.ne	r0, r5
-	jne	arc_platform_smp_wait_to_boot
+	mov.nz	r0, r5
+#ifdef CONFIG_ARC_SMP_HALT_ON_RESET
+	; Non-Masters can proceed as system would be booted sufficiently
+	jnz	first_lines_of_secondary
+#else
+	; Non-Masters wait for Master to boot enough and bring them up
+	jnz	arc_platform_smp_wait_to_boot
+#endif
+	; Master falls thru
 #endif
+
 	; Clear BSS before updating any globals
 	; XXX: use ZOL here
 	mov	r5, __bss_start
@@ -102,18 +111,14 @@ stext:
 	GET_TSK_STACK_BASE r9, sp	; r9 = tsk, sp = stack base(output)
 
 	j	start_kernel	; "C" entry point
+END(stext)
 
 #ifdef CONFIG_SMP
 ;----------------------------------------------------------------
 ;     First lines of code run by secondary before jumping to 'C'
 ;----------------------------------------------------------------
 	.section .text, "ax",@progbits
-	.type first_lines_of_secondary, @function
-	.globl first_lines_of_secondary
-
-first_lines_of_secondary:
-
-	CPU_EARLY_SETUP
+ENTRY(first_lines_of_secondary)
 
 	; setup per-cpu idle task as "current" on this CPU
 	ld	r0, [@secondary_idle_tsk]
@@ -126,5 +131,5 @@ first_lines_of_secondary:
 	GET_TSK_STACK_BASE r0, sp
 
 	j	start_kernel_secondary
-
+END(first_lines_of_secondary)
 #endif
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 26c156827..0394f9f61 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -106,10 +106,21 @@ static struct irq_chip arcv2_irq_chip = {
 static int arcv2_irq_map(struct irq_domain *d, unsigned int irq,
			 irq_hw_number_t hw)
 {
-	if (irq == TIMER0_IRQ || irq == IPI_IRQ)
+	/*
+	 * core intc IRQs [16, 23]:
+	 * Statically assigned always private-per-core (Timers, WDT, IPI, PCT)
+	 */
+	if (hw < 24) {
+		/*
+		 * A subsequent request_percpu_irq() fails if percpu_devid is
+		 * not set. That in turn sets NOAUTOEN, meaning each core needs
+		 * to call enable_percpu_irq()
+		 */
+		irq_set_percpu_devid(irq);
 		irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq);
-	else
+	} else {
 		irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq);
+	}
 
 	return 0;
 }
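With the map function marking hwirqs below 24 as percpu_devid, users follow the standard genirq per-cpu pattern: one request on the boot CPU, then a local enable on every core (NOAUTOEN is implied by irq_set_percpu_devid()). A hypothetical user, with all identifiers made up:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, dummy_dev);	/* per-cpu cookie, made up */

static irqreturn_t dummy_isr(int irq, void *dev)
{
	return IRQ_HANDLED;
}

/* boot CPU requests once; every CPU (incl. boot) enables its own copy */
static int dummy_percpu_setup(int irq, bool boot_cpu)
{
	int rc = 0;

	if (boot_cpu)
		rc = request_percpu_irq(irq, dummy_isr, "dummy", &dummy_dev);

	if (!rc)
		enable_percpu_irq(irq, IRQ_TYPE_NONE);	/* undo NOAUTOEN */
	return rc;
}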
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index 039fac30b..06bcedf19 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -79,17 +79,16 @@ static struct irq_chip onchip_intc = {
 static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
			       irq_hw_number_t hw)
 {
-	/*
-	 * XXX: the IPI IRQ needs to be handled like TIMER too. However ARC core
-	 *	code doesn't own it (like TIMER0). ISS IDU / ezchip define it
-	 *	in platform header which can't be included here as it goes
-	 *	against multi-platform image philisophy
-	 */
-	if (irq == TIMER0_IRQ)
+	switch (irq) {
+	case TIMER0_IRQ:
+#ifdef CONFIG_SMP
+	case IPI_IRQ:
+#endif
 		irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
-	else
+		break;
+	default:
 		irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq);
-
+	}
 	return 0;
 }
 
@@ -148,78 +147,15 @@ IRQCHIP_DECLARE(arc_intc, "snps,arc700-intc", init_onchip_IRQ);
 
 void arch_local_irq_enable(void)
 {
 	unsigned long flags = arch_local_save_flags();
 
-	/* Allow both L1 and L2 at the onset */
-	flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
-
-	/* Called from hard ISR (between irq_enter and irq_exit) */
-	if (in_irq()) {
-
-		/* If in L2 ISR, don't re-enable any further IRQs as this can
-		 * cause IRQ priorities to get upside down. e.g. it could allow
-		 * L1 be taken while in L2 hard ISR which is wrong not only in
-		 * theory, it can also cause the dreaded L1-L2-L1 scenario
-		 */
-		if (flags & STATUS_A2_MASK)
-			flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK);
-
-		/* Even if in L1 ISR, allowe Higher prio L2 IRQs */
-		else if (flags & STATUS_A1_MASK)
-			flags &= ~(STATUS_E1_MASK);
-	}
-
-	/* called from soft IRQ, ideally we want to re-enable all levels */
-
-	else if (in_softirq()) {
-
-		/* However if this is case of L1 interrupted by L2,
-		 * re-enabling both may cause whaco L1-L2-L1 scenario
-		 * because ARC700 allows level 1 to interrupt an active L2 ISR
-		 * Thus we disable both
-		 * However some code, executing in soft ISR wants some IRQs
-		 * to be enabled so we re-enable L2 only
-		 *
-		 * How do we determine L1 intr by L2
-		 *  -A2 is set (means in L2 ISR)
-		 *  -E1 is set in this ISR's pt_regs->status32 which is
-		 *	saved copy of status32_l2 when l2 ISR happened
-		 */
-		struct pt_regs *pt = get_irq_regs();
-
-		if ((flags & STATUS_A2_MASK) && pt &&
-		    (pt->status32 & STATUS_A1_MASK)) {
-			/*flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); */
-			flags &= ~(STATUS_E1_MASK);
-		}
-	}
+	if (flags & STATUS_A2_MASK)
+		flags |= STATUS_E2_MASK;
+	else if (flags & STATUS_A1_MASK)
+		flags |= STATUS_E1_MASK;
 
 	arch_local_irq_restore(flags);
 }
 
-#else	/* !CONFIG_ARC_COMPACT_IRQ_LEVELS */
-
-/*
- * Simpler version for only 1 level of interrupt
- * Here we only Worry about Level 1 Bits
- */
-void arch_local_irq_enable(void)
-{
-	unsigned long flags;
-
-	/*
-	 * ARC IDE Drivers tries to re-enable interrupts from hard-isr
-	 * context which is simply wrong
-	 */
-	if (in_irq()) {
-		WARN_ONCE(1, "IRQ enabled from hard-isr");
-		return;
-	}
-
-	flags = arch_local_save_flags();
-	flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
-	arch_local_irq_restore(flags);
-}
-#endif
 EXPORT_SYMBOL(arch_local_irq_enable);
+#endif
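The replacement logic is small enough to model in userspace. The sketch below (mask bit positions mirror arch/arc/include/asm/irqflags-compact.h) demonstrates the property the old in_irq()/in_softirq() maze was protecting: enabling interrupts from inside an L2 ISR must not unmask L1:

#include <assert.h>

/* bit positions per arch/arc/include/asm/irqflags-compact.h */
#define STATUS_E1_MASK	(1UL << 1)	/* L1 interrupts enabled */
#define STATUS_E2_MASK	(1UL << 2)	/* L2 interrupts enabled */
#define STATUS_A1_MASK	(1UL << 3)	/* servicing an L1 interrupt */
#define STATUS_A2_MASK	(1UL << 4)	/* servicing an L2 interrupt */

static unsigned long irq_enable_model(unsigned long flags)
{
	if (flags & STATUS_A2_MASK)
		flags |= STATUS_E2_MASK;	/* in L2 ISR: only L2 back on */
	else if (flags & STATUS_A1_MASK)
		flags |= STATUS_E1_MASK;	/* in L1 ISR: L1 back on */
	return flags;
}

int main(void)
{
	/* enabling from an L2 ISR must leave L1 gated (no L1-L2-L1) */
	assert(!(irq_enable_model(STATUS_A2_MASK) & STATUS_E1_MASK));
	/* from an L1 ISR, L1 itself is re-enabled */
	assert(irq_enable_model(STATUS_A1_MASK) & STATUS_E1_MASK);
	return 0;
}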
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index 2989a7bcf..ba17f8528 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -10,6 +10,7 @@
 #include <linux/interrupt.h>
 #include <linux/irqchip.h>
 #include <asm/mach_desc.h>
+#include <asm/smp.h>
 
 /*
  * Late Interrupt system init called from start_kernel for Boot CPU only
@@ -19,17 +20,20 @@
  */
 void __init init_IRQ(void)
 {
-	/* Any external intc can be setup here */
-	if (machine_desc->init_irq)
-		machine_desc->init_irq();
-
-	/* process the entire interrupt tree in one go */
+	/*
+	 * process the entire interrupt tree in one go
+	 * Any external intc will be setup provided DT chains them
+	 * properly
+	 */
 	irqchip_init();
 
 #ifdef CONFIG_SMP
-	/* Master CPU can initialize it's side of IPI */
-	if (machine_desc->init_smp)
-		machine_desc->init_smp(smp_processor_id());
+	/* a SMP H/w block could do IPI IRQ request here */
+	if (plat_smp_ops.init_per_cpu)
+		plat_smp_ops.init_per_cpu(smp_processor_id());
+
+	if (machine_desc->init_per_cpu)
+		machine_desc->init_per_cpu(smp_processor_id());
 #endif
 }
 
@@ -47,6 +51,18 @@ void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
 	set_irq_regs(old_regs);
 }
 
+/*
+ * API called for requesting percpu interrupts - called by each CPU
+ *  - For boot CPU, actually request the IRQ with genirq core + enables
+ *  - For subsequent callers only enable called locally
+ *
+ * Relies on being called by boot cpu first (i.e. request called ahead of
+ * any enable) as expected by genirq. Hence suitable only for TIMER, IPI
+ * which are guaranteed to be setup on boot core first.
+ * Late probed peripherals such as perf can't use this as there's no guarantee
+ * of being called on boot CPU first.
+ */
+
 void arc_request_percpu_irq(int irq, int cpu,
			    irqreturn_t (*isr)(int irq, void *dev),
			    const char *irq_nm,
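A sketch of a guaranteed-early caller, in the spirit of the per-cpu timer setup the comment above alludes to. Identifiers here are illustrative; the arc_request_percpu_irq() declaration lives in the arch headers (assumed below to be reachable via asm/irq.h):

#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/irq.h>	/* TIMER0_IRQ; arc_request_percpu_irq() decl assumed */

static DEFINE_PER_CPU(int, tick_cookie);	/* stand-in per-cpu dev id */

static irqreturn_t tick_isr(int irq, void *dev)
{
	/* ack the core timer, run the clockevent handler, etc. */
	return IRQ_HANDLED;
}

/* runs on the boot CPU first, then on each secondary as it comes up */
static void tick_setup_this_cpu(void)
{
	arc_request_percpu_irq(TIMER0_IRQ, smp_processor_id(), tick_isr,
			       "Timer0 (per-cpu-tick)",
			       this_cpu_ptr(&tick_cookie));
}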
@@ -56,14 +72,17 @@ void arc_request_percpu_irq(int irq, int cpu,
 	if (!cpu) {
 		int rc;
 
+#ifdef CONFIG_ISA_ARCOMPACT
 		/*
-		 * These 2 calls are essential to making percpu IRQ APIs work
-		 * Ideally these details could be hidden in irq chip map function
-		 * but the issue is IPIs IRQs being static (non-DT) and platform
-		 * specific, so we can't identify them there.
+		 * A subsequent request_percpu_irq() fails if percpu_devid is
+		 * not set. That in turn sets NOAUTOEN, meaning each core needs
+		 * to call enable_percpu_irq()
+		 *
+		 * For ARCv2, this is done in irq map function since we know
+		 * which irqs are strictly per cpu
 		 */
 		irq_set_percpu_devid(irq);
-		irq_modify_status(irq, IRQ_NOAUTOEN, 0);	/* @irq, @clr, @set */
+#endif
 
 		rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
 		if (rc)
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 4ffd1855f..bd237acdf 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -12,20 +12,14 @@
 #include <linux/irq.h>
 #include <linux/spinlock.h>
 #include <asm/mcip.h>
+#include <asm/setup.h>
 
 static char smp_cpuinfo_buf[128];
 static int idu_detected;
 
 static DEFINE_RAW_SPINLOCK(mcip_lock);
 
-/*
- * Any SMP specific init any CPU does when it comes up.
- * Here we setup the CPU to enable Inter-Processor-Interrupts
- * Called for each CPU
- * -Master       : init_IRQ()
- * -Other(s)     : start_kernel_secondary()
- */
-void mcip_init_smp(unsigned int cpu)
+static void mcip_setup_per_cpu(int cpu)
 {
	smp_ipi_irq_setup(cpu, IPI_IRQ);
 }
@@ -96,34 +90,8 @@ static void mcip_ipi_clear(int irq)
 #endif
 }
 
-volatile int wake_flag;
-
-static void mcip_wakeup_cpu(int cpu, unsigned long pc)
-{
-	BUG_ON(cpu == 0);
-	wake_flag = cpu;
-}
-
-void arc_platform_smp_wait_to_boot(int cpu)
+static void mcip_probe_n_setup(void)
 {
-	while (wake_flag != cpu)
-		;
-
-	wake_flag = 0;
-	__asm__ __volatile__("j @first_lines_of_secondary	\n");
-}
-
-struct plat_smp_ops plat_smp_ops = {
-	.info		= smp_cpuinfo_buf,
-	.cpu_kick	= mcip_wakeup_cpu,
-	.ipi_send	= mcip_ipi_send,
-	.ipi_clear	= mcip_ipi_clear,
-};
-
-void mcip_init_early_smp(void)
-{
-#define IS_AVAIL1(var, str)    ((var) ? str : "")
-
 	struct mcip_bcr {
 #ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad3:8,
@@ -161,6 +129,14 @@ void mcip_init_early_smp(void)
		panic("kernel trying to use non-existent GRTC\n");
 }
 
+struct plat_smp_ops plat_smp_ops = {
+	.info		= smp_cpuinfo_buf,
+	.init_early_smp	= mcip_probe_n_setup,
+	.init_per_cpu	= mcip_setup_per_cpu,
+	.ipi_send	= mcip_ipi_send,
+	.ipi_clear	= mcip_ipi_clear,
+};
+
 /***************************************************************************
  * ARCv2 Interrupt Distribution Unit (IDU)
  *
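The net effect of the mcip.c rework is the pattern below: exactly one SMP hardware block in a given build populates plat_smp_ops with its hooks, and generic code invokes them at the documented points. Sketch with a hypothetical block (every foo_* name is made up):

#include <asm/smp.h>	/* struct plat_smp_ops, smp_ipi_irq_setup() */
#include <asm/irq.h>	/* IPI_IRQ */

static void foo_probe_n_setup(void)	{ /* BCR probe, panic if absent */ }
static void foo_per_cpu(int cpu)	{ smp_ipi_irq_setup(cpu, IPI_IRQ); }
static void foo_ipi_send(int cpu)	{ /* poke the h/w mailbox */ }
static void foo_ipi_clear(int irq)	{ /* ack the h/w mailbox */ }

/* exactly one SMP block per build provides this instance */
struct plat_smp_ops plat_smp_ops = {
	.info		= "foo-smp",
	.init_early_smp	= foo_probe_n_setup,	/* once, from smp_init_cpus() */
	.init_per_cpu	= foo_per_cpu,	/* per cpu: init_IRQ() / secondary */
	.ipi_send	= foo_ipi_send,
	.ipi_clear	= foo_ipi_clear,
};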
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 0c08bb1ce..8b134cfe5 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -428,12 +428,11 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
 
 #endif /* CONFIG_ISA_ARCV2 */
 
-void arc_cpu_pmu_irq_init(void)
+static void arc_cpu_pmu_irq_init(void *data)
 {
-	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
+	int irq = *(int *)data;
 
-	arc_request_percpu_irq(arc_pmu->irq, smp_processor_id(), arc_pmu_intr,
-			       "ARC perf counters", pmu_cpu);
+	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 
 	/* Clear all pending interrupt flags */
 	write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
@@ -515,7 +514,6 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 
 	if (has_interrupts) {
 		int irq = platform_get_irq(pdev, 0);
-		unsigned long flags;
 
 		if (irq < 0) {
 			pr_err("Cannot get IRQ number for the platform\n");
@@ -524,24 +522,12 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 
 		arc_pmu->irq = irq;
 
-		/*
-		 * arc_cpu_pmu_irq_init() needs to be called on all cores for
-		 * their respective local PMU.
-		 * However we use opencoded on_each_cpu() to ensure it is called
-		 * on core0 first, so that arc_request_percpu_irq() sets up
-		 * AUTOEN etc. Otherwise enable_percpu_irq() fails to enable
-		 * perf IRQ on non master cores.
-		 * see arc_request_percpu_irq()
-		 */
-		preempt_disable();
-		local_irq_save(flags);
-		arc_cpu_pmu_irq_init();
-		local_irq_restore(flags);
-		smp_call_function((smp_call_func_t)arc_cpu_pmu_irq_init, 0, 1);
-		preempt_enable();
-
-		/* Clean all pending interrupt flags */
-		write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
+		/* intc map function ensures irq_set_percpu_devid() called */
+		request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
+				   this_cpu_ptr(&arc_pmu_cpu));
+
+		on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
+
 	} else
 		arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 91d5a0f1f..a3f750e76 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -44,11 +44,10 @@ SYSCALL_DEFINE0(arc_gettls)
 void arch_cpu_idle(void)
 {
 	/* sleep, but enable all interrupts before committing */
-	if (is_isa_arcompact()) {
-		__asm__("sleep 0x3");
-	} else {
-		__asm__("sleep 0x10");
-	}
+	__asm__ __volatile__(
+		"sleep %0	\n"
+		:
+		:"I"(ISA_SLEEP_ARG)); /* can't be "r", has to be an embedded const */
 }
 
 asmlinkage void ret_from_fork(void);
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index cabde9dc0..e1b87444e 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -160,10 +160,6 @@ static const struct cpuinfo_data arc_cpu_tbl[] = {
 	{ {0x00, NULL		} }
 };
 
-#define IS_AVAIL1(v, s)		((v) ? s : "")
-#define IS_USED_RUN(v)		((v) ? "" : "(not used) ")
-#define IS_USED_CFG(cfg)	IS_USED_RUN(IS_ENABLED(cfg))
-#define IS_AVAIL2(v, s, cfg)	IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
 
 static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 {
@@ -415,8 +411,9 @@ void __init setup_arch(char **cmdline_p)
 	if (machine_desc->init_early)
		machine_desc->init_early();
 
-	setup_processor();
 	smp_init_cpus();
+
+	setup_processor();
 	setup_arch_memory();
 
 	/* copy flat DT out of .init and then unflatten it */
@@ -432,7 +429,6 @@ void __init setup_arch(char **cmdline_p)
 #endif
 
 	arc_unwind_init();
-	arc_unwind_setup();
 }
 
 static int __init customize_machine(void)
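The process.c hunk above depends on ISA_SLEEP_ARG being a per-ISA build-time constant. The values come straight from the deleted is_isa_arcompact() branches; the header that actually hosts the #define is outside this diff, so treat the placement as an assumption:

/* values from the removed "sleep 0x3" / "sleep 0x10" branches above */
#ifdef CONFIG_ISA_ARCOMPACT
#define ISA_SLEEP_ARG	0x3	/* ARC700 sleep arg: re-enable E1|E2 */
#else
#define ISA_SLEEP_ARG	0x10	/* ARCv2 (HS38) sleep arg: sleep with IRQs on */
#endif

The "I" asm constraint in the new code is why this must be a compile-time constant: the sleep instruction encodes its argument as an immediate, so a register ("r") operand cannot work.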
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index be13d1242..ef6e9e15b 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -42,8 +42,13 @@ void __init smp_prepare_boot_cpu(void)
 }
 
 /*
- * Initialise the CPU possible map early - this describes the CPUs
- * which may be present or become present in the system.
+ * Called from setup_arch() before calling setup_processor()
+ *
+ * - Initialise the CPU possible map early - this describes the CPUs
+ *   which may be present or become present in the system.
+ * - Call early smp init hook. This can initialize a specific multi-core
+ *   IP which is say common to several platforms (hence not part of
+ *   platform specific init_early() hook)
 */
 void __init smp_init_cpus(void)
 {
	unsigned int i;

	for (i = 0; i < NR_CPUS; i++)
		set_cpu_possible(i, true);
+
+	if (plat_smp_ops.init_early_smp)
+		plat_smp_ops.init_early_smp();
 }
 
 /* called from init ( ) => process 1 */
@@ -72,35 +80,29 @@ void __init smp_cpus_done(unsigned int max_cpus)
 }
 
 /*
- * After power-up, a non Master CPU needs to wait for Master to kick start it
- *
- * The default implementation halts
- *
- * This relies on platform specific support allowing Master to directly set
- * this CPU's PC (to be @first_lines_of_secondary() and kick start it.
- *
- * In lack of such h/w assist, platforms can override this function
- *   - make this function busy-spin on a token, eventually set by Master
- *     (from arc_platform_smp_wakeup_cpu())
- *   - Once token is available, jump to @first_lines_of_secondary
- *     (using inline asm).
- *
- * Alert: can NOT use stack here as it has not been determined/setup for CPU.
- *        If it turns out to be elaborate, it's better to code it in assembly
- *
+ * Default smp boot helper for Run-on-reset case where all cores start off
+ * together. Non-masters need to wait for Master to start running.
+ * This is implemented using a flag in memory, which Non-masters spin-wait on.
+ * Master sets it to cpu-id of core to "ungate" it.
  */
-void __weak arc_platform_smp_wait_to_boot(int cpu)
+static volatile int wake_flag;
+
+static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
 {
-	/*
-	 * As a hack for debugging - since debugger will single-step over the
-	 * FLAG insn - wrap the halt itself it in a self loop
-	 */
-	__asm__ __volatile__(
-	"1:		\n"
-	"	flag 1	\n"
-	"	b 1b	\n");
+	BUG_ON(cpu == 0);
+	wake_flag = cpu;
+}
+
+void arc_platform_smp_wait_to_boot(int cpu)
+{
+	while (wake_flag != cpu)
+		;
+
+	wake_flag = 0;
+	__asm__ __volatile__("j @first_lines_of_secondary	\n");
 }
 
 const char *arc_platform_smp_cpuinfo(void)
 {
	return plat_smp_ops.info ? : "";
@@ -129,8 +131,12 @@ void start_kernel_secondary(void)
 
 	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
 
-	if (machine_desc->init_smp)
-		machine_desc->init_smp(cpu);
+	/* Some SMP H/w setup - for each cpu */
+	if (plat_smp_ops.init_per_cpu)
+		plat_smp_ops.init_per_cpu(cpu);
+
+	if (machine_desc->init_per_cpu)
+		machine_desc->init_per_cpu(cpu);
 
 	arc_local_timer_setup();
 
@@ -161,6 +167,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 	if (plat_smp_ops.cpu_kick)
		plat_smp_ops.cpu_kick(cpu,
				(unsigned long)first_lines_of_secondary);
+	else
+		arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);
 
 	/* wait for 1 sec after kicking the secondary */
 	wait_till = jiffies + HZ;
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 4294761a2..dfad287f1 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -285,7 +285,4 @@ void __init time_init(void)
 
 	/* sets up the periodic event timer */
 	arc_local_timer_setup();
-
-	if (machine_desc->init_time)
-		machine_desc->init_time();
 }
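The run-on-reset handshake that smp.c now carries as the default is easy to model in plain C; the sketch below is a userspace rendering (functions standing in for the Master core and a secondary), not kernel code. The kernel version additionally jumps to first_lines_of_secondary instead of returning, and deliberately touches no stack before that:

static volatile int wake_flag;	/* shared "mailbox", as in the patch */

static void master_kick_cpu(int cpu)
{
	/* Master (cpu 0) never kicks itself */
	wake_flag = cpu;	/* ungate exactly that secondary */
}

static void secondary_wait_to_boot(int cpu)
{
	while (wake_flag != cpu)
		;		/* spin; no stack/per-cpu state needed yet */

	wake_flag = 0;		/* re-arm the flag for the next core */
	/* kernel version now jumps to first_lines_of_secondary here */
}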
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 93c6ea52b..5eb707640 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -170,6 +170,23 @@ static struct unwind_table *find_table(unsigned long pc)
 
 static unsigned long read_pointer(const u8 **pLoc,
				  const void *end, signed ptrType);
+static void init_unwind_hdr(struct unwind_table *table,
+			    void *(*alloc) (unsigned long));
+
+/*
+ * wrappers for header alloc (vs. calling one vs. other at call site)
+ * to elide section mismatch warnings
+ */
+static void *__init unw_hdr_alloc_early(unsigned long sz)
+{
+	return __alloc_bootmem_nopanic(sz, sizeof(unsigned int),
+				       MAX_DMA_ADDRESS);
+}
+
+static void *unw_hdr_alloc(unsigned long sz)
+{
+	return kmalloc(sz, GFP_KERNEL);
+}
 
 static void init_unwind_table(struct unwind_table *table, const char *name,
			      const void *core_start, unsigned long core_size,
@@ -209,6 +226,8 @@ void __init arc_unwind_init(void)
			  __start_unwind, __end_unwind - __start_unwind,
			  NULL, 0);
	  /*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/
+
+	init_unwind_hdr(&root_table, unw_hdr_alloc_early);
 }
 
 static const u32 bad_cie, not_fde;
@@ -241,8 +260,8 @@ static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
 	e2->fde = v;
 }
 
-static void __init setup_unwind_table(struct unwind_table *table,
-				      void *(*alloc) (unsigned long))
+static void init_unwind_hdr(struct unwind_table *table,
+			    void *(*alloc) (unsigned long))
 {
 	const u8 *ptr;
 	unsigned long tableSize = table->size, hdrSize;
@@ -277,10 +296,10 @@ static void init_unwind_hdr(struct unwind_table *table,
		if (cie == &not_fde)
			continue;
		if (cie == NULL || cie == &bad_cie)
-			return;
+			goto ret_err;
		ptrType = fde_pointer_type(cie);
		if (ptrType < 0)
-			return;
+			goto ret_err;
 
		ptr = (const u8 *)(fde + 2);
		if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde,
@@ -296,13 +315,15 @@ static void init_unwind_hdr(struct unwind_table *table,
 	}
 
 	if (tableSize || !n)
-		return;
+		goto ret_err;
 
 	hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
		+ 2 * n * sizeof(unsigned long);
+
 	header = alloc(hdrSize);
 	if (!header)
-		return;
+		goto ret_err;
+
 	header->version = 1;
 	header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native;
 	header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4;
@@ -340,18 +361,10 @@ static void init_unwind_hdr(struct unwind_table *table,
 	table->hdrsz = hdrSize;
 	smp_wmb();
 	table->header = (const void *)header;
-}
-
-static void *__init balloc(unsigned long sz)
-{
-	return __alloc_bootmem_nopanic(sz,
-				       sizeof(unsigned int),
-				       __pa(MAX_DMA_ADDRESS));
-}
+	return;
 
-void __init arc_unwind_setup(void)
-{
-	setup_unwind_table(&root_table, balloc);
+ret_err:
+	panic("Attention !!! Dwarf FDE parsing errors\n");
 }
 
 #ifdef CONFIG_MODULES
@@ -377,6 +390,8 @@ void *unwind_add_table(struct module *module, const void *table_start,
			  table_start, table_size,
			  NULL, 0);
 
+	init_unwind_hdr(table, unw_hdr_alloc);
+
 #ifdef UNWIND_DEBUG
	unw_debug("Table added for [%s] %lx %lx\n",
		module->name, table->core.pc, table->core.range);
@@ -439,6 +454,7 @@ void unwind_remove_table(void *handle, int init_only)
 	info.init_only = init_only;
 
 	unlink_table(&info); /* XXX: SMP */
+	kfree(table->header);
 	kfree(table);
 }
 
@@ -588,9 +604,6 @@ static signed fde_pointer_type(const u32 *cie)
 	const u8 *ptr = (const u8 *)(cie + 2);
 	unsigned version = *ptr;
 
-	if (version != 1)
-		return -1;	/* unsupported */
-
 	if (*++ptr) {
		const char *aug;
		const u8 *end = (const u8 *)(cie + 1) + *cie;
@@ -986,42 +999,13 @@ int arc_unwind(struct unwind_frame_info *frame)
							    (const u8 *)(fde + 1) +
							    *fde, ptrType);
-				if (pc >= endLoc)
+				if (pc >= endLoc) {
					fde = NULL;
-			} else
-				fde = NULL;
-		}
-	}
-	if (fde == NULL) {
-		for (fde = table->address, tableSize = table->size;
-		     cie = NULL, tableSize > sizeof(*fde)
-		     && tableSize - sizeof(*fde) >= *fde;
-		     tableSize -= sizeof(*fde) + *fde,
-		     fde += 1 + *fde / sizeof(*fde)) {
-			cie = cie_for_fde(fde, table);
-			if (cie == &bad_cie) {
					cie = NULL;
-				break;
				}
-			if (cie == NULL
-			    || cie == &not_fde
-			    || (ptrType = fde_pointer_type(cie)) < 0)
-				continue;
-			ptr = (const u8 *)(fde + 2);
-			startLoc = read_pointer(&ptr,
-						(const u8 *)(fde + 1) +
-						*fde, ptrType);
-			if (!startLoc)
-				continue;
-			if (!(ptrType & DW_EH_PE_indirect))
-				ptrType &= DW_EH_PE_FORM | DW_EH_PE_signed;
-			endLoc = startLoc + read_pointer(&ptr,
-							 (const u8 *)(fde + 1) +
-							 *fde, ptrType);
-			if (pc >= startLoc && pc < endLoc)
-				break;
+			} else {
+				fde = NULL;
+				cie = NULL;
			}
		}
	}
@@ -1031,9 +1015,7 @@ int arc_unwind(struct unwind_frame_info *frame)
			ptr = (const u8 *)(cie + 2);
			end = (const u8 *)(cie + 1) + *cie;
			frame->call_frame = 1;
-			if ((state.version = *ptr) != 1)
-				cie = NULL;	/* unsupported version */
-			else if (*++ptr) {
+			if (*++ptr) {
				/* check if augmentation size is first (thus present) */
				if (*ptr == 'z') {
					while (++ptr < end && *ptr) {
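What init_unwind_hdr() builds is a binary-search index over the FDEs, shaped like a .eh_frame_hdr section. A layout sketch reconstructed from the fields written in the hunks above (it mirrors the packed struct that unwind.c declares internally; the struct name here is made up):

struct eh_frame_hdr_sketch {
	unsigned char version;		/* 1 */
	unsigned char eh_frame_ptr_enc;	/* DW_EH_PE_abs | DW_EH_PE_native */
	unsigned char fde_count_enc;	/* DW_EH_PE_abs | DW_EH_PE_data4 */
	unsigned char table_enc;	/* encoding of the pairs below */
	unsigned long eh_frame_ptr;	/* base of .eh_frame */
	unsigned int  fde_count;	/* n */
	struct {
		unsigned long start;	/* initial PC covered by the FDE */
		unsigned long fde;	/* address of the FDE itself */
	} table[];			/* sorted by start => binary search */
} __attribute__((packed));

This matches the allocation size in the code, hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int) + 2 * n * sizeof(unsigned long), and sorting table[] by start is what lets the removed linear FDE scan go away.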
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
index dd35bde39..894e696bd 100644
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -12,7 +12,7 @@
 #include <asm/thread_info.h>
 
 OUTPUT_ARCH(arc)
-ENTRY(_stext)
+ENTRY(res_service)
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
 jiffies = jiffies_64 + 4;