From 29f5b648fa0b31ad614c78468b9279e5fa96397a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andr=C3=A9=20Fabian=20Silva=20Delgado?=
Date: Fri, 11 Nov 2016 16:22:27 -0300
Subject: Linux-libre 4.8.7-gnu

---
 kernel/sched/MuQSS.c | 31 ++++++++++++++++++++-----------
 1 file changed, 20 insertions(+), 11 deletions(-)

(limited to 'kernel/sched/MuQSS.c')

diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
index cf7a95286..7c4ddc734 100644
--- a/kernel/sched/MuQSS.c
+++ b/kernel/sched/MuQSS.c
@@ -760,6 +760,13 @@ static inline bool task_queued(struct task_struct *p)
 static void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
 static inline void resched_if_idle(struct rq *rq);
 
+/* Dodgy workaround till we figure out where the softirqs are going */
+static inline void do_pending_softirq(struct rq *rq, struct task_struct *next)
+{
+	if (unlikely(next == rq->idle && local_softirq_pending() && !in_interrupt()))
+		do_softirq_own_stack();
+}
+
 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
 #ifdef CONFIG_SMP
@@ -814,7 +821,11 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 		raw_spin_unlock(&prev->pi_lock);
 	}
 #endif
-	raw_spin_unlock_irq(&rq->lock);
+	rq_unlock(rq);
+
+	do_pending_softirq(rq, current);
+
+	local_irq_enable();
 }
 
 static inline bool deadline_before(u64 deadline, u64 time)
@@ -2556,7 +2567,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
  * past. prev == current is still correct but we need to recalculate this_rq
  * because prev may have moved to another CPU.
  */
-static struct rq *finish_task_switch(struct task_struct *prev)
+static void finish_task_switch(struct task_struct *prev)
 	__releases(rq->lock)
 {
 	struct rq *rq = this_rq();
@@ -2609,7 +2620,6 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 		kprobe_flush_task(prev);
 		put_task_struct(prev);
 	}
-	return rq;
 }
 
 /**
@@ -2617,10 +2627,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
  * @prev: the thread we just switched away from.
  */
 asmlinkage __visible void schedule_tail(struct task_struct *prev)
-	__releases(rq->lock)
 {
-	struct rq *rq;
-
 	/*
 	 * New tasks start with FORK_PREEMPT_COUNT, see there and
 	 * finish_task_switch() for details.
@@ -2630,7 +2637,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
 	 * PREEMPT_COUNT kernels).
 	 */
 
-	rq = finish_task_switch(prev);
+	finish_task_switch(prev);
 	preempt_enable();
 
 	if (current->set_child_tid)
@@ -2640,7 +2647,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
 /*
  * context_switch - switch to the new MM and the new thread's register state.
  */
-static __always_inline struct rq *
+static __always_inline void
 context_switch(struct rq *rq, struct task_struct *prev,
 	       struct task_struct *next)
 {
@@ -2680,7 +2687,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	switch_to(prev, next, prev);
 
 	barrier();
-	return finish_task_switch(prev);
+	finish_task_switch(prev);
 }
 
 /*
@@ -3854,10 +3861,12 @@ static void __sched notrace __schedule(bool preempt)
 		++*switch_count;
 
 		trace_sched_switch(preempt, prev, next);
-		rq = context_switch(rq, prev, next); /* unlocks the rq */
+		context_switch(rq, prev, next); /* unlocks the rq */
 	} else {
 		check_siblings(rq);
-		rq_unlock_irq(rq);
+		rq_unlock(rq);
+		do_pending_softirq(rq, next);
+		local_irq_enable();
 	}
 }
-- 
cgit v1.2.3
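
Editor's note: the substantive change in this patch is the new do_pending_softirq()
helper and the ordering it imposes at the end of a context switch: the runqueue lock
is dropped first, pending softirqs are drained on their own stack when the CPU is
about to run its idle task, and only then are local interrupts re-enabled. The
stand-alone C program below is a minimal sketch of that ordering only; every
primitive in it (rq_unlock, local_softirq_pending, in_interrupt,
do_softirq_own_stack, local_irq_enable) is a stand-in stub invented for
illustration, not the kernel's implementation.

/* Minimal user-space model of the unlock -> drain-softirq -> enable-irq
 * sequence introduced in finish_lock_switch(). All primitives are stubs. */
#include <stdbool.h>
#include <stdio.h>

struct task { const char *name; };
struct rq   { struct task *idle; bool locked; };

static bool softirq_pending = true;   /* pretend a softirq was raised */
static bool irqs_enabled;

static void rq_unlock(struct rq *rq)    { rq->locked = false; }
static bool local_softirq_pending(void) { return softirq_pending; }
static bool in_interrupt(void)          { return false; }
static void local_irq_enable(void)      { irqs_enabled = true; }

static void do_softirq_own_stack(void)
{
	/* In the kernel this runs softirqs on a dedicated per-CPU stack. */
	printf("draining softirqs before going idle\n");
	softirq_pending = false;
}

/* Mirrors the patch's do_pending_softirq(): only act when the CPU is about
 * to run its idle task and we are not already in interrupt context. */
static void do_pending_softirq(struct rq *rq, struct task *next)
{
	if (next == rq->idle && local_softirq_pending() && !in_interrupt())
		do_softirq_own_stack();
}

int main(void)
{
	struct task idle = { "idle" };
	struct rq rq = { .idle = &idle, .locked = true };

	/* The order matters: unlock the rq first, drain softirqs while
	 * interrupts are still off, then re-enable interrupts. */
	rq_unlock(&rq);
	do_pending_softirq(&rq, &idle);
	local_irq_enable();
	return 0;
}

The same three-step sequence appears twice in the patch: in finish_lock_switch()
(with current as the incoming task) and in the no-switch branch of __schedule()
(with next), which is why the helper takes the incoming task as a parameter.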