Diffstat (limited to 'kernel/sched/MuQSS.c')
-rw-r--r--	kernel/sched/MuQSS.c	31	++++++++++++++++++++-----------
1 file changed, 20 insertions(+), 11 deletions(-)
diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
index cf7a95286..7c4ddc734 100644
--- a/kernel/sched/MuQSS.c
+++ b/kernel/sched/MuQSS.c
@@ -760,6 +760,13 @@ static inline bool task_queued(struct task_struct *p)
static void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
static inline void resched_if_idle(struct rq *rq);
+/* Dodgy workaround till we figure out where the softirqs are going */
+static inline void do_pending_softirq(struct rq *rq, struct task_struct *next)
+{
+ if (unlikely(next == rq->idle && local_softirq_pending() && !in_interrupt()))
+ do_softirq_own_stack();
+}
+
static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
@@ -814,7 +821,11 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
raw_spin_unlock(&prev->pi_lock);
}
#endif
- raw_spin_unlock_irq(&rq->lock);
+ rq_unlock(rq);
+
+ do_pending_softirq(rq, current);
+
+ local_irq_enable();
}
static inline bool deadline_before(u64 deadline, u64 time)
@@ -2556,7 +2567,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
* past. prev == current is still correct but we need to recalculate this_rq
* because prev may have moved to another CPU.
*/
-static struct rq *finish_task_switch(struct task_struct *prev)
+static void finish_task_switch(struct task_struct *prev)
__releases(rq->lock)
{
struct rq *rq = this_rq();
@@ -2609,7 +2620,6 @@ static struct rq *finish_task_switch(struct task_struct *prev)
kprobe_flush_task(prev);
put_task_struct(prev);
}
- return rq;
}
/**
@@ -2617,10 +2627,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
* @prev: the thread we just switched away from.
*/
asmlinkage __visible void schedule_tail(struct task_struct *prev)
- __releases(rq->lock)
{
- struct rq *rq;
-
/*
* New tasks start with FORK_PREEMPT_COUNT, see there and
* finish_task_switch() for details.
@@ -2630,7 +2637,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
* PREEMPT_COUNT kernels).
*/
- rq = finish_task_switch(prev);
+ finish_task_switch(prev);
preempt_enable();
if (current->set_child_tid)
@@ -2640,7 +2647,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
/*
* context_switch - switch to the new MM and the new thread's register state.
*/
-static __always_inline struct rq *
+static __always_inline void
context_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next)
{
@@ -2680,7 +2687,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
switch_to(prev, next, prev);
barrier();
- return finish_task_switch(prev);
+ finish_task_switch(prev);
}
/*
@@ -3854,10 +3861,12 @@ static void __sched notrace __schedule(bool preempt)
++*switch_count;
trace_sched_switch(preempt, prev, next);
- rq = context_switch(rq, prev, next); /* unlocks the rq */
+ context_switch(rq, prev, next); /* unlocks the rq */
} else {
check_siblings(rq);
- rq_unlock_irq(rq);
+ rq_unlock(rq);
+ do_pending_softirq(rq, next);
+ local_irq_enable();
}
}
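
For readability, the net effect on the two unlock paths can be reassembled from the hunks above. This is an illustrative sketch of the resulting call ordering, not an additional change in the diff; both the tail of finish_lock_switch() and the no-switch branch of __schedule() end up following the same pattern:

	rq_unlock(rq);                 /* drop rq->lock; IRQs remain disabled,
	                                * unlike the old raw_spin_unlock_irq()  */
	do_pending_softirq(rq, next);  /* if we are about to run the idle task,
	                                * drain any pending softirqs first      */
	local_irq_enable();            /* only now re-enable interrupts         */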