author     André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-08-13 01:32:17 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-08-13 14:23:33 -0300
commit     0a2bb03fe20f81dc4cac96d7fe0e4194ae6efffd (patch)
tree       f643c68f37c9aa9e2e0b1623b363777c125350df /kernel/sched
parent     c49e505b3486503302e30c4237821bece90b4c2d (diff)
Linux-libre 4.1.5-gnupck-4.1.5-gnu
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/bfs.c  39
1 file changed, 15 insertions, 24 deletions
diff --git a/kernel/sched/bfs.c b/kernel/sched/bfs.c
index a6d06efc1..5366182bd 100644
--- a/kernel/sched/bfs.c
+++ b/kernel/sched/bfs.c
@@ -134,7 +134,7 @@
void print_scheduler_version(void)
{
- printk(KERN_INFO "BFS CPU scheduler v0.463 by Con Kolivas.\n");
+ printk(KERN_INFO "BFS CPU scheduler v0.464 by Con Kolivas.\n");
}
/*
@@ -986,13 +986,6 @@ static inline void deactivate_task(struct task_struct *p, struct rq *rq)
clear_sticky(p);
}
-static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
-
-void register_task_migration_notifier(struct notifier_block *n)
-{
- atomic_notifier_chain_register(&task_migration_notifier, n);
-}
-
#ifdef CONFIG_SMP
void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
@@ -3376,10 +3369,12 @@ static void __sched __schedule(void)
{
struct task_struct *prev, *next, *idle;
unsigned long *switch_count;
- bool deactivate = false;
+ bool deactivate;
struct rq *rq;
int cpu;
+need_resched:
+ deactivate = false;
preempt_disable();
cpu = smp_processor_id();
rq = cpu_rq(cpu);
@@ -3426,6 +3421,17 @@ static void __sched __schedule(void)
switch_count = &prev->nvcsw;
}
+ /*
+ * If we are going to sleep and we have plugged IO queued, make
+ * sure to submit it to avoid deadlocks.
+ */
+ if (unlikely(deactivate && blk_needs_flush_plug(prev))) {
+ grq_unlock_irq();
+ preempt_enable_no_resched();
+ blk_schedule_flush_plug(prev);
+ goto need_resched;
+ }
+
update_clocks(rq);
update_cpu_clock_switch(rq, prev);
if (rq->clock - rq->last_tick > HALF_JIFFY_NS)
@@ -3516,23 +3522,8 @@ rerun_prev_unlocked:
sched_preempt_enable_no_resched();
}
-static inline void sched_submit_work(struct task_struct *tsk)
-{
- if (!tsk->state || tsk_is_pi_blocked(tsk))
- return;
- /*
- * If we are going to sleep and we have plugged IO queued,
- * make sure to submit it to avoid deadlocks.
- */
- if (blk_needs_flush_plug(tsk))
- blk_schedule_flush_plug(tsk);
-}
-
asmlinkage __visible void __sched schedule(void)
{
- struct task_struct *tsk = current;
-
- sched_submit_work(tsk);
do {
__schedule();
} while (need_resched());
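
The hunks above remove sched_submit_work() and fold its plugged-I/O flush into __schedule() itself, so the check now runs after the deactivate decision has been taken under the grq lock, and the whole scheduling pass is simply retried once the plug has been flushed. The following is a minimal userspace sketch of that control flow, for illustration only; every identifier in it (grq_lock, pending_batch, flush_batch, ...) is invented here and none of it is the kernel's API.

/*
 * Illustrative userspace sketch -- NOT kernel code and NOT the BFS
 * implementation.  It mimics the control flow the patch adds to
 * __schedule(): if the task is about to block while it still has batched
 * ("plugged") work queued, drop the lock, flush the batch, and retry the
 * whole scheduling decision from the need_resched label.
 */
#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t grq_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending_batch = 3;        /* stand-in for a plugged I/O list        */
static bool wants_to_sleep = true;   /* stand-in for "prev is going to sleep"  */

static void flush_batch(void)        /* stand-in for flushing the plug         */
{
        printf("flushing %d queued requests before blocking\n", pending_batch);
        pending_batch = 0;
}

static void schedule_sketch(void)
{
        bool deactivate;

need_resched:
        deactivate = false;
        pthread_mutex_lock(&grq_lock);

        if (wants_to_sleep)
                deactivate = true;   /* decision made under the lock */

        /* Same idea as the new hunk: never block with work still plugged. */
        if (deactivate && pending_batch > 0) {
                pthread_mutex_unlock(&grq_lock);
                flush_batch();
                goto need_resched;   /* redo the decision from the top */
        }

        printf("context switch (deactivate=%d)\n", deactivate);
        pthread_mutex_unlock(&grq_lock);
}

int main(void)
{
        schedule_sketch();
        return 0;
}

Retrying from need_resched rather than flushing up front (as the removed sched_submit_work() did) means the flush only happens once the scheduler has actually decided to deactivate the task, which is the condition the new unlikely(deactivate && blk_needs_flush_plug(prev)) test expresses in the patch.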