author     André Fabian Silva Delgado <emulatorman@parabola.nu>   2015-08-13 01:32:17 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>   2015-08-13 14:23:33 -0300
commit     0a2bb03fe20f81dc4cac96d7fe0e4194ae6efffd (patch)
tree       f643c68f37c9aa9e2e0b1623b363777c125350df /kernel
parent     c49e505b3486503302e30c4237821bece90b4c2d (diff)
Linux-libre 4.1.5-gnu (pck-4.1.5-gnu)
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/irq/resend.c   | 18
-rw-r--r--  kernel/sched/bfs.c    | 39
-rw-r--r--  kernel/trace/ftrace.c | 52
3 files changed, 62 insertions, 47 deletions
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 9065107f0..7a5237a1b 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -75,13 +75,21 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
!desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
#ifdef CONFIG_HARDIRQS_SW_RESEND
/*
- * If the interrupt has a parent irq and runs
- * in the thread context of the parent irq,
- * retrigger the parent.
+ * If the interrupt is running in the thread
+ * context of the parent irq we need to be
+ * careful, because we cannot trigger it
+ * directly.
*/
- if (desc->parent_irq &&
- irq_settings_is_nested_thread(desc))
+ if (irq_settings_is_nested_thread(desc)) {
+ /*
+ * If the parent_irq is valid, we
+ * retrigger the parent, otherwise we
+ * do nothing.
+ */
+ if (!desc->parent_irq)
+ return;
irq = desc->parent_irq;
+ }
/* Set it pending and activate the softirq: */
set_bit(irq, irqs_resend);
tasklet_schedule(&resend_tasklet);
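
Editor's note: a minimal sketch of the control flow this resend.c hunk produces, under simplified assumptions. The names below (fake_desc, pick_resend_irq) are illustrative stand-ins, not kernel API: a nested-threaded interrupt is never marked pending itself; either its parent irq is retriggered, or the software resend is skipped.

#include <stdbool.h>

struct fake_desc {
	bool nested_thread;       /* stands in for irq_settings_is_nested_thread() */
	unsigned int parent_irq;  /* 0 means "no parent irq"                       */
};

/* Returns the irq number to mark pending for the software resend,
 * or -1 when the resend must be skipped entirely. */
static int pick_resend_irq(const struct fake_desc *desc, unsigned int irq)
{
	if (desc->nested_thread) {
		if (!desc->parent_irq)
			return -1;                    /* nested but no parent: do nothing   */
		return (int)desc->parent_irq;     /* retrigger the parent instead       */
	}
	return (int)irq;                          /* normal case: resend this irq       */
}
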
diff --git a/kernel/sched/bfs.c b/kernel/sched/bfs.c
index a6d06efc1..5366182bd 100644
--- a/kernel/sched/bfs.c
+++ b/kernel/sched/bfs.c
@@ -134,7 +134,7 @@
void print_scheduler_version(void)
{
- printk(KERN_INFO "BFS CPU scheduler v0.463 by Con Kolivas.\n");
+ printk(KERN_INFO "BFS CPU scheduler v0.464 by Con Kolivas.\n");
}
/*
@@ -986,13 +986,6 @@ static inline void deactivate_task(struct task_struct *p, struct rq *rq)
clear_sticky(p);
}
-static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
-
-void register_task_migration_notifier(struct notifier_block *n)
-{
- atomic_notifier_chain_register(&task_migration_notifier, n);
-}
-
#ifdef CONFIG_SMP
void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
@@ -3376,10 +3369,12 @@ static void __sched __schedule(void)
{
struct task_struct *prev, *next, *idle;
unsigned long *switch_count;
- bool deactivate = false;
+ bool deactivate;
struct rq *rq;
int cpu;
+need_resched:
+ deactivate = false;
preempt_disable();
cpu = smp_processor_id();
rq = cpu_rq(cpu);
@@ -3426,6 +3421,17 @@ static void __sched __schedule(void)
switch_count = &prev->nvcsw;
}
+ /*
+ * If we are going to sleep and we have plugged IO queued, make
+ * sure to submit it to avoid deadlocks.
+ */
+ if (unlikely(deactivate && blk_needs_flush_plug(prev))) {
+ grq_unlock_irq();
+ preempt_enable_no_resched();
+ blk_schedule_flush_plug(prev);
+ goto need_resched;
+ }
+
update_clocks(rq);
update_cpu_clock_switch(rq, prev);
if (rq->clock - rq->last_tick > HALF_JIFFY_NS)
@@ -3516,23 +3522,8 @@ rerun_prev_unlocked:
sched_preempt_enable_no_resched();
}
-static inline void sched_submit_work(struct task_struct *tsk)
-{
- if (!tsk->state || tsk_is_pi_blocked(tsk))
- return;
- /*
- * If we are going to sleep and we have plugged IO queued,
- * make sure to submit it to avoid deadlocks.
- */
- if (blk_needs_flush_plug(tsk))
- blk_schedule_flush_plug(tsk);
-}
-
asmlinkage __visible void __sched schedule(void)
{
- struct task_struct *tsk = current;
-
- sched_submit_work(tsk);
do {
__schedule();
} while (need_resched());
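
Editor's note: a rough sketch of the retry pattern the bfs.c hunks introduce. The plugged-IO flush that sched_submit_work() used to do before calling the scheduler now happens inside __schedule() itself, once the deactivate decision is known: drop the lock, flush, and restart. The helpers below (lock_runqueue, task_has_plugged_io, flush_plugged_io) are hypothetical stand-ins for grq_lock_irq()/grq_unlock_irq(), blk_needs_flush_plug() and blk_schedule_flush_plug(), not the real API.

#include <stdbool.h>

extern void lock_runqueue(void);
extern void unlock_runqueue(void);
extern bool task_has_plugged_io(void *task);
extern void flush_plugged_io(void *task);

static void schedule_sketch(void *prev, bool task_wants_to_sleep)
{
retry:
	lock_runqueue();
	/* The deactivate decision is only known once the lock is held. */
	if (task_wants_to_sleep && task_has_plugged_io(prev)) {
		unlock_runqueue();        /* back out before doing IO work */
		flush_plugged_io(prev);   /* submit the plugged requests   */
		goto retry;               /* redo the scheduling decision  */
	}
	/* ... pick the next task and context-switch here ... */
	unlock_runqueue();
}
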
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 02bece4a9..eb11011b5 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -98,6 +98,13 @@ struct ftrace_pid {
struct pid *pid;
};
+static bool ftrace_pids_enabled(void)
+{
+ return !list_empty(&ftrace_pids);
+}
+
+static void ftrace_update_trampoline(struct ftrace_ops *ops);
+
/*
* ftrace_disabled is set when an anomaly is discovered.
* ftrace_disabled is much stronger than ftrace_enabled.
@@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock);
static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;
@@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
if (!test_tsk_trace_trace(current))
return;
- ftrace_pid_function(ip, parent_ip, op, regs);
-}
-
-static void set_ftrace_pid_function(ftrace_func_t func)
-{
- /* do not set ftrace_pid_function to itself! */
- if (func != ftrace_pid_func)
- ftrace_pid_function = func;
+ op->saved_func(ip, parent_ip, op, regs);
}
/**
@@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func)
void clear_ftrace_function(void)
{
ftrace_trace_function = ftrace_stub;
- ftrace_pid_function = ftrace_stub;
}
static void control_ops_disable_all(struct ftrace_ops *ops)
@@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
} else
add_ftrace_ops(&ftrace_ops_list, ops);
+ /* Always save the function, and reset at unregistering */
+ ops->saved_func = ops->func;
+
+ if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
+ ops->func = ftrace_pid_func;
+
ftrace_update_trampoline(ops);
if (ftrace_enabled)
@@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
if (ftrace_enabled)
update_ftrace_function();
+ ops->func = ops->saved_func;
+
return 0;
}
static void ftrace_update_pid_func(void)
{
+ bool enabled = ftrace_pids_enabled();
+ struct ftrace_ops *op;
+
/* Only do something if we are tracing something */
if (ftrace_trace_function == ftrace_stub)
return;
+ do_for_each_ftrace_op(op, ftrace_ops_list) {
+ if (op->flags & FTRACE_OPS_FL_PID) {
+ op->func = enabled ? ftrace_pid_func :
+ op->saved_func;
+ ftrace_update_trampoline(op);
+ }
+ } while_for_each_ftrace_op(op);
+
update_ftrace_function();
}
@@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = {
.local_hash.filter_hash = EMPTY_HASH,
INIT_OPS_HASH(global_ops)
.flags = FTRACE_OPS_FL_RECURSION_SAFE |
- FTRACE_OPS_FL_INITIALIZED,
+ FTRACE_OPS_FL_INITIALIZED |
+ FTRACE_OPS_FL_PID,
};
/*
@@ -5023,7 +5041,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops)
static struct ftrace_ops global_ops = {
.func = ftrace_stub,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE |
+ FTRACE_OPS_FL_INITIALIZED |
+ FTRACE_OPS_FL_PID,
};
static int __init ftrace_nodyn_init(void)
@@ -5080,11 +5100,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
if (WARN_ON(tr->ops->func != ftrace_stub))
printk("ftrace ops had %pS for function\n",
tr->ops->func);
- /* Only the top level instance does pid tracing */
- if (!list_empty(&ftrace_pids)) {
- set_ftrace_pid_function(func);
- func = ftrace_pid_func;
- }
}
tr->ops->func = func;
tr->ops->private = tr;
@@ -5371,7 +5386,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos)
{
mutex_lock(&ftrace_lock);
- if (list_empty(&ftrace_pids) && (!*pos))
+ if (!ftrace_pids_enabled() && (!*pos))
return (void *) 1;
return seq_list_start(&ftrace_pids, *pos);
@@ -5610,6 +5625,7 @@ static struct ftrace_ops graph_ops = {
.func = ftrace_stub,
.flags = FTRACE_OPS_FL_RECURSION_SAFE |
FTRACE_OPS_FL_INITIALIZED |
+ FTRACE_OPS_FL_PID |
FTRACE_OPS_FL_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
.trampoline = FTRACE_GRAPH_TRAMP_ADDR,
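
Editor's note: to summarise the ftrace.c change, the single global ftrace_pid_function is replaced by a per-ops saved_func; any ops flagged FTRACE_OPS_FL_PID has its ->func swapped to ftrace_pid_func while pid filtering is active and restored afterwards. Below is a minimal sketch of that swap pattern with simplified stand-in structures (fake_ops, pid_filter_wrapper, update_pid_funcs are illustrative names, not the kernel's types).

#include <stdbool.h>
#include <stddef.h>

typedef void (*trace_fn)(unsigned long ip, void *op);

struct fake_ops {
	trace_fn func;           /* callback currently installed                */
	trace_fn saved_func;     /* the ops' own callback, kept across the swap */
	bool wants_pid_filter;   /* stands in for the FTRACE_OPS_FL_PID flag    */
	struct fake_ops *next;
};

/* Per-ops wrapper: filter on the traced-pid list, then call the callback
 * this ops saved at registration, instead of one global pid function. */
static void pid_filter_wrapper(unsigned long ip, void *op)
{
	struct fake_ops *ops = op;
	/* ... return early if the current task is not in the traced-pid list ... */
	ops->saved_func(ip, op);
}

/* Mirrors the reworked ftrace_update_pid_func(): walk the ops list and
 * install or restore the wrapper on every pid-aware ops. */
static void update_pid_funcs(struct fake_ops *list, bool pids_enabled)
{
	for (struct fake_ops *op = list; op != NULL; op = op->next) {
		if (op->wants_pid_filter)
			op->func = pids_enabled ? pid_filter_wrapper : op->saved_func;
	}
}
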