Diffstat (limited to 'kernel/time/tick-common.c')
-rw-r--r--  kernel/time/tick-common.c | 57
1 file changed, 49 insertions(+), 8 deletions(-)
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 3ae6afa1e..f8bf47571 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -19,6 +19,7 @@
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>
+#include <trace/events/power.h>
#include <asm/irq_regs.h>
@@ -102,7 +103,17 @@ void tick_handle_periodic(struct clock_event_device *dev)
tick_periodic(cpu);
- if (dev->state != CLOCK_EVT_STATE_ONESHOT)
+#if defined(CONFIG_HIGH_RES_TIMERS) || defined(CONFIG_NO_HZ_COMMON)
+ /*
+ * The cpu might have transitioned to HIGHRES or NOHZ mode via
+ * update_process_times() -> run_local_timers() ->
+ * hrtimer_run_queues().
+ */
+ if (dev->event_handler != tick_handle_periodic)
+ return;
+#endif
+
+ if (!clockevent_state_oneshot(dev))
return;
for (;;) {
/*
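Note that the open-coded comparison against dev->state is replaced by an accessor in this hunk. A minimal sketch of what such a query helper looks like; the real one lives in <linux/clockchips.h>, and the field name used here is an assumption that has changed across kernel versions:

    static inline bool clockevent_state_oneshot(struct clock_event_device *dev)
    {
            /* Sketch only: assumes the current state sits in a plain ->state field. */
            return dev->state == CLOCK_EVT_STATE_ONESHOT;
    }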
@@ -140,7 +151,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
!tick_broadcast_oneshot_active()) {
- clockevents_set_state(dev, CLOCK_EVT_STATE_PERIODIC);
+ clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);
} else {
unsigned long seq;
ktime_t next;
@@ -150,7 +161,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
next = tick_next_period;
} while (read_seqretry(&jiffies_lock, seq));
- clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
+ clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
for (;;) {
if (!clockevents_program_event(dev, next, false))
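The hunk above shows only fragments of the oneshot fallback path. As a sketch reconstructed from the visible context (verify against the tree), the branch samples tick_next_period under the jiffies seqlock and then programs the first event, retrying one tick later on failure:

    unsigned long seq;
    ktime_t next;

    /* Read tick_next_period consistently under the jiffies seqlock. */
    do {
            seq = read_seqbegin(&jiffies_lock);
            next = tick_next_period;
    } while (read_seqretry(&jiffies_lock, seq));

    clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);

    /* Program the first event; if it is already in the past, move on by one period. */
    for (;;) {
            if (!clockevents_program_event(dev, next, false))
                    return;
            next = ktime_add(next, tick_period);
    }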
@@ -332,6 +343,28 @@ out_bc:
tick_install_broadcast_device(newdev);
}
+/**
+ * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
+ * @state: The target state (enter/exit)
+ *
+ * The system enters/leaves a state where affected devices might stop.
+ * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
+ *
+ * Called with interrupts disabled, so clockevents_lock is not
+ * required here because the local clock event device cannot go away
+ * under us.
+ */
+int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+{
+ struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+
+ if (!(td->evtdev->features & CLOCK_EVT_FEAT_C3STOP))
+ return 0;
+
+ return __tick_broadcast_oneshot_control(state);
+}
+EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);
+
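Moving tick_broadcast_oneshot_control() here and exporting it lets modular cpuidle drivers reach it. Callers normally go through small wrappers rather than invoking it directly; a hedged sketch of their assumed shape (see <linux/tick.h> for the authoritative definitions):

    /* Assumed wrapper shape: request broadcast handling before entering a deep
     * idle state that stops the local tick device (C3STOP), drop it on wakeup. */
    static inline int tick_broadcast_enter(void)
    {
            return tick_broadcast_oneshot_control(TICK_BROADCAST_ENTER);
    }

    static inline void tick_broadcast_exit(void)
    {
            tick_broadcast_oneshot_control(TICK_BROADCAST_EXIT);
    }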
#ifdef CONFIG_HOTPLUG_CPU
/*
* Transfer the do_timer job away from a dying cpu.
@@ -367,7 +400,7 @@ void tick_shutdown(unsigned int cpu)
* Prevent the clock events layer from trying to call
* the set mode function!
*/
- dev->state = CLOCK_EVT_STATE_DETACHED;
+ clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
dev->mode = CLOCK_EVT_MODE_UNUSED;
clockevents_exchange_device(dev, NULL);
dev->event_handler = clockevents_handle_noop;
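The DETACHED transition now goes through a setter instead of writing dev->state directly, mirroring the clockevent_state_*() queries. A minimal sketch of such a setter, assuming a plain assignment behind the accessor (the actual field name differs between kernel versions):

    static inline void clockevent_set_state(struct clock_event_device *dev,
                                            enum clock_event_state state)
    {
            /* Sketch only: the accessor hides the field so core code
             * stops touching it directly. */
            dev->state = state;
    }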
@@ -440,6 +473,7 @@ void tick_resume(void)
tick_resume_local();
}
+#ifdef CONFIG_SUSPEND
static DEFINE_RAW_SPINLOCK(tick_freeze_lock);
static unsigned int tick_freeze_depth;
@@ -457,10 +491,13 @@ void tick_freeze(void)
raw_spin_lock(&tick_freeze_lock);
tick_freeze_depth++;
- if (tick_freeze_depth == num_online_cpus())
+ if (tick_freeze_depth == num_online_cpus()) {
+ trace_suspend_resume(TPS("timekeeping_freeze"),
+ smp_processor_id(), true);
timekeeping_suspend();
- else
+ } else {
tick_suspend_local();
+ }
raw_spin_unlock(&tick_freeze_lock);
}
@@ -478,15 +515,19 @@ void tick_unfreeze(void)
{
raw_spin_lock(&tick_freeze_lock);
- if (tick_freeze_depth == num_online_cpus())
+ if (tick_freeze_depth == num_online_cpus()) {
timekeeping_resume();
- else
+ trace_suspend_resume(TPS("timekeeping_freeze"),
+ smp_processor_id(), false);
+ } else {
tick_resume_local();
+ }
tick_freeze_depth--;
raw_spin_unlock(&tick_freeze_lock);
}
+#endif /* CONFIG_SUSPEND */
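tick_freeze() and tick_unfreeze() are only needed by the suspend-to-idle path, hence the new CONFIG_SUSPEND guard. A hedged sketch of how a caller pairs them around the deepest idle state; enter_deepest_idle_state() is a hypothetical placeholder, not a real kernel function:

    static void s2idle_enter_sketch(void)
    {
            /* The last CPU to freeze also suspends timekeeping (traced above). */
            tick_freeze();

            enter_deepest_idle_state();     /* hypothetical placeholder */

            /* The first CPU to unfreeze resumes timekeeping again. */
            tick_unfreeze();
    }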
/**
* tick_init - initialize the tick control