author		André Fabian Silva Delgado <emulatorman@parabola.nu>	2015-12-15 14:52:16 -0300
committer	André Fabian Silva Delgado <emulatorman@parabola.nu>	2015-12-15 14:52:16 -0300
commit		8d91c1e411f55d7ea91b1183a2e9f8088fb4d5be (patch)
tree		e9891aa6c295060d065adffd610c4f49ecf884f3 /kernel/time
parent		a71852147516bc1cb5b0b3cbd13639bfd4022dc8 (diff)
Linux-libre 4.3.2-gnu
Diffstat (limited to 'kernel/time')
-rw-r--r--	kernel/time/Kconfig			4
-rw-r--r--	kernel/time/clockevents.c		42
-rw-r--r--	kernel/time/hrtimer.c			36
-rw-r--r--	kernel/time/ntp.c			5
-rw-r--r--	kernel/time/posix-cpu-timers.c		10
-rw-r--r--	kernel/time/tick-broadcast-hrtimer.c	49
-rw-r--r--	kernel/time/tick-common.c		4
-rw-r--r--	kernel/time/tick-sched.c		87
-rw-r--r--	kernel/time/time.c			53
-rw-r--r--	kernel/time/timekeeping.c		19
-rw-r--r--	kernel/time/timer_list.c		56
11 files changed, 158 insertions, 207 deletions
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 579ce1b92..6931b6e3c 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -89,15 +89,13 @@ config NO_HZ_IDLE
 config NO_HZ_FULL
 	bool "Full dynticks system (tickless)"
 	# NO_HZ_COMMON dependency
-	depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
+	depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS && !SCHED_BFS
 	# We need at least one periodic CPU for timekeeping
 	depends on SMP
-	# RCU_USER_QS dependency
 	depends on HAVE_CONTEXT_TRACKING
 	# VIRT_CPU_ACCOUNTING_GEN dependency
 	depends on HAVE_VIRT_CPU_ACCOUNTING_GEN
 	select NO_HZ_COMMON
-	select RCU_USER_QS
 	select RCU_NOCB_CPU
 	select VIRT_CPU_ACCOUNTING_GEN
 	select IRQ_WORK
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 50eb107f1..a9b76a403 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -97,20 +97,6 @@ EXPORT_SYMBOL_GPL(clockevent_delta2ns);
 static int __clockevents_switch_state(struct clock_event_device *dev,
 				      enum clock_event_state state)
 {
-	/* Transition with legacy set_mode() callback */
-	if (dev->set_mode) {
-		/* Legacy callback doesn't support new modes */
-		if (state > CLOCK_EVT_STATE_ONESHOT)
-			return -ENOSYS;
-		/*
-		 * 'clock_event_state' and 'clock_event_mode' have 1-to-1
-		 * mapping until *_ONESHOT, and so a simple cast will work.
-		 */
-		dev->set_mode((enum clock_event_mode)state, dev);
-		dev->mode = (enum clock_event_mode)state;
-		return 0;
-	}
-
 	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
 		return 0;
@@ -204,12 +190,8 @@ int clockevents_tick_resume(struct clock_event_device *dev)
 {
 	int ret = 0;
 
-	if (dev->set_mode) {
-		dev->set_mode(CLOCK_EVT_MODE_RESUME, dev);
-		dev->mode = CLOCK_EVT_MODE_RESUME;
-	} else if (dev->tick_resume) {
+	if (dev->tick_resume)
 		ret = dev->tick_resume(dev);
-	}
 
 	return ret;
 }
@@ -460,26 +442,6 @@ int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
 }
 EXPORT_SYMBOL_GPL(clockevents_unbind_device);
 
-/* Sanity check of state transition callbacks */
-static int clockevents_sanity_check(struct clock_event_device *dev)
-{
-	/* Legacy set_mode() callback */
-	if (dev->set_mode) {
-		/* We shouldn't be supporting new modes now */
-		WARN_ON(dev->set_state_periodic || dev->set_state_oneshot ||
-			dev->set_state_shutdown || dev->tick_resume ||
-			dev->set_state_oneshot_stopped);
-
-		BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
-		return 0;
-	}
-
-	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
-		return 0;
-
-	return 0;
-}
-
 /**
  * clockevents_register_device - register a clock event device
  * @dev:	device to register
@@ -488,8 +450,6 @@ void clockevents_register_device(struct clock_event_device *dev)
 {
 	unsigned long flags;
 
-	BUG_ON(clockevents_sanity_check(dev));
-
 	/* Initialize state to DETACHED */
 	clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
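The clockevents changes above complete the migration from the multiplexed legacy set_mode() callback to one optional callback per target state (set_state_shutdown/periodic/oneshot, tick_resume), which is why the translation path and its sanity check can simply be deleted. A minimal sketch of the resulting dispatch, written as a standalone userspace model rather than the kernel's actual code (the struct layout and error conventions here are simplified assumptions):

/*
 * Simplified userspace model of the post-set_mode() clockevents API:
 * one optional callback per state instead of a multiplexed set_mode().
 * Names mirror the kernel's, but this is only a sketch.
 */
#include <stdio.h>
#include <errno.h>

enum clock_event_state {
	CLOCK_EVT_STATE_DETACHED,
	CLOCK_EVT_STATE_SHUTDOWN,
	CLOCK_EVT_STATE_PERIODIC,
	CLOCK_EVT_STATE_ONESHOT,
};

struct clock_event_device {
	int (*set_state_shutdown)(struct clock_event_device *dev);
	int (*set_state_periodic)(struct clock_event_device *dev);
	int (*set_state_oneshot)(struct clock_event_device *dev);
	enum clock_event_state state;
};

/* Mirrors the shape of __clockevents_switch_state() once the legacy
 * path is gone: a missing callback only matters for states that
 * actually require one. */
static int clockevents_switch_state(struct clock_event_device *dev,
				    enum clock_event_state state)
{
	switch (state) {
	case CLOCK_EVT_STATE_SHUTDOWN:
		return dev->set_state_shutdown ?
			dev->set_state_shutdown(dev) : 0;
	case CLOCK_EVT_STATE_PERIODIC:
		if (!dev->set_state_periodic)
			return -ENOSYS;
		return dev->set_state_periodic(dev);
	case CLOCK_EVT_STATE_ONESHOT:
		if (!dev->set_state_oneshot)
			return -ENOSYS;
		return dev->set_state_oneshot(dev);
	default:
		return 0;
	}
}

static int demo_shutdown(struct clock_event_device *dev)
{
	puts("device: shutdown");
	return 0;
}

int main(void)
{
	struct clock_event_device dev = { .set_state_shutdown = demo_shutdown };

	if (!clockevents_switch_state(&dev, CLOCK_EVT_STATE_SHUTDOWN))
		dev.state = CLOCK_EVT_STATE_SHUTDOWN;
	return 0;
}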
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 5c7ae4b64..457a373e2 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -183,7 +183,7 @@ struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
 					 int pinned)
 {
 	if (pinned || !base->migration_enabled)
-		return this_cpu_ptr(&hrtimer_bases);
+		return base;
 	return &per_cpu(hrtimer_bases, get_nohz_timer_target());
 }
 #else
@@ -191,23 +191,32 @@ static inline struct hrtimer_cpu_base *
 get_target_base(struct hrtimer_cpu_base *base,
 		int pinned)
 {
-	return this_cpu_ptr(&hrtimer_bases);
+	return base;
 }
 #endif
 
 /*
- * Switch the timer base to the current CPU when possible.
+ * We switch the timer base to a power-optimized selected CPU target,
+ * if:
+ *	- NO_HZ_COMMON is enabled
+ *	- timer migration is enabled
+ *	- the timer callback is not running
+ *	- the timer is not the first expiring timer on the new target
+ *
+ * If one of the above requirements is not fulfilled we move the timer
+ * to the current CPU or leave it on the previously assigned CPU if
+ * the timer callback is currently running.
  */
 static inline struct hrtimer_clock_base *
 switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
 		    int pinned)
 {
-	struct hrtimer_cpu_base *new_cpu_base, *this_base;
+	struct hrtimer_cpu_base *new_cpu_base, *this_cpu_base;
 	struct hrtimer_clock_base *new_base;
 	int basenum = base->index;
 
-	this_base = this_cpu_ptr(&hrtimer_bases);
-	new_cpu_base = get_target_base(this_base, pinned);
+	this_cpu_base = this_cpu_ptr(&hrtimer_bases);
+	new_cpu_base = get_target_base(this_cpu_base, pinned);
 again:
 	new_base = &new_cpu_base->clock_base[basenum];
@@ -229,19 +238,19 @@ again:
 		raw_spin_unlock(&base->cpu_base->lock);
 		raw_spin_lock(&new_base->cpu_base->lock);
 
-		if (new_cpu_base != this_base &&
+		if (new_cpu_base != this_cpu_base &&
 		    hrtimer_check_target(timer, new_base)) {
 			raw_spin_unlock(&new_base->cpu_base->lock);
 			raw_spin_lock(&base->cpu_base->lock);
-			new_cpu_base = this_base;
+			new_cpu_base = this_cpu_base;
 			timer->base = base;
 			goto again;
 		}
 		timer->base = new_base;
 	} else {
-		if (new_cpu_base != this_base &&
+		if (new_cpu_base != this_cpu_base &&
 		    hrtimer_check_target(timer, new_base)) {
-			new_cpu_base = this_base;
+			new_cpu_base = this_cpu_base;
 			goto again;
 		}
 	}
@@ -679,14 +688,14 @@ static void retrigger_next_event(void *arg)
 /*
  * Switch to high resolution mode
  */
-static int hrtimer_switch_to_hres(void)
+static void hrtimer_switch_to_hres(void)
 {
 	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
 
 	if (tick_init_highres()) {
 		printk(KERN_WARNING "Could not switch to high resolution "
 				    "mode on CPU %d\n", base->cpu);
-		return 0;
+		return;
 	}
 	base->hres_active = 1;
 	hrtimer_resolution = HIGH_RES_NSEC;
@@ -694,7 +703,6 @@ static int hrtimer_switch_to_hres(void)
 	tick_setup_sched_timer();
 	/* "Retrigger" the interrupt to get things going */
 	retrigger_next_event(NULL);
-	return 1;
 }
 
 static void clock_was_set_work(struct work_struct *work)
@@ -718,7 +726,7 @@ void clock_was_set_delayed(void)
 static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *b) { return 0; }
 static inline int hrtimer_hres_active(void) { return 0; }
 static inline int hrtimer_is_hres_enabled(void) { return 0; }
-static inline int hrtimer_switch_to_hres(void) { return 0; }
+static inline void hrtimer_switch_to_hres(void) { }
 static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base,
 					   int skip_equal) { }
 static inline int hrtimer_reprogram(struct hrtimer *timer,
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index fb4d98c7f..df68cb875 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -487,6 +487,11 @@ out:
 }
 
 #ifdef CONFIG_GENERIC_CMOS_UPDATE
+int __weak update_persistent_clock(struct timespec now)
+{
+	return -ENODEV;
+}
+
 int __weak update_persistent_clock64(struct timespec64 now64)
 {
 	struct timespec now;
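The new update_persistent_clock() stub in ntp.c relies on weak linkage: an architecture that implements the function overrides the default at link time, everything else gets -ENODEV. A self-contained illustration of the pattern, assuming a GCC/Clang ELF toolchain (the demo function name is made up):

/* Minimal demonstration of the __weak pattern used above. The strong
 * "arch" definition, when present at link time, silently replaces the
 * weak default. Names here are illustrative only. */
#include <stdio.h>

#define __weak __attribute__((__weak__))

/* Weak default, analogous to the -ENODEV stub added in ntp.c */
int __weak update_persistent_clock_demo(long sec)
{
	printf("no persistent clock driver, sec=%ld ignored\n", sec);
	return -1;
}

/* Uncommenting this strong definition would override the weak one:
 *
 * int update_persistent_clock_demo(long sec)
 * {
 *	printf("arch driver stores %ld\n", sec);
 *	return 0;
 * }
 */

int main(void)
{
	return update_persistent_clock_demo(1450195936) ? 1 : 0;
}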
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 892e3dae0..f45729333 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -448,7 +448,7 @@ static void cleanup_timers(struct list_head *head)
  */
 void posix_cpu_timers_exit(struct task_struct *tsk)
 {
-	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
+	add_device_randomness((const void*) &tsk_seruntime(tsk),
 						sizeof(unsigned long long));
 	cleanup_timers(tsk->cpu_timers);
@@ -871,7 +871,7 @@ static void check_thread_timers(struct task_struct *tsk,
 		tsk_expires->virt_exp = expires_to_cputime(expires);
 
 	tsk_expires->sched_exp = check_timers_list(++timers, firing,
-						   tsk->se.sum_exec_runtime);
+						   tsk_seruntime(tsk));
 
 	/*
 	 * Check for the special case thread timers.
@@ -882,7 +882,7 @@ static void check_thread_timers(struct task_struct *tsk,
 			READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
 
 		if (hard != RLIM_INFINITY &&
-		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
+		    tsk_rttimeout(tsk) > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
 			/*
 			 * At the hard limit, we just die.
 			 * No need to calculate anything else now.
@@ -890,7 +890,7 @@ static void check_thread_timers(struct task_struct *tsk,
 			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
 			return;
 		}
-		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
+		if (tsk_rttimeout(tsk) > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
 			/*
 			 * At the soft limit, send a SIGXCPU every second.
 			 */
@@ -1125,7 +1125,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 		struct task_cputime task_sample = {
 			.utime = utime,
 			.stime = stime,
-			.sum_exec_runtime = tsk->se.sum_exec_runtime
+			.sum_exec_runtime = tsk_seruntime(tsk)
 		};
 
 		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
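tsk_seruntime() and tsk_rttimeout() are not mainline accessors; they come from the BFS scheduler patch this kernel carries (note the !SCHED_BFS guard added to Kconfig above) and hide which scheduler owns the runtime counters. A standalone sketch of that indirection: the CFS branch mirrors the fields replaced in this diff, while the BFS-side field names are assumptions based on the BFS patchset.

/* Standalone sketch of the tsk_seruntime()/tsk_rttimeout() indirection.
 * Compile with or without -DCONFIG_SCHED_BFS. The BFS field names are
 * assumptions; the CFS side matches the mainline fields in this diff. */
#include <stdio.h>

#ifdef CONFIG_SCHED_BFS
struct task_struct {
	unsigned long long sched_time;	/* assumed BFS runtime field */
	unsigned long rt_timeout;	/* assumed BFS RT timeout field */
};
#define tsk_seruntime(t)	((t)->sched_time)
#define tsk_rttimeout(t)	((t)->rt_timeout)
#else
struct task_struct {
	struct { unsigned long long sum_exec_runtime; } se;
	struct { unsigned long timeout; } rt;
};
#define tsk_seruntime(t)	((t)->se.sum_exec_runtime)
#define tsk_rttimeout(t)	((t)->rt.timeout)
#endif

int main(void)
{
	struct task_struct tsk = { 0 };

	/* Callers such as fastpath_timer_check() no longer care which
	 * scheduler provides the runtime counter. */
	printf("runtime=%llu timeout=%lu\n",
	       (unsigned long long)tsk_seruntime(&tsk), tsk_rttimeout(&tsk));
	return 0;
}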
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index 3e7db49a2..53d7184da 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -18,30 +18,23 @@
 
 static struct hrtimer bctimer;
 
-static void bc_set_mode(enum clock_event_mode mode,
-			struct clock_event_device *bc)
+static int bc_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		/*
-		 * Note, we cannot cancel the timer here as we might
-		 * run into the following live lock scenario:
-		 *
-		 * cpu 0		cpu1
-		 * lock(broadcast_lock);
-		 *			hrtimer_interrupt()
-		 *			bc_handler()
-		 *			   tick_handle_oneshot_broadcast();
-		 *			    lock(broadcast_lock);
-		 * hrtimer_cancel()
-		 *  wait_for_callback()
-		 */
-		hrtimer_try_to_cancel(&bctimer);
-		break;
-	default:
-		break;
-	}
+	/*
+	 * Note, we cannot cancel the timer here as we might
+	 * run into the following live lock scenario:
+	 *
+	 * cpu 0		cpu1
+	 * lock(broadcast_lock);
+	 *			hrtimer_interrupt()
+	 *			bc_handler()
+	 *			   tick_handle_oneshot_broadcast();
+	 *			    lock(broadcast_lock);
+	 * hrtimer_cancel()
+	 *  wait_for_callback()
+	 */
+	hrtimer_try_to_cancel(&bctimer);
+	return 0;
 }
 
 /*
@@ -82,7 +75,7 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
 }
 
 static struct clock_event_device ce_broadcast_hrtimer = {
-	.set_mode		= bc_set_mode,
+	.set_state_shutdown	= bc_shutdown,
 	.set_next_ktime		= bc_set_next,
 	.features		= CLOCK_EVT_FEAT_ONESHOT |
 				  CLOCK_EVT_FEAT_KTIME |
@@ -102,13 +95,11 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
 {
 	ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
 
-	switch (ce_broadcast_hrtimer.mode) {
-	case CLOCK_EVT_MODE_ONESHOT:
+	if (clockevent_state_oneshot(&ce_broadcast_hrtimer))
 		if (ce_broadcast_hrtimer.next_event.tv64 != KTIME_MAX)
 			return HRTIMER_RESTART;
-	default:
-		return HRTIMER_NORESTART;
-	}
+
+	return HRTIMER_NORESTART;
 }
 
 void tick_setup_hrtimer_broadcast(void)
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index f8bf47571..4fcd99e12 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -304,9 +304,6 @@ void tick_check_new_device(struct clock_event_device *newdev)
 	int cpu;
 
 	cpu = smp_processor_id();
-	if (!cpumask_test_cpu(cpu, newdev->cpumask))
-		goto out_bc;
-
 	td = &per_cpu(tick_cpu_device, cpu);
 	curdev = td->evtdev;
@@ -401,7 +398,6 @@ void tick_shutdown(unsigned int cpu)
 	 * the set mode function!
 	 */
 	clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
-	dev->mode = CLOCK_EVT_MODE_UNUSED;
 	clockevents_exchange_device(dev, NULL);
 	dev->event_handler = clockevents_handle_noop;
 	td->evtdev = NULL;
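The comment kept in bc_shutdown() above explains why the broadcast code may only *try* to cancel its hrtimer: a blocking hrtimer_cancel() waits for a running callback, and that callback may itself be spinning on the broadcast lock the canceller already holds. A toy userspace model of that contract, with a pthread mutex and a flag standing in for the kernel machinery (the names and the int return convention are simplifications):

/* Toy model of why bc_shutdown() uses hrtimer_try_to_cancel(): the
 * try-variant reports "callback running" and returns instead of
 * waiting, which would deadlock under the scenario drawn above. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t broadcast_lock = PTHREAD_MUTEX_INITIALIZER;
static int callback_running;

/* Returns 1 if cancelled, -1 if the callback is mid-flight, mirroring
 * hrtimer_try_to_cancel()'s "don't wait" contract. */
static int timer_try_to_cancel(void)
{
	return callback_running ? -1 : 1;
}

int main(void)
{
	pthread_mutex_lock(&broadcast_lock);	/* like tick_handle_oneshot_broadcast() */
	callback_running = 1;			/* pretend bc_handler() fired elsewhere */

	/* A blocking cancel would wait here for the callback, which in
	 * turn waits for broadcast_lock: live lock. */
	if (timer_try_to_cancel() < 0)
		puts("callback running, not waiting (no live lock)");

	callback_running = 0;
	pthread_mutex_unlock(&broadcast_lock);
	return 0;
}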
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index c792429e9..7c7ec4515 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -197,27 +197,9 @@ static bool can_stop_full_tick(void)
 	return true;
 }
 
-static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now);
-
-/*
- * Re-evaluate the need for the tick on the current CPU
- * and restart it if necessary.
- */
-void __tick_nohz_full_check(void)
-{
-	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
-
-	if (tick_nohz_full_cpu(smp_processor_id())) {
-		if (ts->tick_stopped && !is_idle_task(current)) {
-			if (!can_stop_full_tick())
-				tick_nohz_restart_sched_tick(ts, ktime_get());
-		}
-	}
-}
-
 static void nohz_full_kick_work_func(struct irq_work *work)
 {
-	__tick_nohz_full_check();
+	/* Empty, the tick restart happens on tick_nohz_irq_exit() */
 }
 
 static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
@@ -252,7 +234,7 @@ void tick_nohz_full_kick_cpu(int cpu)
 
 static void nohz_full_kick_ipi(void *info)
 {
-	__tick_nohz_full_check();
+	/* Empty, the tick restart happens on tick_nohz_irq_exit() */
 }
 
 /*
@@ -276,7 +258,7 @@ void tick_nohz_full_kick_all(void)
  * It might need the tick due to per task/process properties:
  * perf events, posix cpu timers, ...
  */
-void __tick_nohz_task_switch(struct task_struct *tsk)
+void __tick_nohz_task_switch(void)
 {
 	unsigned long flags;
@@ -308,16 +290,17 @@ static int __init tick_nohz_full_setup(char *str)
 __setup("nohz_full=", tick_nohz_full_setup);
 
 static int tick_nohz_cpu_down_callback(struct notifier_block *nfb,
-						 unsigned long action,
-						 void *hcpu)
+				       unsigned long action,
+				       void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
 		/*
-		 * If we handle the timekeeping duty for full dynticks CPUs,
-		 * we can't safely shutdown that CPU.
+		 * The boot CPU handles housekeeping duty (unbound timers,
+		 * workqueues, timekeeping, ...) on behalf of full dynticks
+		 * CPUs. It must remain online when nohz full is enabled.
 		 */
 		if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
 			return NOTIFY_BAD;
@@ -388,6 +371,12 @@ void __init tick_nohz_init(void)
 	cpu_notifier(tick_nohz_cpu_down_callback, 0);
 	pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
 		cpumask_pr_args(tick_nohz_full_mask));
+
+	/*
+	 * We need at least one CPU to handle housekeeping work such
+	 * as timekeeping, unbound timers, workqueues, ...
+	 */
+	WARN_ON_ONCE(cpumask_empty(housekeeping_mask));
 }
 #endif
@@ -705,21 +694,38 @@ out:
 	return tick;
 }
 
-static void tick_nohz_full_stop_tick(struct tick_sched *ts)
+static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
+{
+	/* Update jiffies first */
+	tick_do_update_jiffies64(now);
+	update_cpu_load_nohz();
+
+	calc_load_exit_idle();
+	touch_softlockup_watchdog();
+	/*
+	 * Cancel the scheduled timer and restore the tick
+	 */
+	ts->tick_stopped = 0;
+	ts->idle_exittime = now;
+
+	tick_nohz_restart(ts, now);
+}
+
+static void tick_nohz_full_update_tick(struct tick_sched *ts)
 {
 #ifdef CONFIG_NO_HZ_FULL
 	int cpu = smp_processor_id();
 
-	if (!tick_nohz_full_cpu(cpu) || is_idle_task(current))
+	if (!tick_nohz_full_cpu(cpu))
 		return;
 
 	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
 		return;
 
-	if (!can_stop_full_tick())
-		return;
-
-	tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
+	if (can_stop_full_tick())
+		tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
+	else if (ts->tick_stopped)
+		tick_nohz_restart_sched_tick(ts, ktime_get());
 #endif
 }
@@ -849,7 +855,7 @@ void tick_nohz_irq_exit(void)
 	if (ts->inidle)
 		__tick_nohz_idle_enter(ts);
 	else
-		tick_nohz_full_stop_tick(ts);
+		tick_nohz_full_update_tick(ts);
 }
 
 /**
@@ -864,23 +870,6 @@ ktime_t tick_nohz_get_sleep_length(void)
 	return ts->sleep_length;
 }
 
-static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
-{
-	/* Update jiffies first */
-	tick_do_update_jiffies64(now);
-	update_cpu_load_nohz();
-
-	calc_load_exit_idle();
-	touch_softlockup_watchdog();
-	/*
-	 * Cancel the scheduled timer and restore the tick
-	 */
-	ts->tick_stopped = 0;
-	ts->idle_exittime = now;
-
-	tick_nohz_restart(ts, now);
-}
-
 static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
 {
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
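The WARN_ON_ONCE() added to tick_nohz_init() encodes the constraint both new comments describe: nohz_full= must leave at least one housekeeping CPU to run timekeeping, unbound timers and workqueues. A toy model of the check, using an 8-bit integer where the kernel uses cpumasks:

/* Toy model of the housekeeping check added to tick_nohz_init():
 * housekeeping = possible CPUs minus the nohz_full set, and it must
 * not end up empty. Integer masks stand in for struct cpumask. */
#include <stdio.h>

int main(void)
{
	unsigned int possible_mask = 0x0f;	/* CPUs 0-3 */
	unsigned int nohz_full_mask = 0x0e;	/* CPUs 1-3 from nohz_full= */
	unsigned int housekeeping_mask = possible_mask & ~nohz_full_mask;

	/* Mirrors WARN_ON_ONCE(cpumask_empty(housekeeping_mask)) */
	if (!housekeeping_mask)
		fprintf(stderr, "no housekeeping CPU left!\n");
	else
		printf("housekeeping CPUs: mask=0x%x\n", housekeeping_mask);
	return 0;
}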
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 85d5bb1d6..86751c68e 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -268,10 +268,14 @@ EXPORT_SYMBOL(jiffies_to_msecs);
 
 unsigned int jiffies_to_usecs(const unsigned long j)
 {
-#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
+	/*
+	 * Hz usually doesn't go much further MSEC_PER_SEC.
+	 * jiffies_to_usecs() and usecs_to_jiffies() depend on that.
+	 */
+	BUILD_BUG_ON(HZ > USEC_PER_SEC);
+
+#if !(USEC_PER_SEC % HZ)
 	return (USEC_PER_SEC / HZ) * j;
-#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
-	return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC);
 #else
 # if BITS_PER_LONG == 32
 	return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
@@ -287,26 +291,20 @@ EXPORT_SYMBOL(jiffies_to_usecs);
  * @t: Timespec
  * @gran: Granularity in ns.
  *
- * Truncate a timespec to a granularity. gran must be smaller than a second.
- * Always rounds down.
- *
- * This function should be only used for timestamps returned by
- * current_kernel_time() or CURRENT_TIME, not with do_gettimeofday() because
- * it doesn't handle the better resolution of the latter.
+ * Truncate a timespec to a granularity. Always rounds down. gran must
+ * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns).
  */
 struct timespec timespec_trunc(struct timespec t, unsigned gran)
 {
-	/*
-	 * Division is pretty slow so avoid it for common cases.
-	 * Currently current_kernel_time() never returns better than
-	 * jiffies resolution. Exploit that.
-	 */
-	if (gran <= jiffies_to_usecs(1) * 1000) {
+	/* Avoid division in the common cases 1 ns and 1 s. */
+	if (gran == 1) {
 		/* nothing */
-	} else if (gran == 1000000000) {
+	} else if (gran == NSEC_PER_SEC) {
 		t.tv_nsec = 0;
-	} else {
+	} else if (gran > 1 && gran < NSEC_PER_SEC) {
 		t.tv_nsec -= t.tv_nsec % gran;
+	} else {
+		WARN(1, "illegal file time granularity: %u", gran);
 	}
 	return t;
 }
@@ -546,7 +544,7 @@ EXPORT_SYMBOL(__usecs_to_jiffies);
  * value to a scaled second value.
  */
 static unsigned long
-__timespec_to_jiffies(unsigned long sec, long nsec)
+__timespec64_to_jiffies(u64 sec, long nsec)
 {
 	nsec = nsec + TICK_NSEC - 1;
@@ -554,22 +552,27 @@ __timespec_to_jiffies(unsigned long sec, long nsec)
 		sec = MAX_SEC_IN_JIFFIES;
 		nsec = 0;
 	}
-	return (((u64)sec * SEC_CONVERSION) +
+	return ((sec * SEC_CONVERSION) +
 		(((u64)nsec * NSEC_CONVERSION) >>
 		 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
 
 }
 
-unsigned long
-timespec_to_jiffies(const struct timespec *value)
+static unsigned long
+__timespec_to_jiffies(unsigned long sec, long nsec)
 {
-	return __timespec_to_jiffies(value->tv_sec, value->tv_nsec);
+	return __timespec64_to_jiffies((u64)sec, nsec);
 }
-EXPORT_SYMBOL(timespec_to_jiffies);
 
+unsigned long
+timespec64_to_jiffies(const struct timespec64 *value)
+{
+	return __timespec64_to_jiffies(value->tv_sec, value->tv_nsec);
+}
+EXPORT_SYMBOL(timespec64_to_jiffies);
 
 void
-jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
+jiffies_to_timespec64(const unsigned long jiffies, struct timespec64 *value)
 {
 	/*
 	 * Convert jiffies to nanoseconds and separate with
@@ -580,7 +583,7 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
 			    NSEC_PER_SEC, &rem);
 	value->tv_nsec = rem;
 }
-EXPORT_SYMBOL(jiffies_to_timespec);
+EXPORT_SYMBOL(jiffies_to_timespec64);
 
 /*
  * We could use a similar algorithm to timespec_to_jiffies (with a
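The reworked timespec_trunc() contract is easy to verify outside the kernel: granularity 1 is a no-op, NSEC_PER_SEC zeroes tv_nsec, anything in between truncates downward, and anything else now triggers a warning instead of silently misbehaving. A standalone re-implementation of the same logic for experimentation (not the kernel function itself):

/* Userspace copy of the truncation logic shown above, with fprintf
 * standing in for WARN(). */
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

static struct timespec timespec_trunc_demo(struct timespec t, unsigned gran)
{
	if (gran == 1) {
		/* nothing: full nanosecond resolution */
	} else if (gran == NSEC_PER_SEC) {
		t.tv_nsec = 0;
	} else if (gran > 1 && gran < NSEC_PER_SEC) {
		t.tv_nsec -= t.tv_nsec % gran;
	} else {
		fprintf(stderr, "illegal file time granularity: %u\n", gran);
	}
	return t;
}

int main(void)
{
	struct timespec t = { .tv_sec = 1, .tv_nsec = 123456789 };

	/* Truncate to 1 ms granularity: 123456789 ns -> 123000000 ns */
	t = timespec_trunc_demo(t, 1000000);
	printf("%ld.%09ld\n", (long)t.tv_sec, t.tv_nsec);
	return 0;
}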
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 3688f1e07..44d2cc043 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -911,6 +911,7 @@ int do_settimeofday64(const struct timespec64 *ts)
 	struct timekeeper *tk = &tk_core.timekeeper;
 	struct timespec64 ts_delta, xt;
 	unsigned long flags;
+	int ret = 0;
 
 	if (!timespec64_valid_strict(ts))
 		return -EINVAL;
@@ -924,10 +925,15 @@ int do_settimeofday64(const struct timespec64 *ts)
 	ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
 	ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;
 
+	if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));
 
 	tk_set_xtime(tk, ts);
-
+out:
 	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
 
 	write_seqcount_end(&tk_core.seq);
@@ -936,7 +942,7 @@ int do_settimeofday64(const struct timespec64 *ts)
 	/* signal hrtimers about time change */
 	clock_was_set();
 
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(do_settimeofday64);
@@ -965,7 +971,8 @@ int timekeeping_inject_offset(struct timespec *ts)
 
 	/* Make sure the proposed value is valid */
 	tmp = timespec64_add(tk_xtime(tk), ts64);
-	if (!timespec64_valid_strict(&tmp)) {
+	if (timespec64_compare(&tk->wall_to_monotonic, &ts64) > 0 ||
+	    !timespec64_valid_strict(&tmp)) {
 		ret = -EINVAL;
 		goto error;
 	}
@@ -1874,7 +1881,7 @@ struct timespec __current_kernel_time(void)
 	return timespec64_to_timespec(tk_xtime(tk));
 }
 
-struct timespec current_kernel_time(void)
+struct timespec64 current_kernel_time64(void)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
 	struct timespec64 now;
@@ -1886,9 +1893,9 @@ struct timespec current_kernel_time(void)
 		now = tk_xtime(tk);
 	} while (read_seqcount_retry(&tk_core.seq, seq));
 
-	return timespec64_to_timespec(now);
+	return now;
 }
-EXPORT_SYMBOL(current_kernel_time);
+EXPORT_SYMBOL(current_kernel_time64);
 
 struct timespec64 get_monotonic_coarse64(void)
 {
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index a4536e1e3..f75e35b60 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -137,7 +137,7 @@ print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
 		   (unsigned long long) ktime_to_ns(base->offset));
 #endif
 	SEQ_printf(m,   "active timers:\n");
-	print_active_timers(m, base, now);
+	print_active_timers(m, base, now + ktime_to_ns(base->offset));
 }
 
 static void print_cpu(struct seq_file *m, int cpu, u64 now)
@@ -225,7 +225,7 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
 		   (unsigned long long) dev->min_delta_ns);
 	SEQ_printf(m, " mult:           %u\n", dev->mult);
 	SEQ_printf(m, " shift:          %u\n", dev->shift);
-	SEQ_printf(m, " mode:           %d\n", dev->mode);
+	SEQ_printf(m, " mode:           %d\n", clockevent_get_state(dev));
 	SEQ_printf(m, " next_event:     %Ld nsecs\n",
 		   (unsigned long long) ktime_to_ns(dev->next_event));
@@ -233,40 +233,34 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
 	print_name_offset(m, dev->set_next_event);
 	SEQ_printf(m, "\n");
 
-	if (dev->set_mode) {
-		SEQ_printf(m, " set_mode:       ");
-		print_name_offset(m, dev->set_mode);
+	if (dev->set_state_shutdown) {
+		SEQ_printf(m, " shutdown: ");
+		print_name_offset(m, dev->set_state_shutdown);
 		SEQ_printf(m, "\n");
-	} else {
-		if (dev->set_state_shutdown) {
-			SEQ_printf(m, " shutdown: ");
-			print_name_offset(m, dev->set_state_shutdown);
-			SEQ_printf(m, "\n");
-		}
+	}
 
-		if (dev->set_state_periodic) {
-			SEQ_printf(m, " periodic: ");
-			print_name_offset(m, dev->set_state_periodic);
-			SEQ_printf(m, "\n");
-		}
+	if (dev->set_state_periodic) {
+		SEQ_printf(m, " periodic: ");
+		print_name_offset(m, dev->set_state_periodic);
+		SEQ_printf(m, "\n");
+	}
 
-		if (dev->set_state_oneshot) {
-			SEQ_printf(m, " oneshot:  ");
-			print_name_offset(m, dev->set_state_oneshot);
-			SEQ_printf(m, "\n");
-		}
+	if (dev->set_state_oneshot) {
+		SEQ_printf(m, " oneshot:  ");
+		print_name_offset(m, dev->set_state_oneshot);
+		SEQ_printf(m, "\n");
+	}
 
-		if (dev->set_state_oneshot_stopped) {
-			SEQ_printf(m, " oneshot stopped: ");
-			print_name_offset(m, dev->set_state_oneshot_stopped);
-			SEQ_printf(m, "\n");
-		}
+	if (dev->set_state_oneshot_stopped) {
+		SEQ_printf(m, " oneshot stopped: ");
+		print_name_offset(m, dev->set_state_oneshot_stopped);
+		SEQ_printf(m, "\n");
+	}
 
-		if (dev->tick_resume) {
-			SEQ_printf(m, " resume:   ");
-			print_name_offset(m, dev->tick_resume);
-			SEQ_printf(m, "\n");
-		}
+	if (dev->tick_resume) {
+		SEQ_printf(m, " resume:   ");
+		print_name_offset(m, dev->tick_resume);
+		SEQ_printf(m, "\n");
 	}
 
 	SEQ_printf(m, " event_handler:  ");
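The timekeeping.c guards above make do_settimeofday64() and timekeeping_inject_offset() reject requests that would set the wall clock to a point before boot, which would drive the monotonic offset arithmetic negative. A rough standalone model of that comparison, using seconds as doubles where the kernel compares timespec64 values (the numbers are illustrative):

/* Model of the guard added to do_settimeofday64(): a requested delta
 * smaller than wall_to_monotonic (the negated boot offset) means the
 * new wall time would lie before boot, so the request is rejected. */
#include <stdio.h>

int main(void)
{
	double wall_to_monotonic = -100.0;	/* boot was 100 s ago */
	double xt = 1450195936.0;		/* current wall time */
	double requested = xt - 200.0;		/* 200 s into the past */
	double delta = requested - xt;

	/* Mirrors: timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0 */
	if (wall_to_monotonic > delta)
		puts("-EINVAL: would set clock before boot");
	else
		puts("accepted");
	return 0;
}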