Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  |  48
1 file changed, 42 insertions(+), 6 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a1aec10f1..c37f48c80 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -51,6 +51,7 @@ struct sched_param {
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
+#include <linux/kcov.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
@@ -423,6 +424,7 @@ extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
+extern signed long schedule_timeout_idle(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
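Note: schedule_timeout_idle() sleeps in TASK_IDLE, i.e. uninterruptibly but without counting toward the load average. A minimal sketch of a kernel thread using it; the thread function and work helper names are hypothetical:

	#include <linux/kthread.h>
	#include <linux/sched.h>

	static int poll_thread_fn(void *unused)
	{
		while (!kthread_should_stop()) {
			do_poll_work();		/* hypothetical periodic work */
			/* Sleep ~1s in TASK_IDLE: unlike TASK_UNINTERRUPTIBLE,
			 * the sleeper is ignored by loadavg and khungtaskd. */
			schedule_timeout_idle(HZ);
		}
		return 0;
	}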
@@ -715,6 +717,10 @@ struct signal_struct {
/* Earliest-expiration cache. */
struct task_cputime cputime_expires;
+#ifdef CONFIG_NO_HZ_FULL
+ atomic_t tick_dep_mask;
+#endif
+
struct list_head cpu_timers[3];
struct pid *tty_old_pgrp;
@@ -771,7 +777,6 @@ struct signal_struct {
#endif
#ifdef CONFIG_AUDIT
unsigned audit_tty;
- unsigned audit_tty_log_passwd;
struct tty_audit_buf *tty_audit_buf;
#endif
@@ -1562,6 +1567,10 @@ struct task_struct {
VTIME_SYS,
} vtime_snap_whence;
#endif
+
+#ifdef CONFIG_NO_HZ_FULL
+ atomic_t tick_dep_mask;
+#endif
unsigned long nvcsw, nivcsw; /* context switch counts */
u64 start_time; /* monotonic time in nsec */
u64 real_start_time; /* boot based time in nsec */
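Note: this hunk and the signal_struct hunk above add a tick dependency mask at two scopes, per-task here and per-process there. On a CONFIG_NO_HZ_FULL CPU the tick can only be stopped while every relevant mask is clear. A sketch using the tick_dep accessors from <linux/tick.h> that accompany this change (the surrounding caller is assumed):

	#include <linux/tick.h>

	/* An armed per-thread POSIX CPU timer needs the tick on tsk's CPU. */
	tick_dep_set_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
	...
	tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);

	/* Process-wide timers record the dependency in signal_struct instead. */
	tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
	tick_dep_clear_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);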
@@ -1804,8 +1813,8 @@ struct task_struct {
* time slack values; these are used to round up poll() and
* select() etc timeout values. These are in nanoseconds.
*/
- unsigned long timer_slack_ns;
- unsigned long default_timer_slack_ns;
+ u64 timer_slack_ns;
+ u64 default_timer_slack_ns;
#ifdef CONFIG_KASAN
unsigned int kasan_depth;
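Note: widening the slack fields to u64 makes the nanosecond slack independent of word size. Userspace still sets per-task slack through prctl(2), for example:

	#include <sys/prctl.h>

	/* Let the kernel coalesce this task's timers within 50 us. */
	prctl(PR_SET_TIMERSLACK, 50000UL, 0, 0, 0);

	/* Read the current slack back, in nanoseconds. */
	long slack = prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0);

Passing 0 to PR_SET_TIMERSLACK resets the task to default_timer_slack_ns.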
@@ -1831,6 +1840,16 @@ struct task_struct {
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
+#ifdef CONFIG_KCOV
+ /* Coverage collection mode enabled for this task (0 if disabled). */
+ enum kcov_mode kcov_mode;
+ /* Size of the kcov_area. */
+ unsigned kcov_size;
+ /* Buffer for coverage collection. */
+ void *kcov_area;
+ /* kcov descriptor wired with this task, or NULL. */
+ struct kcov *kcov;
+#endif
#ifdef CONFIG_MEMCG
struct mem_cgroup *memcg_in_oom;
gfp_t memcg_oom_gfp_mask;
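Note: these fields back the KCOV code-coverage interface: kcov_area is the mmap()ed buffer that the compiler-instrumented kernel writes program counters into while kcov_mode is enabled. Condensed from the kcov documentation, the userspace side looks roughly like:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#define KCOV_INIT_TRACE	_IOR('c', 1, unsigned long)
	#define KCOV_ENABLE	_IO('c', 100)
	#define KCOV_DISABLE	_IO('c', 101)
	#define COVER_SIZE	(64 << 10)	/* entries, not bytes */

	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	ioctl(fd, KCOV_ENABLE, 0);	/* trace the current task only */
	cover[0] = 0;			/* entry 0 holds the PC count */
	read(-1, NULL, 0);		/* the syscall under test */
	for (unsigned long i = 0; i < cover[0]; i++)
		printf("0x%lx\n", cover[i + 1]);
	ioctl(fd, KCOV_DISABLE, 0);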
@@ -1850,6 +1869,9 @@ struct task_struct {
unsigned long task_state_change;
#endif
int pagefault_disabled;
+#ifdef CONFIG_MMU
+ struct task_struct *oom_reaper_list;
+#endif
/* CPU-specific state of this task */
struct thread_struct thread;
/*
@@ -2433,10 +2455,7 @@ static inline void wake_up_nohz_cpu(int cpu) { }
#endif
#ifdef CONFIG_NO_HZ_FULL
-extern bool sched_can_stop_tick(void);
extern u64 scheduler_tick_max_deferment(void);
-#else
-static inline bool sched_can_stop_tick(void) { return false; }
#endif
#ifdef CONFIG_SCHED_AUTOGROUP
@@ -2932,10 +2951,18 @@ static inline unsigned long stack_not_used(struct task_struct *p)
unsigned long *n = end_of_stack(p);
do { /* Skip over canary */
+# ifdef CONFIG_STACK_GROWSUP
+ n--;
+# else
n++;
+# endif
} while (!*n);
+# ifdef CONFIG_STACK_GROWSUP
+ return (unsigned long)end_of_stack(p) - (unsigned long)n;
+# else
return (unsigned long)n - (unsigned long)end_of_stack(p);
+# endif
}
#endif
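Note: with CONFIG_STACK_GROWSUP the canary scan runs in the opposite direction, and the final subtraction flips accordingly. stack_not_used() is only compiled under CONFIG_DEBUG_STACK_USAGE; a sketch of a caller, loosely modeled on the exit-time low-water check (threshold and message are illustrative):

	#ifdef CONFIG_DEBUG_STACK_USAGE
		unsigned long free = stack_not_used(current);

		if (free < THREAD_SIZE / 8)
			pr_warn("%s used nearly all of its stack (%lu bytes never touched)\n",
				current->comm, free);
	#endif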
extern void set_task_stack_end_magic(struct task_struct *tsk);
@@ -3284,4 +3311,13 @@ static inline unsigned long rlimit_max(unsigned int limit)
return task_rlimit_max(current, limit);
}
+#ifdef CONFIG_CPU_FREQ
+struct update_util_data {
+ void (*func)(struct update_util_data *data,
+ u64 time, unsigned long util, unsigned long max);
+};
+
+void cpufreq_set_update_util_data(int cpu, struct update_util_data *data);
+#endif /* CONFIG_CPU_FREQ */
+
#endif
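Note: the update_util hook gives cpufreq governors a callback invoked directly from the scheduler's utilization-update paths, so the handler runs in scheduler context and must not sleep. A sketch of how a governor might register it; the gov_cpu structure and hook body are hypothetical:

	struct gov_cpu {
		struct update_util_data update_util;	/* must outlive registration */
		/* ... per-CPU governor state ... */
	};

	static void gov_update_hook(struct update_util_data *data,
				    u64 time, unsigned long util, unsigned long max)
	{
		struct gov_cpu *gc = container_of(data, struct gov_cpu, update_util);
		/* Atomic context: defer real frequency changes to irq_work or a kthread. */
	}

	/* Per-CPU registration and teardown: */
	gc->update_util.func = gov_update_hook;
	cpufreq_set_update_util_data(cpu, &gc->update_util);
	...
	cpufreq_set_update_util_data(cpu, NULL);	/* unregister before freeing gc */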