Diffstat (limited to 'include/linux/sched.h')
-rw-r--r-- | include/linux/sched.h | 91
1 file changed, 84 insertions, 7 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 838a89a78..a1aec10f1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -176,7 +176,7 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
 
 extern void calc_global_load(unsigned long ticks);
 
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) && !defined(CONFIG_SCHED_BFS)
 extern void update_cpu_load_nohz(int active);
 #else
 static inline void update_cpu_load_nohz(int active) { }
@@ -337,8 +337,6 @@ extern void init_idle_bootup_task(struct task_struct *idle);
 
 extern cpumask_var_t cpu_isolated_map;
 
-extern int runqueue_is_locked(int cpu);
-
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 extern void nohz_balance_enter_idle(int cpu);
 extern void set_cpu_sd_state_idle(void);
@@ -1393,9 +1391,11 @@ struct task_struct {
 	unsigned int flags;	/* per process flags, defined below */
 	unsigned int ptrace;
 
+#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_BFS)
+	int on_cpu;
+#endif
 #ifdef CONFIG_SMP
 	struct llist_node wake_entry;
-	int on_cpu;
 	unsigned int wakee_flips;
 	unsigned long wakee_flip_decay_ts;
 	struct task_struct *last_wakee;
@@ -1403,12 +1403,29 @@ struct task_struct {
 	int wake_cpu;
 #endif
 	int on_rq;
-
 	int prio, static_prio, normal_prio;
 	unsigned int rt_priority;
+#ifdef CONFIG_SCHED_BFS
+	int time_slice;
+	u64 deadline;
+	struct list_head run_list;
+	u64 last_ran;
+	u64 sched_time; /* sched_clock time spent running */
+#ifdef CONFIG_SMT_NICE
+	int smt_bias; /* Policy/nice level bias across smt siblings */
+#endif
+#ifdef CONFIG_SMP
+	bool sticky; /* Soft affined flag */
+#endif
+#ifdef CONFIG_HOTPLUG_CPU
+	bool zerobound; /* Bound to CPU0 for hotplug */
+#endif
+	unsigned long rt_timeout;
+#else /* CONFIG_SCHED_BFS */
 	const struct sched_class *sched_class;
 	struct sched_entity se;
 	struct sched_rt_entity rt;
+#endif
 #ifdef CONFIG_CGROUP_SCHED
 	struct task_group *sched_task_group;
 #endif
@@ -1528,6 +1545,9 @@ struct task_struct {
 	int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
 
 	cputime_t utime, stime, utimescaled, stimescaled;
+#ifdef CONFIG_SCHED_BFS
+	unsigned long utime_pc, stime_pc;
+#endif
 	cputime_t gtime;
 	struct prev_cputime prev_cputime;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
@@ -1846,6 +1866,63 @@ extern int arch_task_struct_size __read_mostly;
 # define arch_task_struct_size (sizeof(struct task_struct))
 #endif
 
+#ifdef CONFIG_SCHED_BFS
+bool grunqueue_is_locked(void);
+void grq_unlock_wait(void);
+void cpu_scaling(int cpu);
+void cpu_nonscaling(int cpu);
+#define tsk_seruntime(t) ((t)->sched_time)
+#define tsk_rttimeout(t) ((t)->rt_timeout)
+
+static inline void tsk_cpus_current(struct task_struct *p)
+{
+}
+
+static inline int runqueue_is_locked(int cpu)
+{
+	return grunqueue_is_locked();
+}
+
+void print_scheduler_version(void);
+
+static inline bool iso_task(struct task_struct *p)
+{
+	return (p->policy == SCHED_ISO);
+}
+#else /* CFS */
+extern int runqueue_is_locked(int cpu);
+static inline void cpu_scaling(int cpu)
+{
+}
+
+static inline void cpu_nonscaling(int cpu)
+{
+}
+#define tsk_seruntime(t) ((t)->se.sum_exec_runtime)
+#define tsk_rttimeout(t) ((t)->rt.timeout)
+
+static inline void tsk_cpus_current(struct task_struct *p)
+{
+	p->nr_cpus_allowed = current->nr_cpus_allowed;
+}
+
+static inline void print_scheduler_version(void)
+{
+	printk(KERN_INFO"CFS CPU scheduler.\n");
+}
+
+static inline bool iso_task(struct task_struct *p)
+{
+	return false;
+}
+
+/* Anyone feel like implementing this? */
+static inline bool above_background_load(void)
+{
+	return false;
+}
+#endif /* CONFIG_SCHED_BFS */
+
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
 
@@ -2261,7 +2338,7 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
 }
 #endif
 
-#ifdef CONFIG_NO_HZ_COMMON
+#if defined(CONFIG_NO_HZ_COMMON) && !defined(CONFIG_SCHED_BFS)
 void calc_load_enter_idle(void);
 void calc_load_exit_idle(void);
 #else
@@ -2334,7 +2411,7 @@ extern unsigned long long task_sched_runtime(struct task_struct *task);
 
 /* sched_exec is called by processes performing an exec */
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_BFS)
 extern void sched_exec(void);
 #else
 #define sched_exec() {}
 #endif
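The largest hunk (@@ -1846,6 +1866,63 @@) carries the point of the patch: tsk_seruntime(), tsk_rttimeout(), runqueue_is_locked(), print_scheduler_version() and iso_task() are defined once per scheduler so that shared kernel code never tests CONFIG_SCHED_BFS at its call sites. As a minimal stand-alone sketch of that idiom (hypothetical struct task and accessor names for illustration, not the kernel definitions above), the C program below switches the field layout and the accessor macros together on one config symbol, so every caller stays identical:

#include <stdio.h>

/* Define this (e.g. gcc -DCONFIG_SCHED_BFS) to mimic the BFS build. */
/* #define CONFIG_SCHED_BFS */

/* Hypothetical, stripped-down task structure for illustration only. */
struct task {
#ifdef CONFIG_SCHED_BFS
	unsigned long long sched_time;  /* BFS-style: runtime kept directly on the task */
	unsigned long rt_timeout;
#else
	struct {
		unsigned long long sum_exec_runtime;  /* CFS-style: runtime lives in the sched entity */
	} se;
	struct {
		unsigned long timeout;
	} rt;
#endif
};

/* Accessor macros mirror the tsk_seruntime()/tsk_rttimeout() idea:
 * callers read one name, the config symbol picks the field. */
#ifdef CONFIG_SCHED_BFS
#define task_seruntime(t) ((t)->sched_time)
#define task_rttimeout(t) ((t)->rt_timeout)
#else
#define task_seruntime(t) ((t)->se.sum_exec_runtime)
#define task_rttimeout(t) ((t)->rt.timeout)
#endif

int main(void)
{
	struct task t = { 0 };

	task_seruntime(&t) += 1000;  /* same call site works for either layout */
	task_rttimeout(&t) = 5;
	printf("runtime=%llu timeout=%lu\n",
	       task_seruntime(&t), (unsigned long)task_rttimeout(&t));
	return 0;
}

Built with -DCONFIG_SCHED_BFS the same main() reads sched_time/rt_timeout instead of se.sum_exec_runtime/rt.timeout, which is the property the hunks above rely on when they swap CFS's sched_entity fields for BFS's flat per-task counters under a single Kconfig option.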