#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/stop_machine.h>

#ifndef BFS_SCHED_H
#define BFS_SCHED_H

/*
 * This is the main, per-CPU runqueue data structure.
 * This data should only be modified by the local cpu.
 */
struct rq {
	struct task_struct *curr, *idle, *stop;
	struct mm_struct *prev_mm;

	/* Pointer to grq spinlock */
	raw_spinlock_t *grq_lock;

	/* Stored data about rq->curr to work outside grq lock */
	u64 rq_deadline;
	unsigned int rq_policy;
	int rq_time_slice;
	u64 rq_last_ran;
	int rq_prio;
	bool rq_running;	/* There is a task running */
	int soft_affined;	/* Running or queued tasks with this set as their rq */
#ifdef CONFIG_SMT_NICE
	struct mm_struct *rq_mm;
	int rq_smt_bias;	/* Policy/nice level bias across smt siblings */
#endif

	/* Accurate timekeeping data */
	u64 timekeep_clock;
	unsigned long user_pc, nice_pc, irq_pc, softirq_pc, system_pc,
		iowait_pc, idle_pc;
	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	int cpu;		/* cpu of this runqueue */
	bool online;
	bool scaling;		/* This CPU is managed by a scaling CPU freq governor */
	struct task_struct *sticky_task;

	struct root_domain *rd;
	struct sched_domain *sd;
	int *cpu_locality;	/* CPU relative cache distance */
#ifdef CONFIG_SCHED_SMT
	bool (*siblings_idle)(int cpu);
	/* See if all smt siblings are idle */
#endif /* CONFIG_SCHED_SMT */
#ifdef CONFIG_SCHED_MC
	bool (*cache_idle)(int cpu);
	/* See if all cache siblings are idle */
#endif /* CONFIG_SCHED_MC */
	u64 last_niffy;		/* Last time this RQ updated grq.niffies */
#endif /* CONFIG_SMP */
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif /* CONFIG_PARAVIRT */
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */

	u64 clock, old_clock, last_tick;
	u64 clock_task;
	bool dither;

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_switch;
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif /* CONFIG_SCHEDSTATS */
#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within an RCU lock section */
	struct cpuidle_state *idle_state;
#endif
};
#ifdef CONFIG_SMP
struct rq *cpu_rq(int cpu);
#endif
#ifndef CONFIG_SMP
extern struct rq *uprq;
#define cpu_rq(cpu) (uprq)
#define this_rq() (uprq)
#define raw_rq() (uprq)
#define task_rq(p) (uprq)
#define cpu_curr(cpu) ((uprq)->curr)
#else /* CONFIG_SMP */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
#define this_rq() this_cpu_ptr(&runqueues)
#define raw_rq() raw_cpu_ptr(&runqueues)
#endif /* CONFIG_SMP */
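/*
 * Illustrative sketch only, not part of this header: how the accessors
 * above are typically used.  On UP every macro resolves to the single
 * global uprq; on SMP, cpu_rq() is the function declared above and
 * this_rq() is the per-cpu pointer, so callers normally run with
 * preemption disabled.  "target_cpu" and "p" are made-up names for the
 * example.
 *
 *	struct rq *rq = this_rq();
 *	struct rq *other = cpu_rq(target_cpu);
 *	bool running_there = (other->curr == p);
 */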
static inline u64 __rq_clock_broken(struct rq *rq)
{
	return READ_ONCE(rq->clock);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(rq->grq_lock);
	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(rq->grq_lock);
	return rq->clock_task;
}
extern struct mutex sched_domains_mutex;
extern struct static_key_false sched_schedstats;
#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))
/*
* The domain tree (rq->sd) is protected by RCU's quiescent state transition.
* See detach_destroy_domains: synchronize_sched for details.
*
* The domain tree of any CPU may only be accessed from within
* preempt-disabled sections.
*/
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
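/*
 * Illustrative sketch: walking the domain tree of a CPU on an SMP
 * kernel.  As the comment above says, this must be done from within an
 * RCU read-side / preempt-disabled section.  "target" is a made-up
 * variable for the example.
 *
 *	struct sched_domain *sd;
 *
 *	rcu_read_lock();
 *	for_each_domain(cpu, sd) {
 *		if (cpumask_test_cpu(target, sched_domain_span(sd)))
 *			break;
 *	}
 *	rcu_read_unlock();
 */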
#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
void register_sched_domain_sysctl(void);
void unregister_sched_domain_sysctl(void);
#else
static inline void register_sched_domain_sysctl(void)
{
}
static inline void unregister_sched_domain_sysctl(void)
{
}
#endif
static inline void sched_ttwu_pending(void) { }
static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq;
}
#ifdef CONFIG_SMP
extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
#endif
#ifdef CONFIG_CPU_IDLE
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
	rq->idle_state = idle_state;
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	WARN_ON(!rcu_read_lock_held());
	return rq->idle_state;
}
#else
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	return NULL;
}
#endif
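/*
 * Illustrative sketch of the intended pairing: the cpuidle path
 * publishes the state it is about to enter, and observers sample it
 * under RCU, which the WARN_ON above enforces.  "target_state" is a
 * made-up variable for the example.
 *
 *	struct cpuidle_state *state;
 *	unsigned int latency;
 *
 *	idle_set_state(rq, target_state);
 *	... CPU enters and leaves the idle state ...
 *	idle_set_state(rq, NULL);
 *
 *	rcu_read_lock();
 *	state = idle_get_state(rq);
 *	latency = state ? state->exit_latency : 0;
 *	rcu_read_unlock();
 */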
#ifdef CONFIG_CPU_FREQ
DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
static inline void cpufreq_trigger(u64 time)
{
	struct update_util_data *data;

	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
	if (data)
		data->func(data, time, ULONG_MAX, 0);
}
#else
static inline void cpufreq_trigger(u64 __maybe_unused time)
{
}
#endif /* CONFIG_CPU_FREQ */
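/*
 * Illustrative sketch: a scheduler path that already runs with
 * preemption disabled (e.g. the tick), which satisfies the
 * rcu_dereference_sched() above, can nudge the cpufreq governor with
 * the current clock.  How the governor interprets the ULONG_MAX
 * utilisation passed above is left to the governor.
 *
 *	cpufreq_trigger(rq->clock);
 */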
#endif /* BFS_SCHED_H */