/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
 */

#ifndef __WALT_H
#define __WALT_H

#ifdef CONFIG_SCHED_WALT

#include <linux/sched/sysctl.h>
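
/*
 * Policies for deriving a task's demand from its window history:
 * the most recent window only, the maximum window, the larger of
 * the maximum and the average, or the plain average. The INVALID
 * value bounds the range of valid policy settings.
 */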
#define WINDOW_STATS_RECENT 0
#define WINDOW_STATS_MAX 1
#define WINDOW_STATS_MAX_RECENT_AVG 2
#define WINDOW_STATS_AVG 3
#define WINDOW_STATS_INVALID_POLICY 4
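
/*
 * Written into ravg.sum_history[0] of an exiting task so that later
 * window accounting can recognize it (see exiting_task() below).
 */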
#define EXITING_TASK_MARKER 0xdeaddead
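
/*
 * Which load to report for frequency guidance: the max of the CPU
 * load and the top task's load, the CPU load alone, or the top
 * task's load alone.
 */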
#define FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK 0
#define FREQ_REPORT_CPU_LOAD 1
#define FREQ_REPORT_TOP_TASK 2
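
/* Iterate over every active related-thread group. */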
#define for_each_related_thread_group(grp) \
list_for_each_entry(grp, &active_related_thread_groups, list)
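
/*
 * A task counts as new until it has been active in this many windows
 * (see is_new_task() below).
 */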
#define SCHED_NEW_TASK_WINDOWS 5
extern unsigned int sched_ravg_window;
extern unsigned int max_possible_efficiency;
extern unsigned int min_possible_efficiency;
extern unsigned int max_possible_freq;
extern unsigned int sched_major_task_runtime;
extern unsigned int __read_mostly sched_init_task_load_windows;
extern unsigned int __read_mostly sched_load_granule;
extern struct mutex cluster_lock;
extern rwlock_t related_thread_group_lock;
extern __read_mostly unsigned int sched_ravg_hist_size;
extern __read_mostly unsigned int sched_freq_aggregate;
extern __read_mostly unsigned int sched_window_stats_policy;
extern __read_mostly unsigned int sched_group_upmigrate;
extern __read_mostly unsigned int sched_group_downmigrate;
extern struct sched_cluster init_cluster;
extern void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
u64 wallclock, u64 irqtime);
extern unsigned int walt_big_tasks(int cpu);
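
/*
 * nr_big_tasks accounting: a task counts as "big" when it is flagged
 * as misfit, i.e. its demand is too large for its current CPU.
 */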
static inline void
inc_nr_big_task(struct walt_sched_stats *stats, struct task_struct *p)
{
if (sched_disable_window_stats)
return;
if (p->misfit)
stats->nr_big_tasks++;
}
static inline void
dec_nr_big_task(struct walt_sched_stats *stats, struct task_struct *p)
{
if (sched_disable_window_stats)
return;
if (p->misfit)
stats->nr_big_tasks--;
BUG_ON(stats->nr_big_tasks < 0);
}
static inline void
walt_adjust_nr_big_tasks(struct rq *rq, int delta, bool inc)
{
if (sched_disable_window_stats)
return;
sched_update_nr_prod(cpu_of(rq), 0, true);
rq->walt_stats.nr_big_tasks += inc ? delta : -delta;
BUG_ON(rq->walt_stats.nr_big_tasks < 0);
}
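
/*
 * Apply signed deltas to the scaled demand and predicted-demand
 * sums; neither sum may go negative.
 */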
static inline void
fixup_cumulative_runnable_avg(struct walt_sched_stats *stats,
s64 demand_scaled_delta,
s64 pred_demand_scaled_delta)
{
if (sched_disable_window_stats)
return;
stats->cumulative_runnable_avg_scaled += demand_scaled_delta;
BUG_ON((s64)stats->cumulative_runnable_avg_scaled < 0);
stats->pred_demands_sum_scaled += pred_demand_scaled_delta;
BUG_ON((s64)stats->pred_demands_sum_scaled < 0);
}
static inline void
walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
{
if (sched_disable_window_stats)
return;
fixup_cumulative_runnable_avg(&rq->walt_stats, p->ravg.demand_scaled,
p->ravg.pred_demand_scaled);
/*
 * Add a task's contribution to the cumulative window demand when:
 *
 * (1) the task is enqueued with on_rq = 1, i.e. due to a migration
 *     or a prio/cgroup/class change, or
 * (2) the task is waking for the first time in this window.
 */
if (p->on_rq || (p->last_sleep_ts < rq->window_start))
walt_fixup_cum_window_demand(rq, p->ravg.demand_scaled);
}
static inline void
walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
{
if (sched_disable_window_stats)
return;
fixup_cumulative_runnable_avg(&rq->walt_stats,
-(s64)p->ravg.demand_scaled,
-(s64)p->ravg.pred_demand_scaled);
/*
 * on_rq will still be 1 for sleeping tasks, so it cannot tell a
 * sleep apart from other dequeues. Instead, check whether the task
 * is migrating, or is being dequeued while RUNNING for a
 * prio/cgroup/class change.
 */
if (task_on_rq_migrating(p) || p->state == TASK_RUNNING)
walt_fixup_cum_window_demand(rq, -(s64)p->ravg.demand_scaled);
}
extern void fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,
u16 updated_demand_scaled,
u16 updated_pred_demand_scaled);
extern void inc_rq_walt_stats(struct rq *rq, struct task_struct *p);
extern void dec_rq_walt_stats(struct rq *rq, struct task_struct *p);
extern void fixup_busy_time(struct task_struct *p, int new_cpu);
extern void init_new_task_load(struct task_struct *p);
extern void mark_task_starting(struct task_struct *p);
extern void set_window_start(struct rq *rq);
void account_irqtime(int cpu, struct task_struct *curr, u64 delta,
u64 wallclock);
extern bool do_pl_notif(struct rq *rq);
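
/*
 * How long (in jiffies) after the last irq a CPU is still considered
 * to carry irq load; see sched_irqload() below.
 */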
#define SCHED_HIGH_IRQ_TIMEOUT 3
static inline u64 sched_irqload(int cpu)
{
struct rq *rq = cpu_rq(cpu);
s64 delta;
delta = get_jiffies_64() - rq->irqload_ts;
/*
 * The current context can be preempted by an irq, and irq context
 * can update rq->irqload_ts, so delta may be negative. That is fine:
 * a negative delta simply means an irq occurred very recently, so
 * returning the average irq load is still correct.
 */
if (delta < SCHED_HIGH_IRQ_TIMEOUT)
return rq->avg_irqload;
else
return 0;
}
static inline int sched_cpu_high_irqload(int cpu)
{
return sched_irqload(cpu) >= sysctl_sched_cpu_high_irqload;
}
static inline int exiting_task(struct task_struct *p)
{
return (p->ravg.sum_history[0] == EXITING_TASK_MARKER);
}
static inline struct sched_cluster *cpu_cluster(int cpu)
{
return cpu_rq(cpu)->cluster;
}
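
/*
 * Scale a load sampled at src_freq to its equivalent at dst_freq:
 * the same work takes src_freq/dst_freq times as long at dst_freq.
 */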
static inline u64
scale_load_to_freq(u64 load, unsigned int src_freq, unsigned int dst_freq)
{
return div64_u64(load * (u64)src_freq, (u64)dst_freq);
}
static inline bool is_new_task(struct task_struct *p)
{
return p->ravg.active_windows < SCHED_NEW_TASK_WINDOWS;
}
static inline void clear_top_tasks_table(u8 *table)
{
memset(table, 0, NUM_LOAD_INDICES * sizeof(u8));
}
extern void update_cluster_load_subtractions(struct task_struct *p,
int cpu, u64 ws, bool new_task);
extern void sched_account_irqstart(int cpu, struct task_struct *curr,
u64 wallclock);
static inline unsigned int max_task_load(void)
{
return sched_ravg_window;
}
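
/* Average frequency over a period: cycles executed / period length. */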
static inline u32 cpu_cycles_to_freq(u64 cycles, u64 period)
{
return div64_u64(cycles, period);
}
static inline unsigned int cpu_cur_freq(int cpu)
{
return cpu_rq(cpu)->cluster->cur_freq;
}
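
/*
 * Convert the arch frequency-invariance factor (0..SCHED_CAPACITY_SCALE)
 * back into an absolute frequency against the cluster's maximum.
 */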
static inline unsigned int sched_cpu_legacy_freq(int cpu)
{
unsigned long curr_cap = arch_scale_freq_capacity(NULL, cpu);
return (curr_cap * (u64) cpu_rq(cpu)->cluster->max_possible_freq) >>
SCHED_CAPACITY_SHIFT;
}
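
/*
 * Move all entries from @src onto @dst. With @sync_rcu, @src is
 * reinitialized first and a grace period is awaited, so concurrent
 * RCU walkers of @src finish before its entries are relinked.
 */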
static inline void
move_list(struct list_head *dst, struct list_head *src, bool sync_rcu)
{
struct list_head *first, *last;
first = src->next;
last = src->prev;
if (sync_rcu) {
INIT_LIST_HEAD_RCU(src);
synchronize_rcu();
}
first->prev = dst;
dst->prev = last;
last->next = dst;
/* Ensure list sanity before making the head visible to all CPUs. */
smp_mb();
dst->next = first;
}
extern void reset_task_stats(struct task_struct *p);
extern void update_cluster_topology(void);
extern struct list_head cluster_head;
#define for_each_sched_cluster(cluster) \
list_for_each_entry_rcu(cluster, &cluster_head, list)
extern void init_clusters(void);
extern void clear_top_tasks_bitmap(unsigned long *bitmap);
extern void sched_account_irqtime(int cpu, struct task_struct *curr,
u64 delta, u64 wallclock);
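
/*
 * Number the clusters on @head sequentially and record each one in
 * the sched_cluster[] lookup array under its new id.
 */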
static inline void assign_cluster_ids(struct list_head *head)
{
struct sched_cluster *cluster;
int pos = 0;
list_for_each_entry(cluster, head, list) {
cluster->id = pos;
sched_cluster[pos++] = cluster;
}
}
static inline int same_cluster(int src_cpu, int dst_cpu)
{
return cpu_rq(src_cpu)->cluster == cpu_rq(dst_cpu)->cluster;
}
void sort_clusters(void);
void walt_irq_work(struct irq_work *irq_work);
void walt_sched_init_rq(struct rq *rq);
static inline void walt_update_last_enqueue(struct task_struct *p)
{
p->last_enqueued_ts = sched_ktime_clock();
}
extern void walt_rotate_work_init(void);
extern void walt_rotation_checkpoint(int nr_big);
extern unsigned int walt_rotation_enabled;
extern unsigned int walt_get_default_coloc_group_load(void);
extern __read_mostly bool sched_freq_aggr_en;
static inline void walt_enable_frequency_aggregation(bool enable)
{
sched_freq_aggr_en = enable;
}

#else /* CONFIG_SCHED_WALT */

static inline void walt_sched_init_rq(struct rq *rq) { }
static inline void walt_rotate_work_init(void) { }
static inline void walt_rotation_checkpoint(int nr_big) { }
static inline void walt_update_last_enqueue(struct task_struct *p) { }
static inline unsigned int walt_get_default_coloc_group_load(void)
{
return 0;
}
static inline void update_task_ravg(struct task_struct *p, struct rq *rq,
int event, u64 wallclock, u64 irqtime) { }
static inline void walt_inc_cumulative_runnable_avg(struct rq *rq,
struct task_struct *p)
{
}
static inline unsigned int walt_big_tasks(int cpu)
{
return 0;
}
static inline void walt_adjust_nr_big_tasks(struct rq *rq,
int delta, bool inc)
{
}
static inline void inc_nr_big_task(struct walt_sched_stats *stats,
struct task_struct *p)
{
}
static inline void dec_nr_big_task(struct walt_sched_stats *stats,
struct task_struct *p)
{
}
static inline void walt_dec_cumulative_runnable_avg(struct rq *rq,
struct task_struct *p)
{
}
static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }
static inline void init_new_task_load(struct task_struct *p)
{
}
static inline void mark_task_starting(struct task_struct *p) { }
static inline void set_window_start(struct rq *rq) { }
static inline int sched_cpu_high_irqload(int cpu) { return 0; }
static inline void sched_account_irqstart(int cpu, struct task_struct *curr,
u64 wallclock)
{
}
static inline void update_cluster_topology(void) { }
static inline void init_clusters(void) { }
static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
u64 delta, u64 wallclock)
{
}
static inline int same_cluster(int src_cpu, int dst_cpu) { return 1; }
static inline bool do_pl_notif(struct rq *rq) { return false; }
static inline void
inc_rq_walt_stats(struct rq *rq, struct task_struct *p) { }
static inline void
dec_rq_walt_stats(struct rq *rq, struct task_struct *p) { }
static inline void
fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,
u16 updated_demand_scaled,
u16 updated_pred_demand_scaled)
{
}
static inline u64 sched_irqload(int cpu)
{
return 0;
}

#endif /* CONFIG_SCHED_WALT */

#endif /* __WALT_H */