Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  226
1 file changed, 21 insertions(+), 205 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 563e16582f14..7c723d8834b6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -178,28 +178,8 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
extern u64 nr_running_integral(unsigned int cpu);
#endif
-extern void sched_update_nr_prod(int cpu, long delta, bool inc);
-extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
- unsigned int *max_nr,
- unsigned int *big_max_nr);
-extern u64 sched_get_cpu_last_busy_time(int cpu);
-
-#ifdef CONFIG_SMP
extern u32 sched_get_wake_up_idle(struct task_struct *p);
extern int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle);
-#else
-static inline u32 sched_get_wake_up_idle(struct task_struct *p)
-{
- return 0;
-}
-
-static inline int sched_set_wake_up_idle(struct task_struct *p,
- int wake_up_idle)
-{
- return 0;
-}
-#endif /* CONFIG_SMP */
-
extern void calc_global_load(unsigned long ticks);
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
@@ -341,8 +321,6 @@ extern char ___assert_task_state[1 - 2*!!(
/* Task command name length */
#define TASK_COMM_LEN 16
-extern const char *sched_window_reset_reasons[];
-
enum task_event {
PUT_PREV_TASK = 0,
PICK_NEXT_TASK = 1,
@@ -352,12 +330,6 @@ enum task_event {
IRQ_UPDATE = 5,
};
-/* Note: this need to be in sync with migrate_type_names array */
-enum migrate_types {
- GROUP_TO_RQ,
- RQ_TO_GROUP,
-};
-
#include <linux/spinlock.h>
/*
@@ -385,41 +357,6 @@ extern cpumask_var_t cpu_isolated_map;
extern int runqueue_is_locked(int cpu);
-#ifdef CONFIG_HOTPLUG_CPU
-extern int sched_isolate_count(const cpumask_t *mask, bool include_offline);
-extern int sched_isolate_cpu(int cpu);
-extern int sched_unisolate_cpu(int cpu);
-extern int sched_unisolate_cpu_unlocked(int cpu);
-#else
-static inline int sched_isolate_count(const cpumask_t *mask,
- bool include_offline)
-{
- cpumask_t count_mask;
-
- if (include_offline)
- cpumask_andnot(&count_mask, mask, cpu_online_mask);
- else
- return 0;
-
- return cpumask_weight(&count_mask);
-}
-
-static inline int sched_isolate_cpu(int cpu)
-{
- return 0;
-}
-
-static inline int sched_unisolate_cpu(int cpu)
-{
- return 0;
-}
-
-static inline int sched_unisolate_cpu_unlocked(int cpu)
-{
- return 0;
-}
-#endif
-
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
@@ -1145,8 +1082,9 @@ struct sched_domain_attr {
extern int sched_domain_level_max;
struct capacity_state {
- unsigned long cap; /* compute capacity */
- unsigned long power; /* power consumption at this compute capacity */
+ unsigned long cap; /* capacity - calculated by energy driver */
+ unsigned long frequency;/* frequency */
+ unsigned long power; /* power consumption at this frequency */
};
struct idle_state {
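The reworked capacity_state above carries an explicit frequency for each operating point alongside capacity and power. As a minimal sketch of how an energy-model consumer might walk such a table, the entries and the find_cap_state() helper below are illustrative only, not part of this patch:

/* Hypothetical per-cluster table, ordered lowest to highest operating point. */
static struct capacity_state example_cap_states[] = {
	{ .cap = 120, .frequency =  300000, .power =  40 },
	{ .cap = 260, .frequency =  600000, .power = 120 },
	{ .cap = 430, .frequency = 1000000, .power = 290 },
};

/* Return the first operating point whose capacity satisfies the request. */
static struct capacity_state *find_cap_state(unsigned long util)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_cap_states) - 1; i++)
		if (example_cap_states[i].cap >= util)
			break;

	return &example_cap_states[i];
}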
@@ -1447,8 +1385,8 @@ struct sched_statistics {
};
#endif
+#ifdef CONFIG_SCHED_WALT
#define RAVG_HIST_SIZE_MAX 5
-#define NUM_BUSY_BUCKETS 10
/* ravg represents frequency scaled cpu-demand of tasks */
struct ravg {
@@ -1468,31 +1406,19 @@ struct ravg {
* sysctl_sched_ravg_hist_size windows. 'demand' could drive frequency
* demand for tasks.
*
- * 'curr_window_cpu' represents task's contribution to cpu busy time on
- * various CPUs in the current window
- *
- * 'prev_window_cpu' represents task's contribution to cpu busy time on
- * various CPUs in the previous window
- *
- * 'curr_window' represents the sum of all entries in curr_window_cpu
+ * 'curr_window' represents task's contribution to cpu busy time
+ * statistics (rq->curr_runnable_sum) in current window
*
- * 'prev_window' represents the sum of all entries in prev_window_cpu
- *
- * 'pred_demand' represents task's current predicted cpu busy time
- *
- * 'busy_buckets' groups historical busy time into different buckets
- * used for prediction
+ * 'prev_window' represents task's contribution to cpu busy time
+ * statistics (rq->prev_runnable_sum) in previous window
*/
u64 mark_start;
u32 sum, demand;
u32 sum_history[RAVG_HIST_SIZE_MAX];
- u32 *curr_window_cpu, *prev_window_cpu;
u32 curr_window, prev_window;
- u64 curr_burst, avg_burst, avg_sleep_time;
u16 active_windows;
- u32 pred_demand;
- u8 busy_buckets[NUM_BUSY_BUCKETS];
};
+#endif
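With the per-CPU window arrays, burst tracking, and prediction buckets gone, the remaining per-task accounting is two scalars. A rough sketch of how a window-based tracker might roll curr_window into prev_window at a window boundary; the helper name and its exact placement are assumptions for illustration, not code from this patch:

/*
 * At a window boundary the just-finished window becomes the previous
 * window, and accounting for the new window restarts from zero.
 */
static void rollover_task_window(struct task_struct *p)
{
	p->ravg.prev_window = p->ravg.curr_window;
	p->ravg.curr_window = 0;

	if (p->ravg.active_windows < USHRT_MAX)
		p->ravg.active_windows++;
}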
struct sched_entity {
struct load_weight load; /* for load-balancing */
@@ -1668,19 +1594,13 @@ struct task_struct {
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
struct ravg ravg;
/*
* 'init_load_pct' represents the initial task load assigned to children
* of this task
*/
u32 init_load_pct;
- u64 last_wake_ts;
- u64 last_switch_out_ts;
- u64 last_cpu_selected_ts;
- struct related_thread_group *grp;
- struct list_head grp_list;
- u64 cpu_cycles;
u64 last_sleep_ts;
#endif
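Per the comment above, init_load_pct seeds the initial load of this task's children. A hedged sketch of how a fork path might turn that percentage into an initial demand; the helper name and the sched_ravg_window window-length variable are assumptions, not part of this patch:

/*
 * Illustrative fork-path helper: seed a child's initial demand from
 * the parent's init_load_pct.
 */
static void example_init_new_task_load(struct task_struct *p)
{
	u32 pct = current->init_load_pct;

	p->ravg.demand = 0;
	if (pct)
		/* assumes sched_ravg_window holds the window length */
		p->ravg.demand = div64_u64((u64)pct * sched_ravg_window, 100);
}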
#ifdef CONFIG_CGROUP_SCHED
@@ -2561,6 +2481,10 @@ extern void do_set_cpus_allowed(struct task_struct *p,
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
extern bool cpupri_check_rt(void);
+static inline void set_wake_up_idle(bool enabled)
+{
+ /* do nothing for now */
+}
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
@@ -2579,97 +2503,6 @@ static inline bool cpupri_check_rt(void)
}
#endif
-struct sched_load {
- unsigned long prev_load;
- unsigned long new_task_load;
- unsigned long predicted_load;
-};
-
-struct cpu_cycle_counter_cb {
- u64 (*get_cpu_cycle_counter)(int cpu);
-};
-
-#define MAX_NUM_CGROUP_COLOC_ID 20
-
-#ifdef CONFIG_SCHED_HMP
-extern void free_task_load_ptrs(struct task_struct *p);
-extern int sched_set_window(u64 window_start, unsigned int window_size);
-extern unsigned long sched_get_busy(int cpu);
-extern void sched_get_cpus_busy(struct sched_load *busy,
- const struct cpumask *query_cpus);
-extern void sched_set_io_is_busy(int val);
-extern int sched_set_boost(int enable);
-extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
-extern u32 sched_get_init_task_load(struct task_struct *p);
-extern int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost);
-extern unsigned int sched_get_static_cpu_pwr_cost(int cpu);
-extern int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost);
-extern unsigned int sched_get_static_cluster_pwr_cost(int cpu);
-extern int sched_set_cluster_wake_idle(int cpu, unsigned int wake_idle);
-extern unsigned int sched_get_cluster_wake_idle(int cpu);
-extern int sched_update_freq_max_load(const cpumask_t *cpumask);
-extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
- u32 fmin, u32 fmax);
-extern void sched_set_cpu_cstate(int cpu, int cstate,
- int wakeup_energy, int wakeup_latency);
-extern void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
- int wakeup_energy, int wakeup_latency);
-extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
-extern u64 sched_ktime_clock(void);
-extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
-extern unsigned int sched_get_group_id(struct task_struct *p);
-
-#else /* CONFIG_SCHED_HMP */
-static inline void free_task_load_ptrs(struct task_struct *p) { }
-
-static inline u64 sched_ktime_clock(void)
-{
- return 0;
-}
-
-static inline int
-register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
-{
- return 0;
-}
-
-static inline int sched_set_window(u64 window_start, unsigned int window_size)
-{
- return -EINVAL;
-}
-static inline unsigned long sched_get_busy(int cpu)
-{
- return 0;
-}
-static inline void sched_get_cpus_busy(struct sched_load *busy,
- const struct cpumask *query_cpus) {};
-
-static inline void sched_set_io_is_busy(int val) {};
-
-static inline int sched_set_boost(int enable)
-{
- return -EINVAL;
-}
-
-static inline int sched_update_freq_max_load(const cpumask_t *cpumask)
-{
- return 0;
-}
-
-static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
- u32 fmin, u32 fmax) { }
-
-static inline void
-sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
-{
-}
-
-static inline void sched_set_cluster_dstate(const cpumask_t *cluster_cpus,
- int dstate, int wakeup_energy, int wakeup_latency)
-{
-}
-#endif /* CONFIG_SCHED_HMP */
-
#ifdef CONFIG_NO_HZ_COMMON
void calc_load_enter_idle(void);
void calc_load_exit_idle(void);
@@ -2678,14 +2511,6 @@ static inline void calc_load_enter_idle(void) { }
static inline void calc_load_exit_idle(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
-static inline void set_wake_up_idle(bool enabled)
-{
- if (enabled)
- current->flags |= PF_WAKE_UP_IDLE;
- else
- current->flags &= ~PF_WAKE_UP_IDLE;
-}
-
/*
* Do not use outside of architecture code which knows its limitations.
*
@@ -2751,7 +2576,7 @@ extern unsigned long long
task_sched_runtime(struct task_struct *task);
/* sched_exec is called by processes performing an exec */
-#if defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec() {}
@@ -2887,7 +2712,6 @@ extern void xtime_update(unsigned long ticks);
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
-extern int wake_up_process_no_notif(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
@@ -2896,11 +2720,6 @@ extern void wake_up_new_task(struct task_struct *tsk);
#endif
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);
-#ifdef CONFIG_SCHED_HMP
-extern void sched_exit(struct task_struct *p);
-#else
-static inline void sched_exit(struct task_struct *p) { }
-#endif
extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
@@ -3617,15 +3436,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
-extern struct atomic_notifier_head migration_notifier_head;
-struct migration_notify_data {
- int src_cpu;
- int dest_cpu;
- int load;
-};
-
-extern struct atomic_notifier_head load_alert_notifier_head;
-
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
@@ -3716,6 +3526,12 @@ static inline unsigned long rlimit_max(unsigned int limit)
return task_rlimit_max(current, limit);
}
+struct cpu_cycle_counter_cb {
+ u64 (*get_cpu_cycle_counter)(int cpu);
+ u32 (*get_cpu_cycles_max_per_us)(int cpu);
+};
+int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
+
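The cpu_cycle_counter_cb block added above lets a platform driver hand the scheduler a way to read per-CPU cycle counts. A minimal registration sketch, with made-up read functions standing in for a real cycle-counter driver:

/* Illustrative hooks; a real driver would read hardware cycle counters. */
static u64 example_get_cpu_cycle_counter(int cpu)
{
	return 0; /* e.g. read a per-CPU PMU cycle register here */
}

static u32 example_get_cpu_cycles_max_per_us(int cpu)
{
	return 0; /* e.g. max CPU frequency expressed in cycles per us */
}

static struct cpu_cycle_counter_cb example_cb = {
	.get_cpu_cycle_counter     = example_get_cpu_cycle_counter,
	.get_cpu_cycles_max_per_us = example_get_cpu_cycles_max_per_us,
};

static int __init example_cc_init(void)
{
	return register_cpu_cycle_counter_cb(&example_cb);
}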
#define SCHED_CPUFREQ_RT (1U << 0)
#define SCHED_CPUFREQ_DL (1U << 1)
#define SCHED_CPUFREQ_IOWAIT (1U << 2)