Diffstat (limited to 'include')
-rw-r--r--  include/linux/blkdev.h               |  5
-rw-r--r--  include/linux/cgroup_subsys.h        |  2
-rw-r--r--  include/linux/cpufreq.h              | 11
-rw-r--r--  include/linux/cpuhotplug.h           |  1
-rw-r--r--  include/linux/fs.h                   |  2
-rw-r--r--  include/linux/sched.h                |  6
-rw-r--r--  include/linux/sched/cpufreq.h        |  3
-rw-r--r--  include/linux/sched/rt.h             |  4
-rw-r--r--  include/linux/sync_file.h            |  9
-rw-r--r--  include/uapi/linux/android/binder.h  | 21
10 files changed, 37 insertions, 27 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3d1d4bc8e037..9dca0ef6d759 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -657,10 +657,9 @@ struct request_queue {
#define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */
#define QUEUE_FLAG_PREEMPT_ONLY 29 /* only process REQ_PREEMPT requests */
-#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
+#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_NONROT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
- (1 << QUEUE_FLAG_SAME_COMP) | \
- (1 << QUEUE_FLAG_ADD_RANDOM))
+ (1 << QUEUE_FLAG_SAME_COMP))
#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
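With this change the legacy (single-queue) default flag set marks a queue as non-rotational and no longer enables I/O accounting or entropy contribution (ADD_RANDOM) by default. A minimal sketch of how these bits are normally consumed, using the in-tree test helpers; the probe function itself is hypothetical:

#include <linux/blkdev.h>

/* Hypothetical probe: report how a freshly initialized legacy queue behaves
 * with the new default mask. */
static void probe_queue_defaults(struct request_queue *q)
{
	/* true with the new default: the I/O stack treats the device as SSD-like */
	bool nonrot = blk_queue_nonrot(q);

	/* false with the new default: per-queue I/O accounting starts disabled */
	bool iostat = blk_queue_io_stat(q);

	pr_info("nonrot=%d iostat=%d\n", nonrot, iostat);
}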
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index 8996c092568b..d99e801b58cf 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -21,7 +21,7 @@ SUBSYS(cpu)
SUBSYS(cpuacct)
#endif
-#if IS_ENABLED(CONFIG_SCHED_TUNE)
+#if IS_ENABLED(CONFIG_SCHED_TUNE) || IS_ENABLED(CONFIG_SCHED_TUNE_DUMMY)
SUBSYS(schedtune)
#endif
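The widened guard lets a dummy schedtune option register the controller as well. cgroup_subsys.h is an x-macro header: each include site defines SUBSYS() and re-includes it, so the schedtune entry only appears in the generated tables when one of the two options is enabled. Roughly how the header is consumed (this pattern lives in include/linux/cgroup-defs.h and is unchanged by this diff):

/* Each SUBSYS(x) line expands to an enum entry, so schedtune gets a
 * subsystem id only when the #if above evaluates true. */
#define SUBSYS(_x) _x ## _cgrp_id,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
	CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS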
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index a49623bc9070..78f0584bbbce 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -578,17 +578,6 @@ struct governor_attr {
size_t count);
};
-static inline bool cpufreq_can_do_remote_dvfs(struct cpufreq_policy *policy)
-{
- /*
- * Allow remote callbacks if:
- * - dvfs_possible_from_any_cpu flag is set
- * - the local and remote CPUs share cpufreq policy
- */
- return policy->dvfs_possible_from_any_cpu ||
- cpumask_test_cpu(smp_processor_id(), policy->cpus);
-}
-
/*********************************************************************
* FREQUENCY TABLE HELPERS *
*********************************************************************/
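The helper is dropped from the public cpufreq header; together with the declaration added to <linux/sched/cpufreq.h> later in this diff, this turns cpufreq_can_do_remote_dvfs() into an out-of-line function. A sketch of the corresponding definition, preserving the removed logic (the file it actually lands in is not visible in this diff):

#include <linux/cpufreq.h>

bool cpufreq_can_do_remote_dvfs(struct cpufreq_policy *policy)
{
	/*
	 * Allow remote callbacks if:
	 * - dvfs_possible_from_any_cpu flag is set
	 * - the local and remote CPUs share cpufreq policy
	 */
	return policy->dvfs_possible_from_any_cpu ||
	       cpumask_test_cpu(smp_processor_id(), policy->cpus);
}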
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index c1bd9854ed30..63fbc5f2adfa 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -69,6 +69,7 @@ enum cpuhp_state {
CPUHP_SLAB_PREPARE,
CPUHP_MD_RAID5_PREPARE,
CPUHP_RCUTREE_PREP,
+ CPUHP_CPUDEV_PM_PREPARE,
CPUHP_HYP_CORE_CTL_ISOLATION_DEAD,
CPUHP_CORE_CTL_ISOLATION_DEAD,
CPUHP_CPUIDLE_COUPLED_PREPARE,
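CPUHP_CPUDEV_PM_PREPARE adds a slot in the PREPARE section of the hotplug state machine; the callbacks that claim it are not part of this diff. A hedged sketch of how such a state is typically registered (the callback names and state name string are made up for illustration):

#include <linux/cpuhotplug.h>

/* Hypothetical callback: prepare per-CPU PM state before the CPU is brought up. */
static int cpudev_pm_prepare_cpu(unsigned int cpu)
{
	return 0;
}

/* Hypothetical callback: undo the preparation after the CPU is torn down. */
static int cpudev_pm_dead_cpu(unsigned int cpu)
{
	return 0;
}

static int __init cpudev_pm_init(void)
{
	/* PREPARE-section callbacks run on a control CPU, not on the hotplugged CPU */
	return cpuhp_setup_state(CPUHP_CPUDEV_PM_PREPARE, "cpudev/pm:prepare",
				 cpudev_pm_prepare_cpu, cpudev_pm_dead_cpu);
}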
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a1bf10263d11..807544a49667 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -903,7 +903,7 @@ struct file {
struct hlist_node f_hash;
#endif /* #ifdef CONFIG_FILE_TABLE_DEBUG */
} __randomize_layout
- __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
+ __attribute__((aligned(8)));
struct file_handle {
__u32 handle_bytes;
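The dropped comment referred to fdget() packing flag bits into the low bits of the struct file pointer, which requires struct file to be aligned to at least 4; this change raises the minimum alignment to 8. For reference, the helper in include/linux/file.h (unchanged by this diff) looks roughly like this:

/* The low two bits of the word carry FDPUT_* flags, so the pointer itself
 * must have those bits clear - hence the alignment requirement on struct file. */
static inline struct fd __to_fd(unsigned long v)
{
	return (struct fd){(struct file *)(v & ~3), v & 3};
}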
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b36c0b033783..f54a3274da4f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1435,6 +1435,12 @@ struct task_struct {
void *security;
#endif
+ struct {
+ struct work_struct work;
+ atomic_t running;
+ bool free_stack;
+ } async_free;
+
/*
* New fields for task_struct should be added above here, so that
* they are included in the randomized portion of task_struct.
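The new async_free block suggests that the final teardown of a task (optionally including its kernel stack) can be pushed to a workqueue. The code that uses these fields is not in this diff; a purely hypothetical sketch of the shape such a user might take:

#include <linux/sched.h>
#include <linux/workqueue.h>

/* Hypothetical worker: runs the deferred part of the task teardown. */
static void task_async_free_fn(struct work_struct *work)
{
	struct task_struct *tsk = container_of(work, struct task_struct,
					       async_free.work);

	if (tsk->async_free.free_stack)
		; /* free the kernel stack here */
	atomic_set(&tsk->async_free.running, 0);
	/* ... release the task_struct itself ... */
}

/* Hypothetical enqueue path, e.g. called from the final put of the task. */
static void queue_task_async_free(struct task_struct *tsk, bool free_stack)
{
	tsk->async_free.free_stack = free_stack;
	atomic_set(&tsk->async_free.running, 1);
	INIT_WORK(&tsk->async_free.work, task_async_free_fn);
	schedule_work(&tsk->async_free.work);
}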
diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
index e0234142b2f2..e11d47003fca 100644
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -21,6 +21,8 @@
#define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
#ifdef CONFIG_CPU_FREQ
+struct cpufreq_policy;
+
struct update_util_data {
void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
};
@@ -29,6 +31,7 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
void (*func)(struct update_util_data *data, u64 time,
unsigned int flags));
void cpufreq_remove_update_util_hook(int cpu);
+bool cpufreq_can_do_remote_dvfs(struct cpufreq_policy *policy);
#endif /* CONFIG_CPU_FREQ */
#endif /* _LINUX_SCHED_CPUFREQ_H */
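Declaring the helper here makes it available to scheduler-side governors without pulling in all of cpufreq.h. The typical caller pattern (schedutil's sugov_should_update_freq() performs essentially this check) looks like the following; the hook function is hypothetical:

/* Hypothetical governor hook: skip remote frequency updates the policy
 * does not allow. */
static bool governor_should_update(struct cpufreq_policy *policy)
{
	if (!cpufreq_can_do_remote_dvfs(policy))
		return false;
	/* ... rate limiting and other checks ... */
	return true;
}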
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index db865ed25ef3..d88c71cae796 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -48,9 +48,9 @@ extern void normalize_rt_tasks(void);
/*
- * default timeslice is 100 msecs (used only for SCHED_RR tasks).
+ * default timeslice is 1 jiffy (used only for SCHED_RR tasks).
* Timeslices get refilled after they expire.
*/
-#define RR_TIMESLICE (100 * HZ / 1000)
+#define RR_TIMESLICE (1)
#endif /* _LINUX_SCHED_RT_H */
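For scale: the old definition evaluated to 100 * HZ / 1000 jiffies, i.e. 100 ms regardless of HZ (10 jiffies at HZ=100, 100 at HZ=1000). The new definition is a single jiffy, so the effective SCHED_RR timeslice becomes 10 ms at HZ=100, 4 ms at HZ=250 and 1 ms at HZ=1000, and now scales with the configured tick rate.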
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h
index 0ad87c434ae6..b5063948cdd0 100644
--- a/include/linux/sync_file.h
+++ b/include/linux/sync_file.h
@@ -30,14 +30,6 @@
*/
struct sync_file {
struct file *file;
- /**
- * @user_name:
- *
- * Name of the sync file provided by userspace, for merged fences.
- * Otherwise generated through driver callbacks (in which case the
- * entire array is 0).
- */
- char user_name[32];
#ifdef CONFIG_DEBUG_FS
struct list_head sync_file_list;
#endif
@@ -53,6 +45,5 @@ struct sync_file {
struct sync_file *sync_file_create(struct dma_fence *fence);
struct dma_fence *sync_file_get_fence(int fd);
-char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len);
#endif /* _LINUX_SYNC_H */
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 4fed615e494c..59c8a9ec2bca 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -266,6 +266,18 @@ struct binder_node_info_for_ref {
__u32 reserved3;
};
+struct binder_freeze_info {
+ __u32 pid;
+ __u32 enable;
+ __u32 timeout_ms;
+};
+
+struct binder_frozen_status_info {
+ __u32 pid;
+ __u32 sync_recv;
+ __u32 async_recv;
+};
+
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
@@ -276,6 +288,8 @@ struct binder_node_info_for_ref {
#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref)
#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object)
+#define BINDER_FREEZE _IOW('b', 14, struct binder_freeze_info)
+#define BINDER_GET_FROZEN_INFO _IOWR('b', 15, struct binder_frozen_status_info)
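The two new ioctls let a process manager freeze and query binder traffic for a target process. A userspace sketch (not part of this diff) of how they might be driven; the binder fd, target pid, timeout value and error handling are all assumptions of the example:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

/* Hypothetical helper: freeze or unfreeze binder delivery for a target pid. */
static int freeze_process(int binder_fd, __u32 pid, int freeze)
{
	struct binder_freeze_info info = {
		.pid = pid,
		.enable = freeze ? 1 : 0,
		.timeout_ms = 100,	/* example grace period for pending work */
	};

	return ioctl(binder_fd, BINDER_FREEZE, &info);
}

/* Hypothetical helper: report the frozen-status counters for a target pid. */
static void query_frozen_state(int binder_fd, __u32 pid)
{
	struct binder_frozen_status_info st = { .pid = pid };

	if (ioctl(binder_fd, BINDER_GET_FROZEN_INFO, &st) == 0)
		printf("pid %u: sync_recv=%u async_recv=%u\n",
		       st.pid, st.sync_recv, st.async_recv);
}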
/*
* NOTE: Two special error codes you should check for when calling
@@ -297,6 +311,7 @@ enum transaction_flags {
TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
+ TF_CLEAR_BUF = 0x20, /* clear buffer on txn complete */
};
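TF_CLEAR_BUF asks the driver to zero the transaction buffer once the transaction completes. A tiny userspace sketch (the helper is hypothetical) of a sender marking a payload as sensitive:

#include <linux/android/binder.h>

/* Hypothetical helper: have the driver wipe this transaction's buffer
 * when the transaction completes. */
static void mark_transaction_sensitive(struct binder_transaction_data *tr)
{
	tr->flags |= TF_CLEAR_BUF;
}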
struct binder_transaction_data {
@@ -460,6 +475,12 @@ enum binder_driver_return_protocol {
* The last transaction (either a bcTRANSACTION or
* a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
*/
+
+ BR_FROZEN_REPLY = _IO('r', 18),
+ /*
+ * The target of the last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) is frozen. No parameters.
+ */
};
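BR_FROZEN_REPLY is delivered through the normal BINDER_WRITE_READ read buffer and carries no payload. A hypothetical fragment of a userspace read-loop dispatcher that treats it as a transient failure:

#include <errno.h>
#include <stdint.h>
#include <linux/android/binder.h>

/* Hypothetical dispatcher for return commands pulled out of the read buffer. */
static int handle_return_cmd(uint32_t cmd)
{
	switch (cmd) {
	case BR_FROZEN_REPLY:
		/* target of the last transaction is frozen; no parameters */
		return -EAGAIN;	/* back off and retry later */
	default:
		return 0;	/* other commands handled elsewhere */
	}
}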
enum binder_driver_command_protocol {