author     spkal01 <kalligeross@gmail.com>    2021-05-17 02:37:28 +0530
committer  spkal01 <kalligeross@gmail.com>    2021-05-17 02:37:28 +0530
commit     93b265ae2eba8d93d0ffa406958547232f3114c8 (patch)
tree       c2f093aa144f732b5cf7bd8a0b45bf35eda42e1c /drivers/android
parent     0a82617b8fce8994076b518064e7d420af290ea8 (diff)
parent     016f4ba70bffb6d02725e778c3989fa542e6d12a (diff)
Merge branch 'android11' of https://github.com/vantoman/kernel_xiaomi_sm6150 into HEAD (HEAD, r11.1)
Diffstat (limited to 'drivers/android')
-rw-r--r--   drivers/android/binder.c                  212
-rw-r--r--   drivers/android/binder_alloc.c            106
-rw-r--r--   drivers/android/binder_alloc.h               9
-rw-r--r--   drivers/android/binder_alloc_selftest.c      2
4 files changed, 310 insertions, 19 deletions
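
This merge brings in the binder process-freeze support (BINDER_FREEZE / BINDER_GET_FROZEN_INFO), clear-on-free transaction buffers, and oneway-spam detection. As a rough orientation before the diff, the sketch below shows how a userspace freezer might drive the new BINDER_FREEZE ioctl. It is a minimal sketch, not part of this change: the ioctl macro and struct binder_freeze_info (pid/enable/timeout_ms, as used by binder_ioctl_freeze() below) are assumed to come from the UAPI header update that pairs with this driver change but is outside this diffstat.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>   /* BINDER_FREEZE, struct binder_freeze_info (assumed) */

/* Freeze (or thaw) the binder side of a target process before stopping it. */
static int binder_freeze_process(int binder_fd, int pid, int freeze)
{
	struct binder_freeze_info info;

	memset(&info, 0, sizeof(info));
	info.pid = pid;
	info.enable = freeze ? 1 : 0;
	/* Give pending transactions up to 100 ms to drain before failing. */
	info.timeout_ms = freeze ? 100 : 0;

	if (ioctl(binder_fd, BINDER_FREEZE, &info) < 0) {
		/* EAGAIN: outstanding transactions did not drain within the timeout. */
		fprintf(stderr, "BINDER_FREEZE(pid %d): %s\n", pid, strerror(errno));
		return -errno;
	}
	return 0;
}

A freezer would typically retry or thaw on EAGAIN; while the target stays frozen, peers that attempt synchronous transactions get BR_FROZEN_REPLY from binder_proc_transaction() in the diff below.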
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index afb4675d65f3..632308c48bf7 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -459,9 +459,19 @@ struct binder_priority {
* (protected by binder_deferred_lock)
* @deferred_work: bitmap of deferred work to perform
* (protected by binder_deferred_lock)
+ * @outstanding_txns: number of transactions to be transmitted before
+ * processes in freeze_wait are woken up
+ * (protected by @inner_lock)
* @is_dead: process is dead and awaiting free
* when outstanding transactions are cleaned up
* (protected by @inner_lock)
+ * @sync_recv: process received sync transactions since last frozen
+ * (protected by @inner_lock)
+ * @async_recv: process received async transactions since last frozen
+ * (protected by @inner_lock)
+ * @freeze_wait: waitqueue of processes waiting for all outstanding
+ * transactions to be processed
+ * (protected by @inner_lock)
* @todo: list of work for this process
* (protected by @inner_lock)
* @stats: per-process binder statistics
@@ -505,7 +515,12 @@ struct binder_proc {
struct mutex files_lock;
struct hlist_node deferred_work_node;
int deferred_work;
+ int outstanding_txns;
bool is_dead;
+ bool is_frozen;
+ bool sync_recv;
+ bool async_recv;
+ wait_queue_head_t freeze_wait;
struct list_head todo;
struct binder_stats stats;
@@ -2076,6 +2091,10 @@ static void binder_free_transaction(struct binder_transaction *t)
spin_unlock(&t->lock);
binder_inner_proc_lock(target_proc);
+ target_proc->outstanding_txns--;
+ BUG_ON(target_proc->outstanding_txns < 0);
+ if (!target_proc->outstanding_txns && target_proc->is_frozen)
+ wake_up_interruptible_all(&target_proc->freeze_wait);
if (t->buffer)
t->buffer->transaction = NULL;
binder_inner_proc_unlock(target_proc);
@@ -2829,10 +2848,11 @@ static int binder_fixup_parent(struct binder_transaction *t,
* If the @thread parameter is not NULL, the transaction is always queued
* to the waitlist of that specific thread.
*
- * Return: true if the transactions was successfully queued
- * false if the target process or thread is dead
+ * Return: 0 if the transaction was successfully queued
+ * BR_DEAD_REPLY if the target process or thread is dead
+ * BR_FROZEN_REPLY if the target process or thread is frozen
*/
-static bool binder_proc_transaction(struct binder_transaction *t,
+static int binder_proc_transaction(struct binder_transaction *t,
struct binder_proc *proc,
struct binder_thread *thread)
{
@@ -2856,11 +2876,18 @@ static bool binder_proc_transaction(struct binder_transaction *t,
}
binder_inner_proc_lock(proc);
+ if (proc->is_frozen) {
+ proc->sync_recv |= !oneway;
+ proc->async_recv |= oneway;
+ }
- if (proc->is_dead || (thread && thread->is_dead)) {
+ if ((proc->is_frozen && !oneway) || proc->is_dead ||
+ (thread && thread->is_dead)) {
+ bool proc_is_dead = proc->is_dead
+ || (thread && thread->is_dead);
binder_inner_proc_unlock(proc);
binder_node_unlock(node);
- return false;
+ return proc_is_dead ? BR_DEAD_REPLY : BR_FROZEN_REPLY;
}
if (!thread && !pending_async)
@@ -2879,10 +2906,11 @@ static bool binder_proc_transaction(struct binder_transaction *t,
if (!pending_async)
binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
+ proc->outstanding_txns++;
binder_inner_proc_unlock(proc);
binder_node_unlock(node);
- return true;
+ return 0;
}
/**
@@ -3203,7 +3231,7 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
tr->offsets_size, extra_buffers_size,
- !reply && (t->flags & TF_ONE_WAY));
+ !reply && (t->flags & TF_ONE_WAY), current->tgid);
if (IS_ERR(t->buffer)) {
/*
* -ESRCH indicates VMA cleared. The target is dying.
@@ -3231,6 +3259,7 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
+ t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
trace_binder_transaction_alloc_buf(t->buffer);
if (binder_alloc_copy_user_to_buffer(
@@ -3476,13 +3505,16 @@ static void binder_transaction(struct binder_proc *proc,
if (reply) {
binder_enqueue_thread_work(thread, tcomplete);
binder_inner_proc_lock(target_proc);
- if (target_thread->is_dead) {
+ if (target_thread->is_dead || target_proc->is_frozen) {
+ return_error = target_thread->is_dead ?
+ BR_DEAD_REPLY : BR_FROZEN_REPLY;
binder_inner_proc_unlock(target_proc);
goto err_dead_proc_or_thread;
}
BUG_ON(t->buffer->async_transaction != 0);
binder_pop_transaction_ilocked(target_thread, in_reply_to);
binder_enqueue_thread_work_ilocked(target_thread, &t->work);
+ target_proc->outstanding_txns++;
binder_inner_proc_unlock(target_proc);
wake_up_interruptible_sync(&target_thread->wait);
binder_restore_priority(current, in_reply_to->saved_priority);
@@ -3502,7 +3534,9 @@ static void binder_transaction(struct binder_proc *proc,
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
binder_inner_proc_unlock(proc);
- if (!binder_proc_transaction(t, target_proc, target_thread)) {
+ return_error = binder_proc_transaction(t,
+ target_proc, target_thread);
+ if (return_error) {
binder_inner_proc_lock(proc);
binder_pop_transaction_ilocked(thread, t);
binder_inner_proc_unlock(proc);
@@ -3512,7 +3546,8 @@ static void binder_transaction(struct binder_proc *proc,
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
binder_enqueue_thread_work(thread, tcomplete);
- if (!binder_proc_transaction(t, target_proc, NULL))
+ return_error = binder_proc_transaction(t, target_proc, NULL);
+ if (return_error)
goto err_dead_proc_or_thread;
}
if (target_thread)
@@ -3529,7 +3564,6 @@ static void binder_transaction(struct binder_proc *proc,
return;
err_dead_proc_or_thread:
- return_error = BR_DEAD_REPLY;
return_error_line = __LINE__;
binder_dequeue_work(proc, tcomplete);
err_translate_failed:
@@ -4134,7 +4168,7 @@ static int binder_wait_for_work(struct binder_thread *thread,
binder_inner_proc_lock(proc);
list_del_init(&thread->waiting_thread_node);
if (signal_pending(current)) {
- ret = -ERESTARTSYS;
+ ret = -EINTR;
break;
}
}
@@ -4638,6 +4672,7 @@ static void binder_free_proc(struct binder_proc *proc)
BUG_ON(!list_empty(&proc->todo));
BUG_ON(!list_empty(&proc->delivered_death));
+ WARN_ON(proc->outstanding_txns);
device = container_of(proc->context, struct binder_device, context);
if (refcount_dec_and_test(&device->ref)) {
kfree(proc->context->name);
@@ -4698,6 +4733,7 @@ static int binder_thread_release(struct binder_proc *proc,
(t->to_thread == thread) ? "in" : "out");
if (t->to_thread == thread) {
+ t->to_proc->outstanding_txns--;
t->to_proc = NULL;
t->to_thread = NULL;
if (t->buffer) {
@@ -4944,6 +4980,76 @@ static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
return 0;
}
+static int binder_ioctl_freeze(struct binder_freeze_info *info,
+ struct binder_proc *target_proc)
+{
+ int ret = 0;
+
+ if (!info->enable) {
+ binder_inner_proc_lock(target_proc);
+ target_proc->sync_recv = false;
+ target_proc->async_recv = false;
+ target_proc->is_frozen = false;
+ binder_inner_proc_unlock(target_proc);
+ return 0;
+ }
+
+ /*
+ * Freezing the target. Prevent new transactions by
+ * setting frozen state. If timeout specified, wait
+ * for transactions to drain.
+ */
+ binder_inner_proc_lock(target_proc);
+ target_proc->sync_recv = false;
+ target_proc->async_recv = false;
+ target_proc->is_frozen = true;
+ binder_inner_proc_unlock(target_proc);
+
+ if (info->timeout_ms > 0)
+ ret = wait_event_interruptible_timeout(
+ target_proc->freeze_wait,
+ (!target_proc->outstanding_txns),
+ msecs_to_jiffies(info->timeout_ms));
+
+ if (!ret && target_proc->outstanding_txns)
+ ret = -EAGAIN;
+
+ if (ret < 0) {
+ binder_inner_proc_lock(target_proc);
+ target_proc->is_frozen = false;
+ binder_inner_proc_unlock(target_proc);
+ }
+
+ return ret;
+}
+
+static int binder_ioctl_get_freezer_info(
+ struct binder_frozen_status_info *info)
+{
+ struct binder_proc *target_proc;
+ bool found = false;
+
+ info->sync_recv = 0;
+ info->async_recv = 0;
+
+ mutex_lock(&binder_procs_lock);
+ hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
+ if (target_proc->pid == info->pid) {
+ found = true;
+ binder_inner_proc_lock(target_proc);
+ info->sync_recv |= target_proc->sync_recv;
+ info->async_recv |= target_proc->async_recv;
+ binder_inner_proc_unlock(target_proc);
+ }
+ }
+ mutex_unlock(&binder_procs_lock);
+
+ if (!found)
+ return -EINVAL;
+
+ return 0;
+}
+
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
@@ -5062,6 +5168,84 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
break;
}
+ case BINDER_FREEZE: {
+ struct binder_freeze_info info;
+ struct binder_proc **target_procs = NULL, *target_proc;
+ int target_procs_count = 0, i = 0;
+
+ ret = 0;
+
+ if (copy_from_user(&info, ubuf, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ mutex_lock(&binder_procs_lock);
+ hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
+ if (target_proc->pid == info.pid)
+ target_procs_count++;
+ }
+
+ if (target_procs_count == 0) {
+ mutex_unlock(&binder_procs_lock);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ target_procs = kmalloc(sizeof(struct binder_proc *) *
+ target_procs_count,
+ GFP_KERNEL);
+
+ if (!target_procs) {
+ mutex_unlock(&binder_procs_lock);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
+ if (target_proc->pid != info.pid)
+ continue;
+
+ binder_inner_proc_lock(target_proc);
+ atomic_inc(&target_proc->tmp_ref);
+ binder_inner_proc_unlock(target_proc);
+
+ target_procs[i++] = target_proc;
+ }
+ mutex_unlock(&binder_procs_lock);
+
+ for (i = 0; i < target_procs_count; i++) {
+ if (ret >= 0)
+ ret = binder_ioctl_freeze(&info,
+ target_procs[i]);
+
+ binder_proc_dec_tmpref(target_procs[i]);
+ }
+
+ kfree(target_procs);
+
+ if (ret < 0)
+ goto err;
+ break;
+ }
+ case BINDER_GET_FROZEN_INFO: {
+ struct binder_frozen_status_info info;
+
+ if (copy_from_user(&info, ubuf, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ ret = binder_ioctl_get_freezer_info(&info);
+ if (ret < 0)
+ goto err;
+
+ if (copy_to_user(ubuf, &info, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+ break;
+ }
default:
ret = -EINVAL;
goto err;
@@ -5176,6 +5360,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
proc->tsk = current->group_leader;
mutex_init(&proc->files_lock);
INIT_LIST_HEAD(&proc->todo);
+ init_waitqueue_head(&proc->freeze_wait);
if (binder_supported_policy(current->policy)) {
proc->default_priority.sched_policy = current->policy;
proc->default_priority.prio = current->normal_prio;
@@ -5397,6 +5582,9 @@ static void binder_deferred_release(struct binder_proc *proc)
atomic_inc(&proc->tmp_ref);
proc->is_dead = true;
+ proc->is_frozen = false;
+ proc->sync_recv = false;
+ proc->async_recv = false;
threads = 0;
active_transactions = 0;
while ((n = rb_first(&proc->threads))) {
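
The sync_recv and async_recv bits recorded above are what BINDER_GET_FROZEN_INFO reports back, so a userspace freezer can tell whether a frozen process was targeted by binder traffic and should be thawed. A minimal sketch of that query side, again assuming the ioctl macro and struct binder_frozen_status_info come from the matching UAPI header update rather than from this diff:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>   /* BINDER_GET_FROZEN_INFO, struct binder_frozen_status_info (assumed) */

/* Returns 1 if the frozen process has received binder work, 0 if idle, -1 on error. */
static int binder_query_frozen(int binder_fd, int pid)
{
	struct binder_frozen_status_info info = { .pid = pid };

	/* EINVAL here means no binder_proc with that pid was found. */
	if (ioctl(binder_fd, BINDER_GET_FROZEN_INFO, &info) < 0)
		return -1;

	printf("pid %d: sync_recv=%u async_recv=%u\n",
	       pid, info.sync_recv, info.async_recv);
	return (info.sync_recv || info.async_recv) ? 1 : 0;
}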
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 1b5a131278bc..a0856e920b2b 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -38,11 +38,12 @@ struct list_lru binder_alloc_lru;
static DEFINE_MUTEX(binder_alloc_mmap_lock);
enum {
+ BINDER_DEBUG_USER_ERROR = 1U << 0,
BINDER_DEBUG_OPEN_CLOSE = 1U << 1,
BINDER_DEBUG_BUFFER_ALLOC = 1U << 2,
BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
};
-static uint32_t binder_alloc_debug_mask;
+static uint32_t binder_alloc_debug_mask = 0;
module_param_named(debug_mask, binder_alloc_debug_mask,
uint, 0644);
@@ -343,12 +344,50 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
return vma;
}
+static void debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
+{
+ /*
+ * Find the amount and size of buffers allocated by the current caller;
+ * The idea is that once we cross the threshold, whoever is responsible
+ * for the low async space is likely to try to send another async txn,
+ * and at some point we'll catch them in the act. This is more efficient
+ * than keeping a map per pid.
+ */
+ struct rb_node *n = alloc->free_buffers.rb_node;
+ struct binder_buffer *buffer;
+ size_t total_alloc_size = 0;
+ size_t num_buffers = 0;
+
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL;
+ n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ if (buffer->pid != pid)
+ continue;
+ if (!buffer->async_transaction)
+ continue;
+ total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
+ + sizeof(struct binder_buffer);
+ num_buffers++;
+ }
+
+ /*
+ * Warn if this pid has more than 50 transactions, or more than 50% of
+ * async space (which is 25% of total buffer size).
+ */
+ if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
+ alloc->pid, pid, num_buffers, total_alloc_size);
+ }
+}
+
static struct binder_buffer *binder_alloc_new_buf_locked(
struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
- int is_async)
+ int is_async,
+ int pid)
{
struct rb_node *n = alloc->free_buffers.rb_node;
struct binder_buffer *buffer;
@@ -487,11 +526,20 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
buffer->offsets_size = offsets_size;
buffer->async_transaction = is_async;
buffer->extra_buffers_size = extra_buffers_size;
+ buffer->pid = pid;
if (is_async) {
alloc->free_async_space -= size + sizeof(struct binder_buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_alloc_buf size %zd async free %zd\n",
alloc->pid, size, alloc->free_async_space);
+ if (alloc->free_async_space < alloc->buffer_size / 10) {
+ /*
+ * Start detecting spammers once we have less than 20%
+ * of async space left (which is less than 10% of total
+ * buffer size).
+ */
+ debug_low_async_space_locked(alloc, pid);
+ }
}
return buffer;
@@ -509,6 +557,7 @@ err_alloc_buf_struct_failed:
* @offsets_size: user specified buffer offset
* @extra_buffers_size: size of extra space for meta-data (eg, security context)
* @is_async: buffer for async transaction
+ * @pid: pid to attribute allocation to (used for debugging)
*
* Allocate a new buffer given the requested sizes. Returns
* the kernel version of the buffer pointer. The size allocated
@@ -521,13 +570,14 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
- int is_async)
+ int is_async,
+ int pid)
{
struct binder_buffer *buffer;
mutex_lock(&alloc->mutex);
buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
- extra_buffers_size, is_async);
+ extra_buffers_size, is_async, pid);
mutex_unlock(&alloc->mutex);
return buffer;
}
@@ -647,6 +697,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
binder_insert_free_buffer(alloc, buffer);
}
+static void binder_alloc_clear_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffer);
/**
* binder_alloc_free_buf() - free a binder buffer
* @alloc: binder_alloc for this proc
@@ -657,6 +709,18 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
void binder_alloc_free_buf(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
+ /*
+ * We could eliminate the call to binder_alloc_clear_buf()
+ * from binder_alloc_deferred_release() by moving this to
+ * binder_alloc_free_buf_locked(). However, that could
+ * increase contention for the alloc mutex if clear_on_free
+ * is used frequently for large buffers. The mutex is not
+ * needed for correctness here.
+ */
+ if (buffer->clear_on_free) {
+ binder_alloc_clear_buf(alloc, buffer);
+ buffer->clear_on_free = false;
+ }
mutex_lock(&alloc->mutex);
binder_free_buf_locked(alloc, buffer);
mutex_unlock(&alloc->mutex);
@@ -749,6 +813,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
/* Transaction should already have been freed */
BUG_ON(buffer->transaction);
+ if (buffer->clear_on_free) {
+ binder_alloc_clear_buf(alloc, buffer);
+ buffer->clear_on_free = false;
+ }
binder_free_buf_locked(alloc, buffer);
buffers++;
}
@@ -1076,6 +1144,36 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
}
/**
+ * binder_alloc_clear_buf() - zero out buffer
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be cleared
+ *
+ * memset the given buffer to 0
+ */
+static void binder_alloc_clear_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ size_t bytes = binder_alloc_buffer_size(alloc, buffer);
+ binder_size_t buffer_offset = 0;
+
+ while (bytes) {
+ unsigned long size;
+ struct page *page;
+ pgoff_t pgoff;
+ void *kptr;
+
+ page = binder_alloc_get_page(alloc, buffer,
+ buffer_offset, &pgoff);
+ size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
+ kptr = kmap(page) + pgoff;
+ memset(kptr, 0, size);
+ kunmap(page);
+ bytes -= size;
+ buffer_offset += size;
+ }
+}
+
+/**
* binder_alloc_copy_user_to_buffer() - copy src user to tgt user
* @alloc: binder_alloc for this proc
* @buffer: binder buffer to be accessed
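
The oneway-spam thresholds in the binder_alloc.c hunks above are easier to read with concrete numbers. The sketch below only restates the arithmetic for a hypothetical 1 MiB binder mapping; the real size is whatever the process mmap()ed, and the initial async space (buffer_size / 2, set in binder_alloc_mmap_handler) is not shown in this diff.

#include <stdio.h>

int main(void)
{
	const unsigned long buffer_size  = 1024 * 1024;       /* assumed mmap size */
	const unsigned long async_space  = buffer_size / 2;   /* initial free_async_space (not in this diff) */
	const unsigned long detect_below = buffer_size / 10;  /* start scanning callers below this */
	const unsigned long warn_bytes   = buffer_size / 4;   /* per-pid async bytes before warning */
	const unsigned long warn_bufs    = 50;                 /* per-pid async buffers before warning */

	printf("async space:                          %lu bytes\n", async_space);
	printf("scan callers when free async space <  %lu bytes\n", detect_below);
	printf("warn when one pid holds > %lu buffers or > %lu bytes\n",
	       warn_bufs, warn_bytes);
	return 0;
}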
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index b60d161b7a7a..fdb220eb55d1 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -32,6 +32,7 @@ struct binder_transaction;
* @entry: entry alloc->buffers
* @rb_node: node for allocated_buffers/free_buffers rb trees
* @free: %true if buffer is free
+ * @clear_on_free: %true if buffer must be zeroed after use
* @allow_user_free: %true if user is allowed to free buffer
* @async_transaction: %true if buffer is in use for an async txn
* @debug_id: unique ID for debugging
@@ -41,6 +42,7 @@ struct binder_transaction;
* @offsets_size: size of array of offsets
* @extra_buffers_size: size of space for other objects (like sg lists)
* @user_data: user pointer to base of buffer space
+ * @pid: pid to attribute the buffer to (caller)
*
* Bookkeeping structure for binder transaction buffers
*/
@@ -49,9 +51,10 @@ struct binder_buffer {
struct rb_node rb_node; /* free entry by size or allocated entry */
/* by address */
unsigned free:1;
+ unsigned clear_on_free:1;
unsigned allow_user_free:1;
unsigned async_transaction:1;
- unsigned debug_id:29;
+ unsigned debug_id:28;
struct binder_transaction *transaction;
@@ -60,6 +63,7 @@ struct binder_buffer {
size_t offsets_size;
size_t extra_buffers_size;
void __user *user_data;
+ int pid;
};
/**
@@ -126,7 +130,8 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
- int is_async);
+ int is_async,
+ int pid);
extern void binder_alloc_init(struct binder_alloc *alloc);
extern int binder_alloc_shrinker_init(void);
extern void binder_alloc_vma_close(struct binder_alloc *alloc);
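
The new clear_on_free bit documented above is driven by the sender: a transaction flagged with TF_CLEAR_BUF (checked in the binder.c hunk) has its receive-side buffer zeroed by binder_alloc_clear_buf() when it is freed; in AOSP userspace this backs parcels marked as sensitive. A hedged sending-side sketch, assuming TF_CLEAR_BUF and struct binder_transaction_data from the UAPI header update that accompanies this change:

#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>   /* TF_CLEAR_BUF, struct binder_transaction_data (assumed) */

/* Mark an outgoing transaction so the receiver's kernel buffer is zeroed on
 * free (clear_on_free above). The caller would still wrap this in a
 * BC_TRANSACTION command written via BINDER_WRITE_READ. */
static void fill_sensitive_txn(struct binder_transaction_data *tr,
			       uint32_t target_handle,
			       const void *data, size_t len)
{
	memset(tr, 0, sizeof(*tr));
	tr->target.handle = target_handle;
	tr->flags = TF_CLEAR_BUF;   /* requests clear_on_free in the driver */
	tr->data_size = len;
	tr->data.ptr.buffer = (binder_uintptr_t)(uintptr_t)data;
}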
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index b72708918b06..c839c490fde3 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -128,7 +128,7 @@ static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
int i;
for (i = 0; i < BUFFER_NUM; i++) {
- buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
+ buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0, 0);
if (IS_ERR(buffers[i]) ||
!check_buffer_pages_allocated(alloc, buffers[i],
sizes[i])) {