author     spkal01 <kalligeross@gmail.com>   2021-05-17 02:37:28 +0530
committer  spkal01 <kalligeross@gmail.com>   2021-05-17 02:37:28 +0530
commit     93b265ae2eba8d93d0ffa406958547232f3114c8 (patch)
tree       c2f093aa144f732b5cf7bd8a0b45bf35eda42e1c /drivers/android/binder_alloc.c
parent     0a82617b8fce8994076b518064e7d420af290ea8 (diff)
parent     016f4ba70bffb6d02725e778c3989fa542e6d12a (diff)
Merge branch 'android11' of https://github.com/vantoman/kernel_xiaomi_sm6150 into HEAD (HEAD, r11.1)
Diffstat (limited to 'drivers/android/binder_alloc.c')
-rw-r--r--  drivers/android/binder_alloc.c  106
1 file changed, 102 insertions(+), 4 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 1b5a131278bc..a0856e920b2b 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -38,11 +38,12 @@ struct list_lru binder_alloc_lru;
static DEFINE_MUTEX(binder_alloc_mmap_lock);
enum {
+ BINDER_DEBUG_USER_ERROR = 1U << 0,
BINDER_DEBUG_OPEN_CLOSE = 1U << 1,
BINDER_DEBUG_BUFFER_ALLOC = 1U << 2,
BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
};
-static uint32_t binder_alloc_debug_mask;
+static uint32_t binder_alloc_debug_mask = 0;
module_param_named(debug_mask, binder_alloc_debug_mask,
uint, 0644);
@@ -343,12 +344,50 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
return vma;
}
+static void debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
+{
+ /*
+ * Find the amount and size of buffers allocated by the current caller;
+ * The idea is that once we cross the threshold, whoever is responsible
+ * for the low async space is likely to try to send another async txn,
+ * and at some point we'll catch them in the act. This is more efficient
+ * than keeping a map per pid.
+ */
+ struct rb_node *n = alloc->free_buffers.rb_node;
+ struct binder_buffer *buffer;
+ size_t total_alloc_size = 0;
+ size_t num_buffers = 0;
+
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL;
+ n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ if (buffer->pid != pid)
+ continue;
+ if (!buffer->async_transaction)
+ continue;
+ total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
+ + sizeof(struct binder_buffer);
+ num_buffers++;
+ }
+
+ /*
+ * Warn if this pid has more than 50 transactions, or more than 50% of
+ * async space (which is 25% of total buffer size).
+ */
+ if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
+ alloc->pid, pid, num_buffers, total_alloc_size);
+ }
+}
+
static struct binder_buffer *binder_alloc_new_buf_locked(
struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
- int is_async)
+ int is_async,
+ int pid)
{
struct rb_node *n = alloc->free_buffers.rb_node;
struct binder_buffer *buffer;
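For a sense of scale for the thresholds in debug_low_async_space_locked(): libbinder normally mmaps a little under 1 MiB per process, and binder reserves half of that mapping for async transactions, so the size check fires once a single pid holds more than a quarter of the whole mapping in oneway buffers. A minimal stand-alone sketch of the same predicate (the ~1 MiB figure is an assumption about the usual userspace mapping, not something this patch sets):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Hypothetical mirror of the check in debug_low_async_space_locked():
 * flag a pid once it holds more than 50 outstanding async buffers, or
 * more than buffer_size / 4 bytes of them (half of the async half).
 */
static bool oneway_spam_suspected(size_t num_buffers,
                                  size_t total_alloc_size,
                                  size_t buffer_size)
{
        return num_buffers > 50 || total_alloc_size > buffer_size / 4;
}

int main(void)
{
        size_t buffer_size = 1024 * 1024;  /* assumed ~1 MiB binder mmap */

        /* 60 tiny buffers trips the count threshold ... */
        printf("%d\n", oneway_spam_suspected(60, 60 * 128, buffer_size));
        /* ... and ~300 KiB of async payload trips the size threshold. */
        printf("%d\n", oneway_spam_suspected(10, 300 * 1024, buffer_size));
        return 0;
}

Note that the resulting "spamming oneway?" line is gated behind the new BINDER_DEBUG_USER_ERROR bit, so nothing is printed unless that bit is set in the binder_alloc debug_mask module parameter.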
@@ -487,11 +526,20 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
buffer->offsets_size = offsets_size;
buffer->async_transaction = is_async;
buffer->extra_buffers_size = extra_buffers_size;
+ buffer->pid = pid;
if (is_async) {
alloc->free_async_space -= size + sizeof(struct binder_buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_alloc_buf size %zd async free %zd\n",
alloc->pid, size, alloc->free_async_space);
+ if (alloc->free_async_space < alloc->buffer_size / 10) {
+ /*
+ * Start detecting spammers once we have less than 20%
+ * of async space left (which is less than 10% of total
+ * buffer size).
+ */
+ debug_low_async_space_locked(alloc, pid);
+ }
}
return buffer;
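To make the trigger above concrete: free_async_space starts out at half the mapping, so the scan only runs after roughly 80% of the async budget has been spent. A small sketch of the same condition, again assuming a ~1 MiB mapping (hypothetical numbers, not from this patch):

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical mirror of the allocation-path trigger: look for oneway
 * spammers only once free async space drops below 10% of the whole
 * mapping, i.e. below 20% of the initial buffer_size / 2 budget.
 */
static bool should_scan_for_spam(unsigned long free_async_space,
                                 unsigned long buffer_size)
{
        return free_async_space < buffer_size / 10;
}

int main(void)
{
        unsigned long buffer_size = 1024 * 1024;  /* assumed ~1 MiB mmap */

        printf("%d\n", should_scan_for_spam(200 * 1024, buffer_size)); /* 0: plenty left */
        printf("%d\n", should_scan_for_spam(100 * 1024, buffer_size)); /* 1: below ~102 KiB */
        return 0;
}

Keeping the scan behind this threshold keeps the rb-tree walk in debug_low_async_space_locked() off the allocation fast path until async space is genuinely running low.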
@@ -509,6 +557,7 @@ err_alloc_buf_struct_failed:
* @offsets_size: user specified buffer offset
* @extra_buffers_size: size of extra space for meta-data (eg, security context)
* @is_async: buffer for async transaction
+ * @pid: pid to attribute allocation to (used for debugging)
*
* Allocate a new buffer given the requested sizes. Returns
* the kernel version of the buffer pointer. The size allocated
@@ -521,13 +570,14 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
- int is_async)
+ int is_async,
+ int pid)
{
struct binder_buffer *buffer;
mutex_lock(&alloc->mutex);
buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
- extra_buffers_size, is_async);
+ extra_buffers_size, is_async, pid);
mutex_unlock(&alloc->mutex);
return buffer;
}
@@ -647,6 +697,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
binder_insert_free_buffer(alloc, buffer);
}
+static void binder_alloc_clear_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffer);
/**
* binder_alloc_free_buf() - free a binder buffer
* @alloc: binder_alloc for this proc
@@ -657,6 +709,18 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
void binder_alloc_free_buf(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
+ /*
+ * We could eliminate the call to binder_alloc_clear_buf()
+ * from binder_alloc_deferred_release() by moving this to
+ * binder_free_buf_locked(). However, that could
+ * increase contention for the alloc mutex if clear_on_free
+ * is used frequently for large buffers. The mutex is not
+ * needed for correctness here.
+ */
+ if (buffer->clear_on_free) {
+ binder_alloc_clear_buf(alloc, buffer);
+ buffer->clear_on_free = false;
+ }
mutex_lock(&alloc->mutex);
binder_free_buf_locked(alloc, buffer);
mutex_unlock(&alloc->mutex);
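The clear_on_free handling here is the kernel half of a sender-controlled option: the sending process marks a transaction as sensitive and the kernel zeroes its buffer once it is freed. A hedged userspace-side sketch, assuming the companion changes from the same patch series (the TF_CLEAR_BUF uapi flag and the binder.c code that copies it into buffer->clear_on_free), neither of which appears in this file's diff:

#include <linux/android/binder.h>  /* struct binder_transaction_data, TF_CLEAR_BUF */

/*
 * Hypothetical sketch only: mark an outgoing transaction so that the
 * kernel zeroes its buffer on free (the clear_on_free path above).
 * TF_CLEAR_BUF comes from the companion uapi patch, not this file.
 */
void mark_transaction_sensitive(struct binder_transaction_data *tr)
{
        tr->flags |= TF_CLEAR_BUF;
}

In practice userspace usually reaches this through libbinder rather than raw BINDER_WRITE_READ ioctls. The comment above the mutex_lock() also records the design choice: the memset runs before the alloc mutex is taken, so clearing large buffers does not lengthen the critical section, at the cost of a second clear_on_free check in binder_alloc_deferred_release().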
@@ -749,6 +813,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
/* Transaction should already have been freed */
BUG_ON(buffer->transaction);
+ if (buffer->clear_on_free) {
+ binder_alloc_clear_buf(alloc, buffer);
+ buffer->clear_on_free = false;
+ }
binder_free_buf_locked(alloc, buffer);
buffers++;
}
@@ -1076,6 +1144,36 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
}
/**
+ * binder_alloc_clear_buf() - zero out buffer
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be cleared
+ *
+ * memset the given buffer to 0
+ */
+static void binder_alloc_clear_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ size_t bytes = binder_alloc_buffer_size(alloc, buffer);
+ binder_size_t buffer_offset = 0;
+
+ while (bytes) {
+ unsigned long size;
+ struct page *page;
+ pgoff_t pgoff;
+ void *kptr;
+
+ page = binder_alloc_get_page(alloc, buffer,
+ buffer_offset, &pgoff);
+ size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
+ kptr = kmap(page) + pgoff;
+ memset(kptr, 0, size);
+ kunmap(page);
+ bytes -= size;
+ buffer_offset += size;
+ }
+}
+
+/**
* binder_alloc_copy_user_to_buffer() - copy src user to tgt user
* @alloc: binder_alloc for this proc
* @buffer: binder buffer to be accessed