| author | Joel Fernandes <joelaf@google.com> | 2017-08-01 15:49:20 -0700 |
|---|---|---|
| committer | Julian Veit <claymore1298@gmail.com> | 2018-12-14 00:57:34 +0100 |
| commit | 7f2e80850f03c25aa103dabf842ab98690e737bd | |
| tree | 464340eb9a16bdc35c2907697e1856af395e8b57 | |
| parent | 772d24c13cd09ff1242d7bbcaa3dfabe9f2ade42 | |
binder: always allocate/map first BINDER_MIN_ALLOC pages
Certain use cases like camera are constantly allocating and freeing
binder buffers beyond the first 4k, resulting in mmap_sem contention.
If we expand the always-allocated range from 4k to something higher, we
can reduce the contention. Tests show that 6 pages are enough to cause
very few update_page_range operations and reduce contention.
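To illustrate the idea (a standalone userspace sketch, not the driver code; the buffer base and request sizes below are made up): once the first BINDER_MIN_ALLOC bytes stay permanently mapped, any allocate or free that falls entirely inside that prefix no longer has to touch the page tables, so it never contends on mmap_sem.

```c
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE        4096
#define BINDER_MIN_ALLOC (1 * PAGE_SIZE)  /* always-mapped prefix */

/*
 * Simplified model of the patched wrapper: clamp 'start' past the
 * always-mapped prefix and report how many bytes would still need a
 * real map/unmap (i.e. how much page-table work remains).
 */
static size_t bytes_to_update(uintptr_t buffer, uintptr_t start, uintptr_t end)
{
	if (start - buffer < BINDER_MIN_ALLOC)
		start = buffer + BINDER_MIN_ALLOC;
	return start < end ? end - start : 0;
}

int main(void)
{
	uintptr_t buf = 0x100000;  /* hypothetical mmap base */

	/* Small transaction inside the prefix: no page-table work (0). */
	printf("%zu\n", bytes_to_update(buf, buf, buf + 512));

	/* Larger transaction: only the pages past the prefix are touched. */
	printf("%zu\n", bytes_to_update(buf, buf, buf + 3 * PAGE_SIZE));
	return 0;
}
```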
Bug: 36727951
Change-Id: I28bc3fb9b33c764c257e28487712fce2a3c1078b
Reported-by: Tim Murray <timmurray@google.com>
Signed-off-by: Joel Fernandes <joelaf@google.com>
Pre-allocate 1 page instead of 6 as in the original patch, since we use
this pre-allocated page to prevent the first page from getting unpinned
after removing the buffer headers, rather than pinning pages to speed
up larger transactions.
Change-Id: I7c3e4884a9538ecfd86601d31c5bcfd6611d37a4
Signed-off-by: Sherry Yang <sherryy@android.com>
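A rough model of why one pre-allocated page suffices (again a hedged userspace sketch; the boolean array stands in for the real page-table state): the buffer headers live at the start of the mapping, and the clamp guarantees that even freeing every buffer leaves page 0 resident.

```c
#include <stdbool.h>
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE        4096
#define NPAGES           8
#define BINDER_MIN_ALLOC (1 * PAGE_SIZE)

static bool mapped[NPAGES];  /* stand-in for the real page-table state */

/* Model of the clamped wrapper: frees never reach the prefix. */
static void update_range(bool allocate, size_t start, size_t end)
{
	if (start < BINDER_MIN_ALLOC)
		start = BINDER_MIN_ALLOC;
	for (size_t off = start; off < end; off += PAGE_SIZE)
		mapped[off / PAGE_SIZE] = allocate;
}

int main(void)
{
	mapped[0] = true;  /* prefix mapped once, at binder_mmap() time */

	update_range(true, 0, 3 * PAGE_SIZE);   /* allocate a 3-page buffer */
	update_range(false, 0, 3 * PAGE_SIZE);  /* ...and free it again */

	/* Page 0 (buffer headers) survives the free; pages 1-2 do not. */
	for (int i = 0; i < 3; i++)
		printf("page %d: %s\n", i, mapped[i] ? "mapped" : "unmapped");
	return 0;
}
```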
| -rw-r--r-- | drivers/android/binder.c | 25 |

1 file changed, 21 insertions(+), 4 deletions(-)
```diff
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 7d9a726fa50..fdc5d24e8ed 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -47,6 +47,8 @@
 #include <uapi/linux/android/binder.h>
 #include "binder_trace.h"
 
+#define BINDER_MIN_ALLOC (1 * PAGE_SIZE)
+
 static HLIST_HEAD(binder_devices);
 static struct dentry *binder_debugfs_dir_entry_root;
 
@@ -645,9 +647,9 @@ static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
 	return NULL;
 }
 
-static int binder_update_page_range(struct binder_proc *proc, int allocate,
-				    void *start, void *end,
-				    struct vm_area_struct *vma)
+static int __binder_update_page_range(struct binder_proc *proc, int allocate,
+				      void *start, void *end,
+				      struct vm_area_struct *vma)
 {
 	void *page_addr;
 	unsigned long user_page_addr;
@@ -756,6 +758,20 @@ err_no_vma:
 	return -ENOMEM;
 }
 
+static int binder_update_page_range(struct binder_proc *proc, int allocate,
+				    void *start, void *end,
+				    struct vm_area_struct *vma)
+{
+	/*
+	 * For regular updates, move up start if needed since MIN_ALLOC pages
+	 * are always mapped
+	 */
+	if (start - proc->buffer < BINDER_MIN_ALLOC)
+		start = proc->buffer + BINDER_MIN_ALLOC;
+
+	return __binder_update_page_range(proc, allocate, start, end, vma);
+}
+
 static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
 					      size_t data_size,
 					      size_t offsets_size,
@@ -3493,7 +3509,8 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 	/* binder_update_page_range assumes preemption is disabled */
 	preempt_disable();
 
-	ret = binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma);
+	ret = __binder_update_page_range(proc, 1, proc->buffer,
+					 proc->buffer + BINDER_MIN_ALLOC, vma);
 	preempt_enable_no_resched();
 	if (ret) {
 		ret = -ENOMEM;
```
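Note the asymmetry the patch creates: binder_mmap() calls __binder_update_page_range() directly, because the clamped wrapper would skip the prefix and never establish the initial mapping, while the later allocate and free paths go through the wrapper and therefore can never unmap those pages.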
