about summary refs log tree commit diff
path: root/kernel/sched/core.c
diff options
context:
space:
mode:
author	spkal01 <kalligeross@gmail.com>	2021-05-17 02:37:28 +0530
committer	spkal01 <kalligeross@gmail.com>	2021-05-17 02:37:28 +0530
commit	93b265ae2eba8d93d0ffa406958547232f3114c8 (patch)
tree	c2f093aa144f732b5cf7bd8a0b45bf35eda42e1c /kernel/sched/core.c
parent	0a82617b8fce8994076b518064e7d420af290ea8 (diff)
parent	016f4ba70bffb6d02725e778c3989fa542e6d12a (diff)
Merge branch 'android11' of https://github.com/vantoman/kernel_xiaomi_sm6150 into HEAD (HEAD, tag: r11.1)
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	59
1 file changed, 51 insertions, 8 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2e3c650573bb..5f832ae982e6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3544,6 +3544,50 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
prepare_arch_switch(next);
}
+void release_task_stack(struct task_struct *tsk);
+static void task_async_free(struct work_struct *work)
+{
+ struct task_struct *t = container_of(work, typeof(*t), async_free.work);
+ bool free_stack = READ_ONCE(t->async_free.free_stack);
+
+ atomic_set(&t->async_free.running, 0);
+
+ if (free_stack) {
+ release_task_stack(t);
+ put_task_struct(t);
+ } else {
+ __put_task_struct(t);
+ }
+}
+
+static void finish_task_switch_dead(struct task_struct *prev)
+{
+ if (atomic_cmpxchg(&prev->async_free.running, 0, 1)) {
+ put_task_stack(prev);
+ put_task_struct(prev);
+ return;
+ }
+
+ if (atomic_dec_and_test(&prev->stack_refcount)) {
+ prev->async_free.free_stack = true;
+ } else if (atomic_dec_and_test(&prev->usage)) {
+ prev->async_free.free_stack = false;
+ } else {
+ atomic_set(&prev->async_free.running, 0);
+ return;
+ }
+
+ INIT_WORK(&prev->async_free.work, task_async_free);
+ queue_work(system_unbound_wq, &prev->async_free.work);
+}
+
+static void mmdrop_async_free(struct work_struct *work)
+{
+ struct mm_struct *mm = container_of(work, typeof(*mm), async_put_work);
+
+ __mmdrop(mm);
+}
+
/**
* finish_task_switch - clean up after a task-switch
* @prev: the thread we just switched away from.
@@ -3617,8 +3661,10 @@ static struct rq *finish_task_switch(struct task_struct *prev)
kcov_finish_switch(current);
fire_sched_in_preempt_notifiers(current);
- if (mm)
- mmdrop(mm);
+ if (mm && atomic_dec_and_test(&mm->mm_count)) {
+ INIT_WORK(&mm->async_put_work, mmdrop_async_free);
+ queue_work(system_unbound_wq, &mm->async_put_work);
+ }
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
@@ -3629,11 +3675,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
*/
kprobe_flush_task(prev);
- /* Task is done with its stack. */
- put_task_stack(prev);
-
- put_task_struct(prev);
-
+ finish_task_switch_dead(prev);
}
tick_nohz_task_switch();
@@ -5002,7 +5044,8 @@ static void __setscheduler_params(struct task_struct *p,
if (policy == SETPARAM_POLICY)
policy = p->policy;
- p->policy = policy;
+ /* Replace SCHED_FIFO with SCHED_RR to reduce latency */
+ p->policy = policy == SCHED_FIFO ? SCHED_RR : policy;
if (dl_policy(policy))
__setparam_dl(p, attr);