author     John Dias <joaodias@google.com>            2016-09-15 08:52:27 -0700
committer  Michael Bestas <mkbestas@lineageos.org>    2018-06-24 15:59:30 +0300
commit     5f1ae6224dc19fec3a8b24d76b68aabe52ff881a (patch)
tree       20058e892d66d84654887b9fbd1b18f89b3f3c71
parent     aa8f5541905f32d846c86974950d5bcc57d1ce8d (diff)
sched: qhmp: avoid scheduling RT threads on cores currently handling softirqs
Bug: 31501544
Change-Id: Iaf7e9472e01c89b26141b44737f74d35aec7004e
-rw-r--r--  kernel/sched/qhmp_rt.c     41
-rw-r--r--  kernel/sched/qhmp_sched.h   5
2 files changed, 40 insertions(+), 6 deletions(-)
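
The helper added below keys off two signals: the softirq bits of the running task's preempt_count, and whether that task is the CPU's ksoftirqd thread (which catches the case where softirq work has been deferred rather than run inline). As a reference for the first signal, here is a minimal userspace sketch, not part of the patch; the constant values mirror include/linux/preempt_mask.h from kernels of this era, and softirq_busy() is a hypothetical stand-in for the kernel's test.

/* Standalone model (not kernel code): how preempt_count encodes softirq
 * state.  Bits 0-7 count preemption disables; bits 8-15 are the softirq
 * field that SOFTIRQ_MASK selects. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define PREEMPT_BITS		8
#define SOFTIRQ_BITS		8
#define PREEMPT_SHIFT		0
#define SOFTIRQ_SHIFT		(PREEMPT_SHIFT + PREEMPT_BITS)	/* 8 */
#define SOFTIRQ_MASK		(((1U << SOFTIRQ_BITS) - 1) << SOFTIRQ_SHIFT) /* 0xff00 */
#define SOFTIRQ_OFFSET		(1U << SOFTIRQ_SHIFT)		/* serving a softirq */
#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)		/* BH disabled */

/* Mirrors the patch's test: any nonzero softirq bit means the current
 * task cannot be preempted promptly by a newly woken RT thread. */
static bool softirq_busy(uint32_t preempt_count)
{
	return (preempt_count & SOFTIRQ_MASK) != 0;
}

int main(void)
{
	assert(!softirq_busy(0));			/* ordinary task context */
	assert(softirq_busy(SOFTIRQ_OFFSET));		/* mid-softirq */
	assert(softirq_busy(SOFTIRQ_DISABLE_OFFSET));	/* inside local_bh_disable() */
	return 0;
}
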
diff --git a/kernel/sched/qhmp_rt.c b/kernel/sched/qhmp_rt.c
index 201cadeca153..b418dfbcbc17 100644
--- a/kernel/sched/qhmp_rt.c
+++ b/kernel/sched/qhmp_rt.c
@@ -5,6 +5,7 @@
 
 #include "sched.h"
 
+#include <linux/interrupt.h>
 #include <linux/slab.h>
 
 #include <trace/events/sched.h>
@@ -1428,11 +1429,25 @@ select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
 	return cpu;
 }
 
+/*
+ * Return whether the task on the given cpu is currently non-preemptible
+ * while handling a softirq or is likely to block preemptions soon because
+ * it is a ksoftirq thread.
+ */
+bool
+task_may_not_preempt(struct task_struct *task, int cpu)
+{
+	struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
+	return (task_thread_info(task)->preempt_count & SOFTIRQ_MASK) ||
+	       task == cpu_ksoftirqd;
+}
+
 static int
 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
 	struct task_struct *curr;
 	struct rq *rq;
+	bool may_not_preempt;
 
 	if (p->nr_cpus_allowed == 1)
 		goto out;
@@ -1450,7 +1465,12 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
 
 	/*
-	 * If the current task on @p's runqueue is an RT task, then
+	 * If the current task on @p's runqueue is a softirq task,
+	 * it may run without preemption for a time that is
+	 * ill-suited for a waiting RT task. Therefore, try to
+	 * wake this RT task on another runqueue.
+	 *
+	 * Also, if the current task on @p's runqueue is an RT task, then
 	 * try to see if we can wake this RT task up on another
 	 * runqueue. Otherwise simply start this RT task
 	 * on its current runqueue.
@@ -1471,12 +1491,21 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	 * This test is optimistic, if we get it wrong the load-balancer
 	 * will have to sort it out.
 	 */
-	if (curr && unlikely(rt_task(curr)) &&
-	    (curr->nr_cpus_allowed < 2 ||
-	     curr->prio <= p->prio)) {
+	may_not_preempt = task_may_not_preempt(curr, cpu);
+	if (curr && (may_not_preempt ||
+	    (unlikely(rt_task(curr)) &&
+	     (curr->nr_cpus_allowed < 2 ||
+	      curr->prio <= p->prio)))) {
 		int target = find_lowest_rq(p);
-
-		if (target != -1)
+		/*
+		 * If cpu is non-preemptible, prefer remote cpu
+		 * even if it's running a higher-prio task.
+		 * Otherwise: Possible race. Don't bother moving it if the
+		 * destination CPU is not running a lower priority task.
+		 */
+		if (target != -1 &&
+		    (may_not_preempt ||
+		     p->prio < cpu_rq(target)->rt.highest_prio.curr))
 			cpu = target;
 	}
 	rcu_read_unlock();
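
Taken together, the hunks above change the wakeup rule: a CPU that may not preempt is escaped unconditionally, while the pre-existing RT-vs-RT case still requires the target CPU to be running a lower-priority task. The sketch below condenses that predicate; pick_rt_cpu() and struct cpu_snapshot are hypothetical names, not the kernel code, and the kernel additionally checks that curr is non-NULL.

/* Condensed model of the placement rule the patch installs in
 * select_task_rq_rt().  Lower prio values mean higher priority. */
#include <stdbool.h>

struct cpu_snapshot {
	bool may_not_preempt;		/* task_may_not_preempt(curr, cpu) */
	bool curr_is_rt;		/* rt_task(curr) */
	int curr_prio;			/* curr->prio */
	int curr_nr_cpus_allowed;	/* curr->nr_cpus_allowed */
	int target_highest_prio;	/* cpu_rq(target)->rt.highest_prio.curr */
};

/* Returns the CPU the waking RT task should run on: `target` (the result
 * of find_lowest_rq()) when migration pays off, else the original `cpu`.
 * A softirq-busy CPU is abandoned even if the target currently runs a
 * higher-priority task; the plain RT-vs-RT case still requires the target
 * to be running something lower priority than the waking task. */
static int pick_rt_cpu(int cpu, int target, int p_prio,
		       const struct cpu_snapshot *c)
{
	bool rt_conflict = c->curr_is_rt &&
			   (c->curr_nr_cpus_allowed < 2 ||
			    c->curr_prio <= p_prio);

	if ((c->may_not_preempt || rt_conflict) &&
	    target != -1 &&
	    (c->may_not_preempt || p_prio < c->target_highest_prio))
		return target;

	return cpu;
}
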
diff --git a/kernel/sched/qhmp_sched.h b/kernel/sched/qhmp_sched.h
index 3a3a048b01e5..7cdd1a6f5383 100644
--- a/kernel/sched/qhmp_sched.h
+++ b/kernel/sched/qhmp_sched.h
@@ -1967,3 +1967,8 @@ static inline u64 irq_time_read(int cpu)
 }
 #endif /* CONFIG_64BIT */
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+
+/*
+ * task_may_not_preempt - check whether a task may not be preemptible soon
+ */
+extern bool task_may_not_preempt(struct task_struct *task, int cpu);
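
A short driver for the pick_rt_cpu() sketch above (appended to that sketch, reusing its types) illustrates the behavioral difference the patch introduces; all scenario values are invented for illustration.

/* Exercises the pick_rt_cpu() model with two scenarios. */
#include <assert.h>

int main(void)
{
	/* CPU 0 is mid-softirq: flee to CPU 2 even though CPU 2 runs an RT
	 * task at prio 10 and the waking task only has prio 50. */
	struct cpu_snapshot snap = {
		.may_not_preempt = true,
		.curr_is_rt = false,
		.curr_prio = 120,
		.curr_nr_cpus_allowed = 8,
		.target_highest_prio = 10,
	};
	assert(pick_rt_cpu(0, 2, 50, &snap) == 2);

	/* Same CPU without the softirq: curr is an ordinary CFS task the
	 * waker can preempt directly, so CPU 0 is kept. */
	snap.may_not_preempt = false;
	assert(pick_rt_cpu(0, 2, 50, &snap) == 0);

	return 0;
}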