Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--  kernel/irq/manage.c  24
1 file changed, 15 insertions, 9 deletions
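
The wake_threads_waitq() hunk in the diff below adds a guarded wakeup: wake_up() is only called when waitqueue_active() reports that something is actually sleeping on the queue, so the common case, where nothing waits on wait_for_threads, skips the waitqueue lock. A minimal sketch of that idiom follows; the names work_tracker and work_tracker_put() are hypothetical and not part of this patch.

#include <linux/wait.h>
#include <linux/atomic.h>

/*
 * Illustrative only -- mirrors the guarded-wakeup pattern used by
 * wake_threads_waitq() in the diff below.  The waiter side must
 * re-check its condition after queueing itself (wait_event() does
 * this), otherwise the waitqueue_active() shortcut could miss a
 * wakeup.
 */
struct work_tracker {
	atomic_t		active;	/* outstanding work items */
	wait_queue_head_t	done;	/* woken when active reaches zero */
};

static void work_tracker_put(struct work_tracker *wt)
{
	if (atomic_dec_and_test(&wt->active) &&
	    waitqueue_active(&wt->done))
		wake_up(&wt->done);
}
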
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index a79d267b64e..dc4db3228dc 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -150,7 +150,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
struct irq_chip *chip = irq_data_get_irq_chip(data);
int ret;
- ret = chip->irq_set_affinity(data, mask, force);
+ ret = chip->irq_set_affinity(data, mask, false);
switch (ret) {
case IRQ_SET_MASK_OK:
cpumask_copy(data->affinity, mask);
@@ -162,8 +162,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
return ret;
}
-int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
- bool force)
+int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
struct irq_chip *chip = irq_data_get_irq_chip(data);
struct irq_desc *desc = irq_data_to_desc(data);
@@ -173,7 +172,7 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
return -EINVAL;
if (irq_can_move_pcntxt(data)) {
- ret = irq_do_set_affinity(data, mask, force);
+ ret = irq_do_set_affinity(data, mask, false);
} else {
irqd_set_move_pending(data);
irq_copy_pending(desc, mask);
@@ -188,7 +187,13 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
return ret;
}
-int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
+/**
+ * irq_set_affinity - Set the irq affinity of a given irq
+ * @irq: Interrupt to set affinity
+ * @mask: cpumask
+ *
+ */
+int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
{
struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
@@ -198,7 +203,7 @@ int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
return -EINVAL;
raw_spin_lock_irqsave(&desc->lock, flags);
- ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
+ ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
@@ -797,7 +802,8 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
static void wake_threads_waitq(struct irq_desc *desc)
{
- if (atomic_dec_and_test(&desc->threads_active))
+ if (atomic_dec_and_test(&desc->threads_active) &&
+ waitqueue_active(&desc->wait_for_threads))
wake_up(&desc->wait_for_threads);
}
@@ -861,8 +867,8 @@ static int irq_thread(void *data)
irq_thread_check_affinity(desc, action);
action_ret = handler_fn(desc, action);
- if (action_ret == IRQ_HANDLED)
- atomic_inc(&desc->threads_handled);
+ if (!noirqdebug)
+ note_interrupt(action->irq, desc, action_ret);
wake_threads_waitq(desc);
}