Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--    kernel/irq/manage.c    100
1 file changed, 4 insertions(+), 96 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 252bf10..4c84746 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -22,7 +22,6 @@
#include "internals.h"
#ifdef CONFIG_IRQ_FORCED_THREADING
-# ifndef CONFIG_PREEMPT_RT_BASE
__read_mostly bool force_irqthreads;
static int __init setup_forced_irqthreads(char *arg)
@@ -31,7 +30,6 @@ static int __init setup_forced_irqthreads(char *arg)
return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
-# endif
#endif
/**
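Note: with the "# ifndef CONFIG_PREEMPT_RT_BASE" guard gone, force_irqthreads and the "threadirqs" boot parameter are built whenever CONFIG_IRQ_FORCED_THREADING is set, on RT and non-RT kernels alike. For readers unfamiliar with the idiom, early_param() registers a handler that runs while the kernel parses its command line, before initcalls. A minimal sketch of the pattern, with illustrative names not taken from this file:

    #include <linux/init.h>
    #include <linux/cache.h>
    #include <linux/types.h>

    /* Sketch: the bare presence of "myfeature" on the kernel command
     * line flips the flag during early boot. */
    static bool __read_mostly my_feature_enabled;

    static int __init setup_my_feature(char *arg)
    {
            my_feature_enabled = true;
            return 0;               /* 0 == parameter consumed */
    }
    early_param("myfeature", setup_my_feature);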
@@ -164,62 +162,6 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
return ret;
}
-#ifdef CONFIG_PREEMPT_RT_FULL
-static void _irq_affinity_notify(struct irq_affinity_notify *notify);
-static struct task_struct *set_affinity_helper;
-static LIST_HEAD(affinity_list);
-static DEFINE_RAW_SPINLOCK(affinity_list_lock);
-
-static int set_affinity_thread(void *unused)
-{
- while (1) {
- struct irq_affinity_notify *notify;
- int empty;
-
- set_current_state(TASK_INTERRUPTIBLE);
-
- raw_spin_lock_irq(&affinity_list_lock);
- empty = list_empty(&affinity_list);
- raw_spin_unlock_irq(&affinity_list_lock);
-
- if (empty)
- schedule();
- if (kthread_should_stop())
- break;
- set_current_state(TASK_RUNNING);
-try_next:
- notify = NULL;
-
- raw_spin_lock_irq(&affinity_list_lock);
- if (!list_empty(&affinity_list)) {
- notify = list_first_entry(&affinity_list,
- struct irq_affinity_notify, list);
- list_del_init(&notify->list);
- }
- raw_spin_unlock_irq(&affinity_list_lock);
-
- if (!notify)
- continue;
- _irq_affinity_notify(notify);
- goto try_next;
- }
- return 0;
-}
-
-static void init_helper_thread(void)
-{
- if (set_affinity_helper)
- return;
- set_affinity_helper = kthread_run(set_affinity_thread, NULL,
- "affinity-cb");
- WARN_ON(IS_ERR(set_affinity_helper));
-}
-#else
-
-static inline void init_helper_thread(void) { }
-
-#endif
-
int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
struct irq_chip *chip = irq_data_get_irq_chip(data);
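Note: the hunk above drops the RT-only helper thread that drained queued affinity notifications from a raw-spinlock-protected list. RT needed it because the affinity path runs under a raw spinlock, where queueing work is problematic on RT kernels whose workqueue internals take sleeping locks. The loop being deleted is the canonical kthread event-loop idiom; a standalone sketch under illustrative names (queue_empty() and drain_queue() are placeholders, not kernel APIs):

    #include <linux/kthread.h>
    #include <linux/sched.h>

    static bool queue_empty(void);          /* placeholder predicate */
    static void drain_queue(void);          /* placeholder worker */

    static int helper_fn(void *unused)
    {
            while (!kthread_should_stop()) {
                    /* Declare the sleep state before testing the
                     * condition so a concurrent wake_up_process()
                     * cannot be lost between check and schedule(). */
                    set_current_state(TASK_INTERRUPTIBLE);
                    if (queue_empty())
                            schedule();
                    __set_current_state(TASK_RUNNING);
                    drain_queue();
            }
            return 0;
    }

Such a thread is typically started with kthread_run(helper_fn, NULL, "name") and stopped with kthread_stop(), which wakes it and makes kthread_should_stop() return true.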
@@ -238,17 +180,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
if (desc->affinity_notify) {
kref_get(&desc->affinity_notify->kref);
-
-#ifdef CONFIG_PREEMPT_RT_FULL
- raw_spin_lock(&affinity_list_lock);
- if (list_empty(&desc->affinity_notify->list))
- list_add_tail(&affinity_list,
- &desc->affinity_notify->list);
- raw_spin_unlock(&affinity_list_lock);
- wake_up_process(set_affinity_helper);
-#else
schedule_work(&desc->affinity_notify->work);
-#endif
}
irqd_set(data, IRQD_AFFINITY_SET);
@@ -289,8 +221,10 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
-static void _irq_affinity_notify(struct irq_affinity_notify *notify)
+static void irq_affinity_notify(struct work_struct *work)
{
+ struct irq_affinity_notify *notify =
+ container_of(work, struct irq_affinity_notify, work);
struct irq_desc *desc = irq_to_desc(notify->irq);
cpumask_var_t cpumask;
unsigned long flags;
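Note: the underscore indirection is reverted; irq_affinity_notify() is once again the workqueue callback itself and recovers its irq_affinity_notify structure via container_of(). A self-contained sketch of the embed-and-recover pattern (my_notify and its members are illustrative):

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct my_notify {
            int irq;
            struct work_struct work;        /* embedded, not a pointer */
    };

    static void my_notify_fn(struct work_struct *work)
    {
            /* container_of() maps the pointer to the embedded member
             * back to a pointer to the enclosing structure. */
            struct my_notify *n = container_of(work, struct my_notify, work);

            pr_info("affinity changed on IRQ %d\n", n->irq);
    }

    /* Setup:   INIT_WORK(&n->work, my_notify_fn);
     * Trigger: schedule_work(&n->work); queues on the system workqueue,
     *          callable from atomic context; the callback then runs
     *          later in process context. */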
@@ -312,13 +246,6 @@ out:
kref_put(&notify->kref, notify->release);
}
-static void irq_affinity_notify(struct work_struct *work)
-{
- struct irq_affinity_notify *notify =
- container_of(work, struct irq_affinity_notify, work);
- _irq_affinity_notify(notify);
-}
-
/**
* irq_set_affinity_notifier - control notification of IRQ affinity changes
* @irq: Interrupt for which to enable/disable notification
@@ -348,8 +275,6 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
notify->irq = irq;
kref_init(&notify->kref);
INIT_WORK(&notify->work, irq_affinity_notify);
- INIT_LIST_HEAD(&notify->list);
- init_helper_thread();
}
raw_spin_lock_irqsave(&desc->lock, flags);
@@ -856,15 +781,7 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
local_bh_disable();
ret = action->thread_fn(action->irq, action->dev_id);
irq_finalize_oneshot(desc, action);
- /*
- * Interrupts which have real time requirements can be set up
- * to avoid softirq processing in the thread handler. This is
- * safe as these interrupts do not raise soft interrupts.
- */
- if (irq_settings_no_softirq_call(desc))
- _local_bh_enable();
- else
- local_bh_enable();
+ local_bh_enable();
return ret;
}
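Note: the IRQF_NO_SOFTIRQ_CALL special case is reverted, so every forced-threaded handler now runs inside the plain bracket, and local_bh_enable() also executes any softirqs the handler raised; the RT-only _local_bh_enable() path skipped that softirq processing for latency-critical handlers. The same bracket in generic form (update_shared_state() is illustrative):

    local_bh_disable();     /* softirqs cannot run on this CPU meanwhile */
    update_shared_state();  /* illustrative work also touched from softirq context */
    local_bh_enable();      /* pending softirqs run here once the count hits zero */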
@@ -947,12 +864,6 @@ static int irq_thread(void *data)
if (!noirqdebug)
note_interrupt(action->irq, desc, action_ret);
-#ifdef CONFIG_PREEMPT_RT_FULL
- migrate_disable();
- add_interrupt_randomness(action->irq, 0,
- desc->random_ip ^ (unsigned long) action);
- migrate_enable();
-#endif
wake_threads_waitq(desc);
}
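Note: this removes the RT-only entropy feed, where IRQ timing was mixed into the random pool from the threaded handler instead of hard-irq context. The migrate_disable()/migrate_enable() bracket it used keeps the task on its current CPU while remaining preemptible, a weaker but cheaper guarantee than preempt_disable() when the callee only needs a stable CPU. Schematically, with per_cpu_step() as an illustrative placeholder:

    migrate_disable();      /* stay on this CPU; preemption still allowed */
    per_cpu_step();         /* illustrative: relies on not changing CPUs */
    migrate_enable();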
@@ -1215,9 +1126,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
}
- if (new->flags & IRQF_NO_SOFTIRQ_CALL)
- irq_settings_set_no_softirq_call(desc);
-
/* Set default affinity mask once everything is setup */
setup_affinity(irq, desc, mask);