author     Thomas Gleixner <tglx@linutronix.de>    2011-02-08 16:11:03 (GMT)
committer  Thomas Gleixner <tglx@linutronix.de>    2011-02-19 11:58:20 (GMT)
commit     a005677b3dd05decdd8880cf3044ae709856f58f (patch)
tree       529d1454940fe8c6723bd54f01e009d9be2ab840
parent     1ce6068dac1924f7095be5850481e790cbf1b3c1 (diff)
genirq: Mirror IRQ_PER_CPU and IRQ_NO_BALANCING in irq_data.state
That's the right data structure to look at for arch code.

Accessor functions are provided.

   irqd_is_per_cpu(irqdata);
   irqd_can_balance(irqdata);

Coders who access them directly will be tracked down and slapped with
stinking trouts.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
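For illustration only, not part of the patch: a minimal sketch of how arch code might consult the mirrored state through the new accessors instead of looking at desc->status. The helper name arch_irq_may_move() is hypothetical, and the sketch assumes irq_get_irq_data() is available to obtain the irq_data for an interrupt number; code that already holds an irq_data pointer would call the accessors on it directly.

    #include <linux/irq.h>

    /* Hypothetical arch-side helper: may this interrupt be moved/balanced? */
    static bool arch_irq_may_move(unsigned int irq)
    {
            struct irq_data *data = irq_get_irq_data(irq);

            if (!data)
                    return false;

            /*
             * Strictly redundant with the check below, since
             * irqd_can_balance() also tests IRQD_PER_CPU, but shown
             * here to demonstrate both accessors.
             */
            if (irqd_is_per_cpu(data))
                    return false;

            /* False if either IRQD_PER_CPU or IRQD_NO_BALANCING is set */
            return irqd_can_balance(data);
    }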
-rw-r--r--  include/linux/irq.h      16
-rw-r--r--  kernel/irq/chip.c        15
-rw-r--r--  kernel/irq/internals.h   11
-rw-r--r--  kernel/irq/manage.c      16
-rw-r--r--  kernel/irq/migration.c    2
-rw-r--r--  kernel/irq/settings.h    36
-rw-r--r--  kernel/irq/spurious.c     3
7 files changed, 84 insertions, 15 deletions
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 3f607ad..d5312e6 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -132,10 +132,14 @@ struct irq_data {
* Bit masks for irq_data.state
*
* IRQD_SETAFFINITY_PENDING - Affinity setting is pending
+ * IRQD_NO_BALANCING - Balancing disabled for this IRQ
+ * IRQD_PER_CPU - Interrupt is per cpu
*/
enum {
/* Bit 0 - 7 reserved for TYPE will use later */
-	IRQD_SETAFFINITY_PENDING = (1 << 8),
+	IRQD_SETAFFINITY_PENDING	= (1 << 8),
+	IRQD_NO_BALANCING		= (1 << 10),
+	IRQD_PER_CPU			= (1 << 11),
};
static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
@@ -143,6 +147,16 @@ static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
return d->state_use_accessors & IRQD_SETAFFINITY_PENDING;
}
+static inline bool irqd_is_per_cpu(struct irq_data *d)
+{
+ return d->state_use_accessors & IRQD_PER_CPU;
+}
+
+static inline bool irqd_can_balance(struct irq_data *d)
+{
+ return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING));
+}
+
/**
* struct irq_chip - hardware interrupt chip descriptor
*
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 73b2e7e..b8aa3df 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -706,12 +706,15 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
if (!desc)
return;
- /* Sanitize flags */
- set &= IRQF_MODIFY_MASK;
- clr &= IRQF_MODIFY_MASK;
-
raw_spin_lock_irqsave(&desc->lock, flags);
- desc->status &= ~clr;
- desc->status |= set;
+
+ irq_settings_clr_and_set(desc, clr, set);
+
+ irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU);
+ if (irq_settings_has_no_balance_set(desc))
+ irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+ if (irq_settings_is_per_cpu(desc))
+ irqd_set(&desc->irq_data, IRQD_PER_CPU);
+
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index b2ba59e..a80b44d 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -139,3 +139,14 @@ static inline void irqd_clr_move_pending(struct irq_data *d)
d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING;
irq_compat_clr_move_pending(irq_data_to_desc(d));
}
+
+static inline void irqd_clear(struct irq_data *d, unsigned int mask)
+{
+ d->state_use_accessors &= ~mask;
+}
+
+static inline void irqd_set(struct irq_data *d, unsigned int mask)
+{
+ d->state_use_accessors |= mask;
+}
+
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index f1cfa27..84a0a9c 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -73,8 +73,8 @@ int irq_can_set_affinity(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
- if ((desc->status & (IRQ_PER_CPU | IRQ_NO_BALANCING)) ||
- !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
+ if (!irqd_can_balance(&desc->irq_data) || !desc->irq_data.chip ||
+ !desc->irq_data.chip->irq_set_affinity)
return 0;
return 1;
@@ -897,8 +897,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
IRQS_INPROGRESS | IRQS_ONESHOT | \
IRQS_WAITING);
- if (new->flags & IRQF_PERCPU)
- desc->status |= IRQ_PER_CPU;
+ if (new->flags & IRQF_PERCPU) {
+ irqd_set(&desc->irq_data, IRQD_PER_CPU);
+ irq_settings_set_per_cpu(desc);
+ }
if (new->flags & IRQF_ONESHOT)
desc->istate |= IRQS_ONESHOT;
@@ -910,8 +912,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
desc->depth = 1;
/* Exclude IRQ from balancing if requested */
- if (new->flags & IRQF_NOBALANCING)
- desc->status |= IRQ_NO_BALANCING;
+ if (new->flags & IRQF_NOBALANCING) {
+ irq_settings_set_no_balancing(desc);
+ irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+ }
/* Set default affinity mask once everything is setup */
setup_affinity(irq, desc, mask);
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 24f53ca..7a93c6b 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -15,7 +15,7 @@ void move_masked_irq(int irq)
/*
* Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
*/
- if (desc->status & (IRQ_PER_CPU | IRQ_NO_BALANCING)) {
+ if (!irqd_can_balance(&desc->irq_data)) {
WARN_ON(1);
return;
}
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index bb104a2..ba0fffe 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -4,6 +4,9 @@
*/
enum {
_IRQ_DEFAULT_INIT_FLAGS = IRQ_DEFAULT_INIT_FLAGS,
+ _IRQ_PER_CPU = IRQ_PER_CPU,
+ _IRQ_NO_BALANCING = IRQ_NO_BALANCING,
+ _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
};
#undef IRQ_INPROGRESS
@@ -22,3 +25,36 @@ enum {
#define IRQ_WAKEUP GOT_YOU_MORON
#undef IRQ_MOVE_PENDING
#define IRQ_MOVE_PENDING GOT_YOU_MORON
+#undef IRQ_PER_CPU
+#define IRQ_PER_CPU GOT_YOU_MORON
+#undef IRQ_NO_BALANCING
+#define IRQ_NO_BALANCING GOT_YOU_MORON
+#undef IRQF_MODIFY_MASK
+#define IRQF_MODIFY_MASK GOT_YOU_MORON
+
+static inline void
+irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
+{
+ desc->status &= ~(clr & _IRQF_MODIFY_MASK);
+ desc->status |= (set & _IRQF_MODIFY_MASK);
+}
+
+static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
+{
+ return desc->status & _IRQ_PER_CPU;
+}
+
+static inline void irq_settings_set_per_cpu(struct irq_desc *desc)
+{
+ desc->status |= _IRQ_PER_CPU;
+}
+
+static inline void irq_settings_set_no_balancing(struct irq_desc *desc)
+{
+ desc->status |= _IRQ_NO_BALANCING;
+}
+
+static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc)
+{
+ return desc->status & _IRQ_NO_BALANCING;
+}
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 692ce2b..226ed7d 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -68,7 +68,8 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
raw_spin_lock(&desc->lock);
/* PER_CPU and nested thread interrupts are never polled */
- if (desc->status & (IRQ_PER_CPU | IRQ_NESTED_THREAD))
+ if (irq_settings_is_per_cpu(desc) ||
+ (desc->status & IRQ_NESTED_THREAD))
goto out;
/*