author     Dimitri Sivanich <sivanich@sgi.com>       2009-09-30 12:02:59 -0400
committer  Ingo Molnar <mingo@elte.hu>               2009-10-14 03:17:01 -0400
commit     6c2c502910247d2820cb630e7b28fb6bdecdbf45 (patch)
tree       d2bc695c68e57d853bcc66195628a98e34bb01ef /arch/x86/kernel/uv_irq.c
parent     2626eb2b2fd958dc0f683126aa84e93b939699a1 (diff)
x86: SGI UV: Fix irq affinity for hub based interrupts
This patch fixes handling of UV hub irq affinity. IRQs with ALL or NODE
affinity can be routed to cpus other than their originally assigned cpu.
Those with CPU affinity cannot be rerouted.

Signed-off-by: Dimitri Sivanich <sivanich@sgi.com>
LKML-Reference: <20090930160259.GA7822@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
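For context, a caller's-eye sketch of the interface change (a minimal sketch, not code from this commit): uv_setup_irq() grows a fifth argument restricting how the irq may be retargeted, and uv_teardown_irq() now needs only the irq number, since the MMR offset and pnode are recovered from the rb tree added below. The UV_AFFINITY_* values are assumed to come from the matching asm/uv/uv_irq.h change in this series, which is outside this file's diffstat; the example_gru_init() name is made up for illustration.

#include <asm/uv/uv_irq.h>

/* Hypothetical driver init: names and error handling are illustrative only. */
static int example_gru_init(int cpu, int blade, unsigned long mmr_offset)
{
	int irq;

	/*
	 * UV_AFFINITY_CPU pins the irq to its originally assigned cpu;
	 * UV_AFFINITY_ALL/NODE (assumed from the companion header change)
	 * leave it eligible for rerouting via set_affinity.
	 */
	irq = uv_setup_irq("example_uv", cpu, blade, mmr_offset,
			   UV_AFFINITY_CPU);
	if (irq <= 0)
		return irq < 0 ? irq : -EBUSY;

	/* ... use the irq ... */

	/* No mmr_blade/mmr_offset needed any more: looked up by irq. */
	uv_teardown_irq(irq);
	return 0;
}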
Diffstat (limited to 'arch/x86/kernel/uv_irq.c')
 arch/x86/kernel/uv_irq.c | 128 +++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 120 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c
index aeef529917e4..9a83775ab0f3 100644
--- a/arch/x86/kernel/uv_irq.c
+++ b/arch/x86/kernel/uv_irq.c
@@ -9,10 +9,22 @@
  */
 
 #include <linux/module.h>
+#include <linux/rbtree.h>
 #include <linux/irq.h>
 
 #include <asm/apic.h>
 #include <asm/uv/uv_irq.h>
+#include <asm/uv/uv_hub.h>
+
+/* MMR offset and pnode of hub sourcing interrupts for a given irq */
+struct uv_irq_2_mmr_pnode{
+	struct rb_node		list;
+	unsigned long		offset;
+	int			pnode;
+	int			irq;
+};
+static spinlock_t		uv_irq_lock;
+static struct rb_root		uv_irq_root;
 
 static void uv_noop(unsigned int irq)
 {
@@ -39,25 +51,106 @@ struct irq_chip uv_irq_chip = {
 	.unmask		= uv_noop,
 	.eoi		= uv_ack_apic,
 	.end		= uv_noop,
+	.set_affinity	= uv_set_irq_affinity,
 };
 
 /*
+ * Add offset and pnode information of the hub sourcing interrupts to the
+ * rb tree for a specific irq.
+ */
+static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
+{
+	struct rb_node **link = &uv_irq_root.rb_node;
+	struct rb_node *parent = NULL;
+	struct uv_irq_2_mmr_pnode *n;
+	struct uv_irq_2_mmr_pnode *e;
+	unsigned long irqflags;
+
+	n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
+				uv_blade_to_memory_nid(blade));
+	if (!n)
+		return -ENOMEM;
+
+	n->irq = irq;
+	n->offset = offset;
+	n->pnode = uv_blade_to_pnode(blade);
+	spin_lock_irqsave(&uv_irq_lock, irqflags);
+	/* Find the right place in the rbtree: */
+	while (*link) {
+		parent = *link;
+		e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);
+
+		if (unlikely(irq == e->irq)) {
+			/* irq entry exists */
+			e->pnode = uv_blade_to_pnode(blade);
+			e->offset = offset;
+			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
+			kfree(n);
+			return 0;
+		}
+
+		if (irq < e->irq)
+			link = &(*link)->rb_left;
+		else
+			link = &(*link)->rb_right;
+	}
+
+	/* Insert the node into the rbtree. */
+	rb_link_node(&n->list, parent, link);
+	rb_insert_color(&n->list, &uv_irq_root);
+
+	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
+	return 0;
+}
+
+/* Retrieve offset and pnode information from the rb tree for a specific irq */
+int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
+{
+	struct uv_irq_2_mmr_pnode *e;
+	struct rb_node *n;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&uv_irq_lock, irqflags);
+	n = uv_irq_root.rb_node;
+	while (n) {
+		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
+
+		if (e->irq == irq) {
+			*offset = e->offset;
+			*pnode = e->pnode;
+			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
+			return 0;
+		}
+
+		if (irq < e->irq)
+			n = n->rb_left;
+		else
+			n = n->rb_right;
+	}
+	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
+	return -1;
+}
+
+/*
  * Set up a mapping of an available irq and vector, and enable the specified
  * MMR that defines the MSI that is to be sent to the specified CPU when an
  * interrupt is raised.
  */
 int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
-		unsigned long mmr_offset)
+		unsigned long mmr_offset, int restrict)
 {
-	int irq;
-	int ret;
+	int irq, ret;
+
+	irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade));
 
-	irq = create_irq();
 	if (irq <= 0)
 		return -EBUSY;
 
-	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset);
-	if (ret != irq)
+	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
+			restrict);
+	if (ret == irq)
+		uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
+	else
 		destroy_irq(irq);
 
 	return ret;
@@ -71,9 +164,28 @@ EXPORT_SYMBOL_GPL(uv_setup_irq);
  *
  * Set mmr_blade and mmr_offset to what was passed in on uv_setup_irq().
  */
-void uv_teardown_irq(unsigned int irq, int mmr_blade, unsigned long mmr_offset)
+void uv_teardown_irq(unsigned int irq)
 {
-	arch_disable_uv_irq(mmr_blade, mmr_offset);
+	struct uv_irq_2_mmr_pnode *e;
+	struct rb_node *n;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&uv_irq_lock, irqflags);
+	n = uv_irq_root.rb_node;
+	while (n) {
+		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
+		if (e->irq == irq) {
+			arch_disable_uv_irq(e->pnode, e->offset);
+			rb_erase(n, &uv_irq_root);
+			kfree(e);
+			break;
+		}
+		if (irq < e->irq)
+			n = n->rb_left;
+		else
+			n = n->rb_right;
+	}
+	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
 	destroy_irq(irq);
 }
 EXPORT_SYMBOL_GPL(uv_teardown_irq);
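The uv_set_irq_affinity() handler wired into uv_irq_chip above is defined elsewhere in this series (on the io_apic side), so it does not appear in this file's diff. The following is a condensed, hedged sketch of how such a handler can consume uv_irq_2_mmr_info(): recover the stored MMR offset and pnode for the irq, then rewrite that MMR so the hub raises the MSI at the newly chosen destination. The helper name and its reduced argument list are illustrative, not the kernel's actual signature.

#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>

/* Illustrative helper, not the in-tree uv_set_irq_affinity() itself. */
static int uv_retarget_irq(unsigned int irq, unsigned int apicid,
			   unsigned int vector)
{
	struct uv_IO_APIC_route_entry *entry;
	unsigned long mmr_offset;
	unsigned long mmr_value = 0;
	int mmr_pnode;

	/* Recover the sourcing-hub MMR stored at uv_setup_irq() time. */
	if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode))
		return -1;

	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->vector = vector;
	entry->dest = apicid;
	/* delivery mode, polarity, trigger, mask left zeroed for brevity */

	/* Rewrite the MMR so the hub sends the MSI to the new target. */
	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
	return 0;
}

This also shows why IRQs registered with UV_AFFINITY_CPU cannot be rerouted: nothing stops the MMR rewrite mechanically, so the restriction has to be enforced by the set_affinity path itself.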