Diffstat (limited to 'drivers/pci/intr_remapping.c')

 drivers/pci/intr_remapping.c | 139 +++++++++++++++++++++++++++-----------
 1 file changed, 90 insertions(+), 49 deletions(-)
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 738d4c89581c..2de5a3238c94 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -1,3 +1,4 @@
+#include <linux/interrupt.h>
 #include <linux/dmar.h>
 #include <linux/spinlock.h>
 #include <linux/jiffies.h>
@@ -11,41 +12,64 @@ static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
 static int ir_ioapic_num;
 int intr_remapping_enabled;
 
-static struct {
+struct irq_2_iommu {
 	struct intel_iommu *iommu;
 	u16 irte_index;
 	u16 sub_handle;
 	u8 irte_mask;
-} irq_2_iommu[NR_IRQS];
+};
+
+static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
+
+static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
+{
+	return (irq < nr_irqs) ? irq_2_iommuX + irq : NULL;
+}
+
+static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
+{
+	return irq_2_iommu(irq);
+}
 
 static DEFINE_SPINLOCK(irq_2_ir_lock);
 
-int irq_remapped(int irq)
+static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
 {
-	if (irq > NR_IRQS)
-		return 0;
+	struct irq_2_iommu *irq_iommu;
+
+	irq_iommu = irq_2_iommu(irq);
+
+	if (!irq_iommu)
+		return NULL;
+
+	if (!irq_iommu->iommu)
+		return NULL;
 
-	if (!irq_2_iommu[irq].iommu)
-		return 0;
+	return irq_iommu;
+}
 
-	return 1;
+int irq_remapped(int irq)
+{
+	return valid_irq_2_iommu(irq) != NULL;
 }
 
 int get_irte(int irq, struct irte *entry)
 {
 	int index;
+	struct irq_2_iommu *irq_iommu;
 
-	if (!entry || irq > NR_IRQS)
+	if (!entry)
 		return -1;
 
 	spin_lock(&irq_2_ir_lock);
-	if (!irq_2_iommu[irq].iommu) {
+	irq_iommu = valid_irq_2_iommu(irq);
+	if (!irq_iommu) {
 		spin_unlock(&irq_2_ir_lock);
 		return -1;
 	}
 
-	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
-	*entry = *(irq_2_iommu[irq].iommu->ir_table->base + index);
+	index = irq_iommu->irte_index + irq_iommu->sub_handle;
+	*entry = *(irq_iommu->iommu->ir_table->base + index);
 
 	spin_unlock(&irq_2_ir_lock);
 	return 0;
@@ -54,6 +78,7 @@ int get_irte(int irq, struct irte *entry)
 int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 {
 	struct ir_table *table = iommu->ir_table;
+	struct irq_2_iommu *irq_iommu;
 	u16 index, start_index;
 	unsigned int mask = 0;
 	int i;
@@ -61,6 +86,10 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	if (!count)
 		return -1;
 
+	/* protect irq_2_iommu_alloc later */
+	if (irq >= nr_irqs)
+		return -1;
+
 	/*
 	 * start the IRTE search from index 0.
 	 */
@@ -100,10 +129,11 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	for (i = index; i < index + count; i++)
 		table->base[i].present = 1;
 
-	irq_2_iommu[irq].iommu = iommu;
-	irq_2_iommu[irq].irte_index = index;
-	irq_2_iommu[irq].sub_handle = 0;
-	irq_2_iommu[irq].irte_mask = mask;
+	irq_iommu = irq_2_iommu_alloc(irq);
+	irq_iommu->iommu = iommu;
+	irq_iommu->irte_index = index;
+	irq_iommu->sub_handle = 0;
+	irq_iommu->irte_mask = mask;
 
 	spin_unlock(&irq_2_ir_lock);
 
@@ -124,31 +154,33 @@ static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 int map_irq_to_irte_handle(int irq, u16 *sub_handle)
 {
 	int index;
+	struct irq_2_iommu *irq_iommu;
 
 	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+	irq_iommu = valid_irq_2_iommu(irq);
+	if (!irq_iommu) {
 		spin_unlock(&irq_2_ir_lock);
 		return -1;
 	}
 
-	*sub_handle = irq_2_iommu[irq].sub_handle;
-	index = irq_2_iommu[irq].irte_index;
+	*sub_handle = irq_iommu->sub_handle;
+	index = irq_iommu->irte_index;
 	spin_unlock(&irq_2_ir_lock);
 	return index;
 }
 
 int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 {
+	struct irq_2_iommu *irq_iommu;
+
 	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || irq_2_iommu[irq].iommu) {
-		spin_unlock(&irq_2_ir_lock);
-		return -1;
-	}
 
-	irq_2_iommu[irq].iommu = iommu;
-	irq_2_iommu[irq].irte_index = index;
-	irq_2_iommu[irq].sub_handle = subhandle;
-	irq_2_iommu[irq].irte_mask = 0;
+	irq_iommu = irq_2_iommu_alloc(irq);
+
+	irq_iommu->iommu = iommu;
+	irq_iommu->irte_index = index;
+	irq_iommu->sub_handle = subhandle;
+	irq_iommu->irte_mask = 0;
 
 	spin_unlock(&irq_2_ir_lock);
 
@@ -157,16 +189,19 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 
 int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
 {
+	struct irq_2_iommu *irq_iommu;
+
 	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+	irq_iommu = valid_irq_2_iommu(irq);
+	if (!irq_iommu) {
 		spin_unlock(&irq_2_ir_lock);
 		return -1;
 	}
 
-	irq_2_iommu[irq].iommu = NULL;
-	irq_2_iommu[irq].irte_index = 0;
-	irq_2_iommu[irq].sub_handle = 0;
-	irq_2_iommu[irq].irte_mask = 0;
+	irq_iommu->iommu = NULL;
+	irq_iommu->irte_index = 0;
+	irq_iommu->sub_handle = 0;
+	irq_2_iommu(irq)->irte_mask = 0;
 
 	spin_unlock(&irq_2_ir_lock);
 
@@ -178,16 +213,18 @@ int modify_irte(int irq, struct irte *irte_modified)
 	int index;
 	struct irte *irte;
 	struct intel_iommu *iommu;
+	struct irq_2_iommu *irq_iommu;
 
 	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+	irq_iommu = valid_irq_2_iommu(irq);
+	if (!irq_iommu) {
 		spin_unlock(&irq_2_ir_lock);
 		return -1;
 	}
 
-	iommu = irq_2_iommu[irq].iommu;
+	iommu = irq_iommu->iommu;
 
-	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 	irte = &iommu->ir_table->base[index];
 
 	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
@@ -203,18 +240,20 @@ int flush_irte(int irq)
 {
 	int index;
 	struct intel_iommu *iommu;
+	struct irq_2_iommu *irq_iommu;
 
 	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+	irq_iommu = valid_irq_2_iommu(irq);
+	if (!irq_iommu) {
 		spin_unlock(&irq_2_ir_lock);
 		return -1;
 	}
 
-	iommu = irq_2_iommu[irq].iommu;
+	iommu = irq_iommu->iommu;
 
-	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 
-	qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
+	qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	spin_unlock(&irq_2_ir_lock);
 
 	return 0;
@@ -246,28 +285,30 @@ int free_irte(int irq)
 	int index, i;
 	struct irte *irte;
 	struct intel_iommu *iommu;
+	struct irq_2_iommu *irq_iommu;
 
 	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+	irq_iommu = valid_irq_2_iommu(irq);
+	if (!irq_iommu) {
 		spin_unlock(&irq_2_ir_lock);
 		return -1;
 	}
 
-	iommu = irq_2_iommu[irq].iommu;
+	iommu = irq_iommu->iommu;
 
-	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 	irte = &iommu->ir_table->base[index];
 
-	if (!irq_2_iommu[irq].sub_handle) {
-		for (i = 0; i < (1 << irq_2_iommu[irq].irte_mask); i++)
+	if (!irq_iommu->sub_handle) {
+		for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
 			set_64bit((unsigned long *)irte, 0);
-		qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
+		qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	}
 
-	irq_2_iommu[irq].iommu = NULL;
-	irq_2_iommu[irq].irte_index = 0;
-	irq_2_iommu[irq].sub_handle = 0;
-	irq_2_iommu[irq].irte_mask = 0;
+	irq_iommu->iommu = NULL;
+	irq_iommu->irte_index = 0;
+	irq_iommu->sub_handle = 0;
+	irq_iommu->irte_mask = 0;
 
 	spin_unlock(&irq_2_ir_lock);
 
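The core of the patch is a bounds-checked accessor pattern: instead of every caller open-coding an "irq >= NR_IRQS" test before indexing a global array, all lookups funnel through irq_2_iommu(), which returns NULL for an out-of-range irq, and valid_irq_2_iommu(), which additionally requires that an IOMMU is attached. Below is a minimal userspace sketch of that pattern, not the kernel code itself: struct intel_iommu is reduced to a stub and nr_irqs is fixed at compile time, both stand-ins for the kernel definitions.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel type; only what the accessors touch. */
struct intel_iommu {
	int id;
};

#define NR_IRQS 16
static unsigned int nr_irqs = NR_IRQS;	/* runtime bound, as in the patch */

struct irq_2_iommu {
	struct intel_iommu *iommu;
	uint16_t irte_index;
	uint16_t sub_handle;
	uint8_t irte_mask;
};

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

/* Bounds-checked lookup: a NULL return replaces open-coded range tests. */
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	return (irq < nr_irqs) ? irq_2_iommuX + irq : NULL;
}

/* A mapping is valid only if an IOMMU has been attached to the irq. */
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);

	if (!irq_iommu || !irq_iommu->iommu)
		return NULL;
	return irq_iommu;
}

int main(void)
{
	struct intel_iommu dummy = { .id = 1 };

	irq_2_iommuX[3].iommu = &dummy;	/* as if alloc_irte() had run */

	printf("irq 3 remapped:  %d\n", valid_irq_2_iommu(3) != NULL);	/* 1 */
	printf("irq 5 remapped:  %d\n", valid_irq_2_iommu(5) != NULL);	/* 0 */
	printf("irq 99 remapped: %d\n", valid_irq_2_iommu(99) != NULL);	/* 0, no out-of-bounds read */
	return 0;
}

Funnelling every lookup through one helper means the backing store (here still the static irq_2_iommuX[] array) can later be replaced, for example by per-IRQ allocation, without touching any call site; the patch already splits out irq_2_iommu_alloc() as a separate hook, and its "protect irq_2_iommu_alloc later" comment suggests exactly that direction.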