Diffstat (limited to 'drivers/pci/intr_remapping.c')
-rw-r--r--  drivers/pci/intr_remapping.c  213
1 file changed, 165 insertions, 48 deletions
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 6961be807684..23372c811159 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -19,41 +19,136 @@ struct irq_2_iommu {
         u8  irte_mask;
 };
 
-#ifdef CONFIG_HAVE_DYN_ARRAY
-static struct irq_2_iommu *irq_2_iommu;
-DEFINE_DYN_ARRAY(irq_2_iommu, sizeof(struct irq_2_iommu), nr_irqs, PAGE_SIZE, NULL);
+#ifdef CONFIG_HAVE_SPARSE_IRQ
+static struct irq_2_iommu *irq_2_iommuX;
+/* fill one page ? */
+static int nr_irq_2_iommu = 0x100;
+static int irq_2_iommu_index;
+DEFINE_DYN_ARRAY(irq_2_iommuX, sizeof(struct irq_2_iommu), nr_irq_2_iommu, PAGE_SIZE, NULL);
+
+extern void *__alloc_bootmem_nopanic(unsigned long size,
+                                     unsigned long align,
+                                     unsigned long goal);
+
+static struct irq_2_iommu *get_one_free_irq_2_iommu(int not_used)
+{
+        struct irq_2_iommu *iommu;
+        unsigned long total_bytes;
+
+        if (irq_2_iommu_index >= nr_irq_2_iommu) {
+                /*
+                 * we run out of pre-allocate ones, allocate more
+                 */
+                printk(KERN_DEBUG "try to get more irq_2_iommu %d\n", nr_irq_2_iommu);
+
+                total_bytes = sizeof(struct irq_2_iommu)*nr_irq_2_iommu;
+
+                if (after_bootmem)
+                        iommu = kzalloc(total_bytes, GFP_ATOMIC);
+                else
+                        iommu = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);
+
+                if (!iommu)
+                        panic("can not get more irq_2_iommu\n");
+
+                irq_2_iommuX = iommu;
+                irq_2_iommu_index = 0;
+        }
+
+        iommu = &irq_2_iommuX[irq_2_iommu_index];
+        irq_2_iommu_index++;
+        return iommu;
+}
+
+static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
+{
+        struct irq_desc *desc;
+
+        desc = irq_to_desc(irq);
+
+        BUG_ON(!desc);
+
+        return desc->irq_2_iommu;
+}
+
+static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
+{
+        struct irq_desc *desc;
+        struct irq_2_iommu *irq_iommu;
+
+        desc = irq_to_desc(irq);
+
+        BUG_ON(!desc);
+
+        irq_iommu = desc->irq_2_iommu;
+
+        if (!irq_iommu)
+                desc->irq_2_iommu = get_one_free_irq_2_iommu(irq);
+
+        return desc->irq_2_iommu;
+}
+
+#else /* !CONFIG_HAVE_SPARSE_IRQ */
+
+#ifdef CONFIG_HAVE_DYN_ARRAY
+static struct irq_2_iommu *irq_2_iommuX;
+DEFINE_DYN_ARRAY(irq_2_iommuX, sizeof(struct irq_2_iommu), nr_irqs, PAGE_SIZE, NULL);
 #else
-static struct irq_2_iommu irq_2_iommu[NR_IRQS];
+static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
+#endif
+
+static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
+{
+        if (irq < nr_irqs)
+                return &irq_2_iommuX[irq];
+
+        return NULL;
+}
+static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
+{
+        return irq_2_iommu(irq);
+}
 #endif
 
 static DEFINE_SPINLOCK(irq_2_ir_lock);
 
-int irq_remapped(int irq)
+static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
 {
-        if (irq > nr_irqs)
-                return 0;
+        struct irq_2_iommu *irq_iommu;
+
+        irq_iommu = irq_2_iommu(irq);
 
-        if (!irq_2_iommu[irq].iommu)
-                return 0;
+        if (!irq_iommu)
+                return NULL;
 
-        return 1;
+        if (!irq_iommu->iommu)
+                return NULL;
+
+        return irq_iommu;
+}
+
+int irq_remapped(int irq)
+{
+        return valid_irq_2_iommu(irq) != NULL;
 }
 
 int get_irte(int irq, struct irte *entry)
 {
         int index;
+        struct irq_2_iommu *irq_iommu;
 
-        if (!entry || irq > nr_irqs)
+        if (!entry)
                 return -1;
 
         spin_lock(&irq_2_ir_lock);
-        if (!irq_2_iommu[irq].iommu) {
+        irq_iommu = valid_irq_2_iommu(irq);
+        if (!irq_iommu) {
                 spin_unlock(&irq_2_ir_lock);
                 return -1;
         }
 
-        index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
-        *entry = *(irq_2_iommu[irq].iommu->ir_table->base + index);
+        index = irq_iommu->irte_index + irq_iommu->sub_handle;
+        *entry = *(irq_iommu->iommu->ir_table->base + index);
 
         spin_unlock(&irq_2_ir_lock);
         return 0;
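
[Editor's note] The core of the hunk above is get_one_free_irq_2_iommu(): a bump allocator that hands out irq_2_iommu entries from a pre-sized chunk and, when the chunk is exhausted, grabs a fresh one (kzalloc() once after_bootmem is set, __alloc_bootmem_nopanic() before that). Old chunks are deliberately never freed, so entries already handed out stay valid. A minimal user-space sketch of the same pattern; calloc stands in for both kernel allocators, and names such as CHUNK and pool are hypothetical, not from the patch:

    /* Bump-allocator sketch; illustrative only, not kernel code. */
    #include <stdio.h>
    #include <stdlib.h>

    struct irq_2_iommu_mock {
        void *iommu;
        unsigned short irte_index;
        unsigned short sub_handle;
        unsigned char irte_mask;
    };

    #define CHUNK 0x100                     /* mirrors nr_irq_2_iommu = 0x100 */

    static struct irq_2_iommu_mock *pool;   /* current chunk */
    static int pool_index;                  /* next free slot in it */

    static struct irq_2_iommu_mock *get_one_free(void)
    {
        if (!pool || pool_index >= CHUNK) {
            /* chunk exhausted: take a fresh one; the old chunk is
             * abandoned rather than freed, so earlier entries stay valid */
            pool = calloc(CHUNK, sizeof(*pool));
            if (!pool) {
                fprintf(stderr, "can not get more irq_2_iommu\n");
                exit(1);                    /* stands in for panic() */
            }
            pool_index = 0;
        }
        return &pool[pool_index++];
    }

    int main(void)
    {
        /* every CHUNK-th call crosses into a new chunk */
        for (int i = 0; i < 3 * CHUNK; i++)
            get_one_free();
        printf("slots used in current chunk: %d\n", pool_index);
        return 0;
    }

The kernel version differs mainly in that the first chunk comes from DEFINE_DYN_ARRAY rather than the allocator, and the failure path is panic() because this runs during interrupt setup.
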
@@ -62,6 +157,7 @@ int get_irte(int irq, struct irte *entry)
 int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 {
         struct ir_table *table = iommu->ir_table;
+        struct irq_2_iommu *irq_iommu;
         u16 index, start_index;
         unsigned int mask = 0;
         int i;
@@ -69,6 +165,12 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
         if (!count)
                 return -1;
 
+#ifndef CONFIG_HAVE_SPARSE_IRQ
+        /* protect irq_2_iommu_alloc later */
+        if (irq >= nr_irqs)
+                return -1;
+#endif
+
         /*
          * start the IRTE search from index 0.
          */
@@ -108,10 +210,11 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
         for (i = index; i < index + count; i++)
                 table->base[i].present = 1;
 
-        irq_2_iommu[irq].iommu = iommu;
-        irq_2_iommu[irq].irte_index = index;
-        irq_2_iommu[irq].sub_handle = 0;
-        irq_2_iommu[irq].irte_mask = mask;
+        irq_iommu = irq_2_iommu_alloc(irq);
+        irq_iommu->iommu = iommu;
+        irq_iommu->irte_index = index;
+        irq_iommu->sub_handle = 0;
+        irq_iommu->irte_mask = mask;
 
         spin_unlock(&irq_2_ir_lock);
 
@@ -132,31 +235,36 @@ static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 int map_irq_to_irte_handle(int irq, u16 *sub_handle)
 {
         int index;
+        struct irq_2_iommu *irq_iommu;
 
         spin_lock(&irq_2_ir_lock);
-        if (irq >= nr_irqs || !irq_2_iommu[irq].iommu) {
+        irq_iommu = valid_irq_2_iommu(irq);
+        if (!irq_iommu) {
                 spin_unlock(&irq_2_ir_lock);
                 return -1;
         }
 
-        *sub_handle = irq_2_iommu[irq].sub_handle;
-        index = irq_2_iommu[irq].irte_index;
+        *sub_handle = irq_iommu->sub_handle;
+        index = irq_iommu->irte_index;
         spin_unlock(&irq_2_ir_lock);
         return index;
 }
 
 int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 {
+        struct irq_2_iommu *irq_iommu;
+
         spin_lock(&irq_2_ir_lock);
-        if (irq >= nr_irqs || irq_2_iommu[irq].iommu) {
+        irq_iommu = valid_irq_2_iommu(irq);
+        if (!irq_iommu) {
                 spin_unlock(&irq_2_ir_lock);
                 return -1;
         }
 
-        irq_2_iommu[irq].iommu = iommu;
-        irq_2_iommu[irq].irte_index = index;
-        irq_2_iommu[irq].sub_handle = subhandle;
-        irq_2_iommu[irq].irte_mask = 0;
+        irq_iommu->iommu = iommu;
+        irq_iommu->irte_index = index;
+        irq_iommu->sub_handle = subhandle;
+        irq_iommu->irte_mask = 0;
 
         spin_unlock(&irq_2_ir_lock);
 
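
[Editor's note] From this hunk on, every accessor has the same shape: take irq_2_ir_lock, call valid_irq_2_iommu(), unlock and return -1 if it yields NULL, otherwise work through the returned pointer. That one helper replaces the scattered "irq >= nr_irqs || !irq_2_iommu[irq].iommu" checks of the old code. A user-space sketch of the lock/validate/operate pattern, with a pthread mutex in place of the spinlock and a plain array in place of the lookup (all names illustrative):

    /* Lock/validate/operate sketch; compile with -pthread. */
    #include <pthread.h>
    #include <stdio.h>

    #define NR 16

    struct mapping {
        int valid;                  /* stands in for irq_iommu->iommu != NULL */
        int irte_index;
        int sub_handle;
    };

    static struct mapping table[NR];
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* range check and "is it set?" check folded into one helper,
     * like valid_irq_2_iommu() in the patch */
    static struct mapping *valid_mapping(unsigned int irq)
    {
        if (irq >= NR || !table[irq].valid)
            return NULL;
        return &table[irq];
    }

    static int map_irq_to_handle(unsigned int irq, int *sub_handle)
    {
        struct mapping *m;
        int index;

        pthread_mutex_lock(&lock);
        m = valid_mapping(irq);
        if (!m) {
            pthread_mutex_unlock(&lock);
            return -1;              /* same unlock-and-fail shape as the patch */
        }
        *sub_handle = m->sub_handle;
        index = m->irte_index;
        pthread_mutex_unlock(&lock);
        return index;
    }

    int main(void)
    {
        int sub = 0, index;

        table[3] = (struct mapping){ .valid = 1, .irte_index = 7, .sub_handle = 2 };
        index = map_irq_to_handle(3, &sub);
        printf("irq 3 -> index %d (sub %d)\n", index, sub);
        printf("irq 9 -> %d (unmapped)\n", map_irq_to_handle(9, &sub));
        return 0;
    }

One behavioural note on the hunk itself: the old set_irte_irq() rejected an irq whose slot was already occupied (iommu non-NULL), while the converted code rejects the opposite case, an irq with no valid mapping, since that is what valid_irq_2_iommu() reports.
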
@@ -165,16 +273,19 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 
 int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
 {
+        struct irq_2_iommu *irq_iommu;
+
         spin_lock(&irq_2_ir_lock);
-        if (irq >= nr_irqs || !irq_2_iommu[irq].iommu) {
+        irq_iommu = valid_irq_2_iommu(irq);
+        if (!irq_iommu) {
                 spin_unlock(&irq_2_ir_lock);
                 return -1;
         }
 
-        irq_2_iommu[irq].iommu = NULL;
-        irq_2_iommu[irq].irte_index = 0;
-        irq_2_iommu[irq].sub_handle = 0;
-        irq_2_iommu[irq].irte_mask = 0;
+        irq_iommu->iommu = NULL;
+        irq_iommu->irte_index = 0;
+        irq_iommu->sub_handle = 0;
+        irq_2_iommu(irq)->irte_mask = 0;
 
         spin_unlock(&irq_2_ir_lock);
 
@@ -186,16 +297,18 @@ int modify_irte(int irq, struct irte *irte_modified)
         int index;
         struct irte *irte;
         struct intel_iommu *iommu;
+        struct irq_2_iommu *irq_iommu;
 
         spin_lock(&irq_2_ir_lock);
-        if (irq >= nr_irqs || !irq_2_iommu[irq].iommu) {
+        irq_iommu = valid_irq_2_iommu(irq);
+        if (!irq_iommu) {
                 spin_unlock(&irq_2_ir_lock);
                 return -1;
         }
 
-        iommu = irq_2_iommu[irq].iommu;
+        iommu = irq_iommu->iommu;
 
-        index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+        index = irq_iommu->irte_index + irq_iommu->sub_handle;
         irte = &iommu->ir_table->base[index];
 
         set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
@@ -211,18 +324,20 @@ int flush_irte(int irq)
 {
         int index;
         struct intel_iommu *iommu;
+        struct irq_2_iommu *irq_iommu;
 
         spin_lock(&irq_2_ir_lock);
-        if (irq >= nr_irqs || !irq_2_iommu[irq].iommu) {
+        irq_iommu = valid_irq_2_iommu(irq);
+        if (!irq_iommu) {
                 spin_unlock(&irq_2_ir_lock);
                 return -1;
         }
 
-        iommu = irq_2_iommu[irq].iommu;
+        iommu = irq_iommu->iommu;
 
-        index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+        index = irq_iommu->irte_index + irq_iommu->sub_handle;
 
-        qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
+        qi_flush_iec(iommu, index, irq_iommu->irte_mask);
         spin_unlock(&irq_2_ir_lock);
 
         return 0;
@@ -254,28 +369,30 @@ int free_irte(int irq)
         int index, i;
         struct irte *irte;
         struct intel_iommu *iommu;
+        struct irq_2_iommu *irq_iommu;
 
         spin_lock(&irq_2_ir_lock);
-        if (irq >= nr_irqs || !irq_2_iommu[irq].iommu) {
+        irq_iommu = valid_irq_2_iommu(irq);
+        if (!irq_iommu) {
                 spin_unlock(&irq_2_ir_lock);
                 return -1;
         }
 
-        iommu = irq_2_iommu[irq].iommu;
+        iommu = irq_iommu->iommu;
 
-        index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+        index = irq_iommu->irte_index + irq_iommu->sub_handle;
         irte = &iommu->ir_table->base[index];
 
-        if (!irq_2_iommu[irq].sub_handle) {
-                for (i = 0; i < (1 << irq_2_iommu[irq].irte_mask); i++)
+        if (!irq_iommu->sub_handle) {
+                for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
                         set_64bit((unsigned long *)irte, 0);
-                qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
+                qi_flush_iec(iommu, index, irq_iommu->irte_mask);
         }
 
-        irq_2_iommu[irq].iommu = NULL;
-        irq_2_iommu[irq].irte_index = 0;
-        irq_2_iommu[irq].sub_handle = 0;
-        irq_2_iommu[irq].irte_mask = 0;
+        irq_iommu->iommu = NULL;
+        irq_iommu->irte_index = 0;
+        irq_iommu->sub_handle = 0;
+        irq_iommu->irte_mask = 0;
 
         spin_unlock(&irq_2_ir_lock);
 
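
[Editor's note] Taken together: under CONFIG_HAVE_SPARSE_IRQ the irq_2_iommu struct hangs off the irq descriptor and is allocated lazily, on first use, by irq_2_iommu_alloc(); the flat build keeps a fixed array, and irq_2_iommu_alloc() degenerates to a bounds-checked lookup (which is why alloc_irte() grows an explicit irq >= nr_irqs guard only in the non-sparse case). A sketch of the lazy first-touch allocation, with the descriptor table mocked as a fixed array (all names hypothetical):

    /* Lazy per-descriptor allocation sketch; illustrative only. */
    #include <stdio.h>
    #include <stdlib.h>

    struct irq_2_iommu_mock { int irte_index; };

    struct irq_desc_mock {
        struct irq_2_iommu_mock *irq_2_iommu;   /* NULL until first use */
    };

    static struct irq_desc_mock descs[32];

    static struct irq_desc_mock *irq_to_desc_mock(unsigned int irq)
    {
        return irq < 32 ? &descs[irq] : NULL;
    }

    static struct irq_2_iommu_mock *irq_2_iommu_alloc_mock(unsigned int irq)
    {
        struct irq_desc_mock *desc = irq_to_desc_mock(irq);

        if (!desc)
            return NULL;                        /* the kernel BUG()s here instead */
        if (!desc->irq_2_iommu)                 /* first touch: allocate */
            desc->irq_2_iommu = calloc(1, sizeof(*desc->irq_2_iommu));
        return desc->irq_2_iommu;
    }

    int main(void)
    {
        struct irq_2_iommu_mock *a = irq_2_iommu_alloc_mock(5);
        struct irq_2_iommu_mock *b = irq_2_iommu_alloc_mock(5);

        printf("repeat alloc returns same object: %s\n", a == b ? "yes" : "no");
        return 0;
    }

Repeated calls for the same irq return the same object, which is why alloc_irte() can call irq_2_iommu_alloc() unconditionally before filling in the fields.
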