author		Linus Torvalds <torvalds@linux-foundation.org>	2008-10-11 14:47:30 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-11 14:51:16 -0400
commit		ead9d23d803ea3a73766c3cb27bf7563ac8d7266 (patch)
tree		42225fadd0d5388bf21d1658e56879e14f23e013 /drivers/pci/dmar.c
parent		bf6f51e3a46f6a602853d3cbacd05864bc6e2a37 (diff)
parent		0afe2db21394820d32646a695eccf3fbfe6ab5c7 (diff)
Merge phase #4 (X2APIC, APIC unification, CPU identification unification) of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-v28-for-linus-phase4-D' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (186 commits)
x86, debug: print more information about unknown CPUs
x86 setup: handle more than 8 CPU flag words
x86: cpuid, fix typo
x86: move transmeta cap read to early_init_transmeta()
x86: identify_cpu_without_cpuid v2
x86: extended "flags" to show virtualization HW feature in /proc/cpuinfo
x86: move VMX MSRs to msr-index.h
x86: centaur_64.c remove duplicated setting of CONSTANT_TSC
x86: intel.c put workaround for old cpus together
x86: let intel 64-bit use intel.c
x86: make intel_64.c the same as intel.c
x86: make intel.c have 64-bit support code
x86: little clean up of intel.c/intel_64.c
x86: make 64 bit to use amd.c
x86: make amd_64 have 32 bit code
x86: make amd.c have 64bit support code
x86: merge header in amd_64.c
x86: add srat_detect_node for amd64
x86: remove duplicated force_mwait
x86: cpu make amd.c more like amd_64.c v2
...
Diffstat (limited to 'drivers/pci/dmar.c')
-rw-r--r--	drivers/pci/dmar.c	397
1 file changed, 378 insertions(+), 19 deletions(-)
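
Before the diff itself: the dmar.c changes split DMAR initialization into two phases, so the ACPI DMAR table can be parsed (and a struct intel_iommu allocated per DRHD via alloc_iommu()) early in boot, while device-scope resolution is deferred until the PCI subsystem is available (dmar_dev_scope_init(), which calls dmar_parse_dev() and, under CONFIG_DMAR, rmrr_parse_dev()). A minimal sketch of the implied call order follows; the wrapper function and its placement are hypothetical, not part of this patch.

/* Illustrative sketch only (not in this patch): the bring-up order implied
 * by the split. Assumes dmar_table_init() and dmar_dev_scope_init() are
 * visible via <linux/dmar.h>, as in this series. */
#include <linux/dmar.h>

static int __init example_dmar_bringup(void)
{
	int ret;

	/* Phase 1, early boot: parse the ACPI DMAR table and allocate
	 * per-DRHD iommu state; dmar_table_init() latches
	 * dmar_table_initialized, so a second call just returns 0. */
	ret = dmar_table_init();
	if (ret)
		return ret;

	/* Phase 2, after PCI enumeration: resolve each unit's device
	 * scope to struct pci_dev pointers. */
	return dmar_dev_scope_init();
}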
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 8bf86ae2333f..bd2c01674f5e 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -19,13 +19,16 @@
  * Author: Shaohua Li <shaohua.li@intel.com>
  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  *
- * This file implements early detection/parsing of DMA Remapping Devices
+ * This file implements early detection/parsing of Remapping Devices
  * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
  * tables.
+ *
+ * These routines are used by both DMA-remapping and Interrupt-remapping
  */
 
 #include <linux/pci.h>
 #include <linux/dmar.h>
+#include <linux/timer.h>
 #include "iova.h"
 #include "intel-iommu.h"
 
@@ -37,7 +40,6 @@
  * these units are not supported by the architecture.
  */
 LIST_HEAD(dmar_drhd_units);
-LIST_HEAD(dmar_rmrr_units);
 
 static struct acpi_table_header * __initdata dmar_tbl;
 
@@ -53,11 +55,6 @@ static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
 	list_add(&drhd->list, &dmar_drhd_units);
 }
 
-static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
-{
-	list_add(&rmrr->list, &dmar_rmrr_units);
-}
-
 static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
 					   struct pci_dev **dev, u16 segment)
 {
@@ -172,19 +169,37 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
 	struct acpi_dmar_hardware_unit *drhd;
 	struct dmar_drhd_unit *dmaru;
 	int ret = 0;
-	static int include_all;
 
 	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
 	if (!dmaru)
 		return -ENOMEM;
 
+	dmaru->hdr = header;
 	drhd = (struct acpi_dmar_hardware_unit *)header;
 	dmaru->reg_base_addr = drhd->address;
 	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
 
+	ret = alloc_iommu(dmaru);
+	if (ret) {
+		kfree(dmaru);
+		return ret;
+	}
+	dmar_register_drhd_unit(dmaru);
+	return 0;
+}
+
+static int __init
+dmar_parse_dev(struct dmar_drhd_unit *dmaru)
+{
+	struct acpi_dmar_hardware_unit *drhd;
+	static int include_all;
+	int ret;
+
+	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
+
 	if (!dmaru->include_all)
 		ret = dmar_parse_dev_scope((void *)(drhd + 1),
-				((void *)drhd) + header->length,
+				((void *)drhd) + drhd->header.length,
 				&dmaru->devices_cnt, &dmaru->devices,
 				drhd->segment);
 	else {
@@ -197,37 +212,59 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
 		include_all = 1;
 	}
 
-	if (ret || (dmaru->devices_cnt == 0 && !dmaru->include_all))
+	if (ret || (dmaru->devices_cnt == 0 && !dmaru->include_all)) {
+		list_del(&dmaru->list);
 		kfree(dmaru);
-	else
-		dmar_register_drhd_unit(dmaru);
+	}
 	return ret;
 }
 
+#ifdef CONFIG_DMAR
+LIST_HEAD(dmar_rmrr_units);
+
+static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
+{
+	list_add(&rmrr->list, &dmar_rmrr_units);
+}
+
+
 static int __init
 dmar_parse_one_rmrr(struct acpi_dmar_header *header)
 {
 	struct acpi_dmar_reserved_memory *rmrr;
 	struct dmar_rmrr_unit *rmrru;
-	int ret = 0;
 
 	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
 	if (!rmrru)
 		return -ENOMEM;
 
+	rmrru->hdr = header;
 	rmrr = (struct acpi_dmar_reserved_memory *)header;
 	rmrru->base_address = rmrr->base_address;
 	rmrru->end_address = rmrr->end_address;
+
+	dmar_register_rmrr_unit(rmrru);
+	return 0;
+}
+
+static int __init
+rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
+{
+	struct acpi_dmar_reserved_memory *rmrr;
+	int ret;
+
+	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
 	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
-		((void *)rmrr) + header->length,
+		((void *)rmrr) + rmrr->header.length,
 		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
 
-	if (ret || (rmrru->devices_cnt == 0))
+	if (ret || (rmrru->devices_cnt == 0)) {
+		list_del(&rmrru->list);
 		kfree(rmrru);
-	else
-		dmar_register_rmrr_unit(rmrru);
+	}
 	return ret;
 }
+#endif
 
 static void __init
 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
@@ -252,6 +289,7 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
 	}
 }
 
+
 /**
  * parse_dmar_table - parses the DMA reporting table
  */
@@ -284,7 +322,9 @@ parse_dmar_table(void)
 			ret = dmar_parse_one_drhd(entry_header);
 			break;
 		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
+#ifdef CONFIG_DMAR
 			ret = dmar_parse_one_rmrr(entry_header);
+#endif
 			break;
 		default:
 			printk(KERN_WARNING PREFIX
@@ -300,15 +340,77 @@ parse_dmar_table(void)
 	return ret;
 }
 
+int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
+			  struct pci_dev *dev)
+{
+	int index;
+
+	while (dev) {
+		for (index = 0; index < cnt; index++)
+			if (dev == devices[index])
+				return 1;
 
-int __init dmar_table_init(void)
+		/* Check our parent */
+		dev = dev->bus->self;
+	}
+
+	return 0;
+}
+
+struct dmar_drhd_unit *
+dmar_find_matched_drhd_unit(struct pci_dev *dev)
 {
+	struct dmar_drhd_unit *drhd = NULL;
+
+	list_for_each_entry(drhd, &dmar_drhd_units, list) {
+		if (drhd->include_all || dmar_pci_device_match(drhd->devices,
+						drhd->devices_cnt, dev))
+			return drhd;
+	}
+
+	return NULL;
+}
+
+int __init dmar_dev_scope_init(void)
+{
+	struct dmar_drhd_unit *drhd;
+	int ret = -ENODEV;
+
+	for_each_drhd_unit(drhd) {
+		ret = dmar_parse_dev(drhd);
+		if (ret)
+			return ret;
+	}
+
+#ifdef CONFIG_DMAR
+	{
+		struct dmar_rmrr_unit *rmrr;
+		for_each_rmrr_units(rmrr) {
+			ret = rmrr_parse_dev(rmrr);
+			if (ret)
+				return ret;
+		}
+	}
+#endif
+
+	return ret;
+}
+
+
+int __init dmar_table_init(void)
+{
+	static int dmar_table_initialized;
 	int ret;
 
+	if (dmar_table_initialized)
+		return 0;
+
+	dmar_table_initialized = 1;
+
 	ret = parse_dmar_table();
 	if (ret) {
-		printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
+		if (ret != -ENODEV)
+			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
 		return ret;
 	}
 
@@ -317,9 +419,14 @@ int __init dmar_table_init(void)
 		return -ENODEV;
 	}
 
+#ifdef CONFIG_DMAR
 	if (list_empty(&dmar_rmrr_units))
 		printk(KERN_INFO PREFIX "No RMRR found\n");
+#endif
 
+#ifdef CONFIG_INTR_REMAP
+	parse_ioapics_under_ir();
+#endif
 	return 0;
 }
 
@@ -341,3 +448,255 @@
 
 	return (ACPI_SUCCESS(status) ? 1 : 0);
 }
+
+void __init detect_intel_iommu(void)
+{
+	int ret;
+
+	ret = early_dmar_detect();
+
+#ifdef CONFIG_DMAR
+	{
+		struct acpi_table_dmar *dmar;
+		/*
+		 * for now we will disable dma-remapping when interrupt
+		 * remapping is enabled.
+		 * When support for queued invalidation for IOTLB invalidation
+		 * is added, we will not need this any more.
+		 */
+		dmar = (struct acpi_table_dmar *) dmar_tbl;
+		if (ret && cpu_has_x2apic && dmar->flags & 0x1) {
+			printk(KERN_INFO
+			       "Queued invalidation will be enabled to support "
+			       "x2apic and Intr-remapping.\n");
+			printk(KERN_INFO
+			       "Disabling IOMMU detection, because of missing "
+			       "queued invalidation support for IOTLB "
+			       "invalidation\n");
+			printk(KERN_INFO
+			       "Use \"nox2apic\", if you want to use Intel "
+			       " IOMMU for DMA-remapping and don't care about "
+			       " x2apic support\n");
+
+			dmar_disabled = 1;
+			return;
+		}
+
+		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
+		    !dmar_disabled)
+			iommu_detected = 1;
+	}
+#endif
+}
+
+
+int alloc_iommu(struct dmar_drhd_unit *drhd)
+{
+	struct intel_iommu *iommu;
+	int map_size;
+	u32 ver;
+	static int iommu_allocated = 0;
+
+	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
+	if (!iommu)
+		return -ENOMEM;
+
+	iommu->seq_id = iommu_allocated++;
+
+	iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
+	if (!iommu->reg) {
+		printk(KERN_ERR "IOMMU: can't map the region\n");
+		goto error;
+	}
+	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
+	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
+
+	/* the registers might be more than one page */
+	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
+		cap_max_fault_reg_offset(iommu->cap));
+	map_size = PAGE_ALIGN_4K(map_size);
+	if (map_size > PAGE_SIZE_4K) {
+		iounmap(iommu->reg);
+		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
+		if (!iommu->reg) {
+			printk(KERN_ERR "IOMMU: can't map the region\n");
+			goto error;
+		}
+	}
+
+	ver = readl(iommu->reg + DMAR_VER_REG);
+	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
+		drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
+		iommu->cap, iommu->ecap);
+
+	spin_lock_init(&iommu->register_lock);
+
+	drhd->iommu = iommu;
+	return 0;
+error:
+	kfree(iommu);
+	return -1;
+}
+
+void free_iommu(struct intel_iommu *iommu)
+{
+	if (!iommu)
+		return;
+
+#ifdef CONFIG_DMAR
+	free_dmar_iommu(iommu);
+#endif
+
+	if (iommu->reg)
+		iounmap(iommu->reg);
+	kfree(iommu);
+}
+
+/*
+ * Reclaim all the submitted descriptors which have completed its work.
+ */
+static inline void reclaim_free_desc(struct q_inval *qi)
+{
+	while (qi->desc_status[qi->free_tail] == QI_DONE) {
+		qi->desc_status[qi->free_tail] = QI_FREE;
+		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
+		qi->free_cnt++;
+	}
+}
+
+/*
+ * Submit the queued invalidation descriptor to the remapping
+ * hardware unit and wait for its completion.
+ */
+void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
+{
+	struct q_inval *qi = iommu->qi;
+	struct qi_desc *hw, wait_desc;
+	int wait_index, index;
+	unsigned long flags;
+
+	if (!qi)
+		return;
+
+	hw = qi->desc;
+
+	spin_lock(&qi->q_lock);
+	while (qi->free_cnt < 3) {
+		spin_unlock(&qi->q_lock);
+		cpu_relax();
+		spin_lock(&qi->q_lock);
+	}
+
+	index = qi->free_head;
+	wait_index = (index + 1) % QI_LENGTH;
+
+	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
+
+	hw[index] = *desc;
+
+	wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
+	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
+
+	hw[wait_index] = wait_desc;
+
+	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
+	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
+
+	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
+	qi->free_cnt -= 2;
+
+	spin_lock_irqsave(&iommu->register_lock, flags);
+	/*
+	 * update the HW tail register indicating the presence of
+	 * new descriptors.
+	 */
+	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
+	spin_unlock_irqrestore(&iommu->register_lock, flags);
+
+	while (qi->desc_status[wait_index] != QI_DONE) {
+		spin_unlock(&qi->q_lock);
+		cpu_relax();
+		spin_lock(&qi->q_lock);
+	}
+
+	qi->desc_status[index] = QI_DONE;
+
+	reclaim_free_desc(qi);
+	spin_unlock(&qi->q_lock);
+}
+
+/*
+ * Flush the global interrupt entry cache.
+ */
+void qi_global_iec(struct intel_iommu *iommu)
+{
+	struct qi_desc desc;
+
+	desc.low = QI_IEC_TYPE;
+	desc.high = 0;
+
+	qi_submit_sync(&desc, iommu);
+}
+
+/*
+ * Enable Queued Invalidation interface. This is a must to support
+ * interrupt-remapping. Also used by DMA-remapping, which replaces
+ * register based IOTLB invalidation.
+ */
+int dmar_enable_qi(struct intel_iommu *iommu)
+{
+	u32 cmd, sts;
+	unsigned long flags;
+	struct q_inval *qi;
+
+	if (!ecap_qis(iommu->ecap))
+		return -ENOENT;
+
+	/*
+	 * queued invalidation is already setup and enabled.
+	 */
+	if (iommu->qi)
+		return 0;
+
+	iommu->qi = kmalloc(sizeof(*qi), GFP_KERNEL);
+	if (!iommu->qi)
+		return -ENOMEM;
+
+	qi = iommu->qi;
+
+	qi->desc = (void *)(get_zeroed_page(GFP_KERNEL));
+	if (!qi->desc) {
+		kfree(qi);
+		iommu->qi = 0;
+		return -ENOMEM;
+	}
+
+	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_KERNEL);
+	if (!qi->desc_status) {
+		free_page((unsigned long) qi->desc);
+		kfree(qi);
+		iommu->qi = 0;
+		return -ENOMEM;
+	}
+
+	qi->free_head = qi->free_tail = 0;
+	qi->free_cnt = QI_LENGTH;
+
+	spin_lock_init(&qi->q_lock);
+
+	spin_lock_irqsave(&iommu->register_lock, flags);
+	/* write zero to the tail reg */
+	writel(0, iommu->reg + DMAR_IQT_REG);
+
+	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
+
+	cmd = iommu->gcmd | DMA_GCMD_QIE;
+	iommu->gcmd |= DMA_GCMD_QIE;
+	writel(cmd, iommu->reg + DMAR_GCMD_REG);
+
+	/* Make sure hardware complete it */
+	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
+	spin_unlock_irqrestore(&iommu->register_lock, flags);
+
+	return 0;
+}