Diffstat (limited to 'drivers/pci')

-rw-r--r--  drivers/pci/dmar.c         119
-rw-r--r--  drivers/pci/intel-iommu.c  250
-rw-r--r--  drivers/pci/quirks.c        14

3 files changed, 229 insertions, 154 deletions
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 8b29c307f1a1..691b3adeb870 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -188,8 +188,7 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
 	return 0;
 }
 
-static int __init
-dmar_parse_dev(struct dmar_drhd_unit *dmaru)
+static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
 {
 	struct acpi_dmar_hardware_unit *drhd;
 	static int include_all;
@@ -277,14 +276,15 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
 		drhd = (struct acpi_dmar_hardware_unit *)header;
 		printk (KERN_INFO PREFIX
 			"DRHD (flags: 0x%08x)base: 0x%016Lx\n",
-			drhd->flags, drhd->address);
+			drhd->flags, (unsigned long long)drhd->address);
 		break;
 	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
 		rmrr = (struct acpi_dmar_reserved_memory *)header;
 
 		printk (KERN_INFO PREFIX
 			"RMRR base: 0x%016Lx end: 0x%016Lx\n",
-			rmrr->base_address, rmrr->end_address);
+			(unsigned long long)rmrr->base_address,
+			(unsigned long long)rmrr->end_address);
 		break;
 	}
 }
@@ -328,7 +328,7 @@ parse_dmar_table(void)
 	if (!dmar)
 		return -ENODEV;
 
-	if (dmar->width < PAGE_SHIFT_4K - 1) {
+	if (dmar->width < PAGE_SHIFT - 1) {
 		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
 		return -EINVAL;
 	}
@@ -460,8 +460,8 @@ void __init detect_intel_iommu(void)
 
 	ret = dmar_table_detect();
 
-#ifdef CONFIG_DMAR
 	{
+#ifdef CONFIG_INTR_REMAP
 		struct acpi_table_dmar *dmar;
 		/*
 		 * for now we will disable dma-remapping when interrupt
@@ -470,29 +470,17 @@ void __init detect_intel_iommu(void)
 		 * is added, we will not need this any more.
 		 */
 		dmar = (struct acpi_table_dmar *) dmar_tbl;
-		if (ret && cpu_has_x2apic && dmar->flags & 0x1) {
+		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
 			printk(KERN_INFO
 			       "Queued invalidation will be enabled to support "
 			       "x2apic and Intr-remapping.\n");
-			printk(KERN_INFO
-			       "Disabling IOMMU detection, because of missing "
-			       "queued invalidation support for IOTLB "
-			       "invalidation\n");
-			printk(KERN_INFO
-			       "Use \"nox2apic\", if you want to use Intel "
-			       " IOMMU for DMA-remapping and don't care about "
-			       " x2apic support\n");
-
-			dmar_disabled = 1;
-			goto end;
-		}
-
+#endif
+#ifdef CONFIG_DMAR
 		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
 		    !dmar_disabled)
 			iommu_detected = 1;
-	}
-end:
 #endif
+	}
 	dmar_tbl = NULL;
 }
 
@@ -510,7 +498,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 
 	iommu->seq_id = iommu_allocated++;
 
-	iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
+	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
 	if (!iommu->reg) {
 		printk(KERN_ERR "IOMMU: can't map the region\n");
 		goto error;
@@ -521,8 +509,8 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	/* the registers might be more than one page */
 	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
 		cap_max_fault_reg_offset(iommu->cap));
-	map_size = PAGE_ALIGN_4K(map_size);
-	if (map_size > PAGE_SIZE_4K) {
+	map_size = VTD_PAGE_ALIGN(map_size);
+	if (map_size > VTD_PAGE_SIZE) {
 		iounmap(iommu->reg);
 		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
 		if (!iommu->reg) {
@@ -533,8 +521,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 
 	ver = readl(iommu->reg + DMAR_VER_REG);
 	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
-		drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
-		iommu->cap, iommu->ecap);
+		(unsigned long long)drhd->reg_base_addr,
+		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
+		(unsigned long long)iommu->cap,
+		(unsigned long long)iommu->ecap);
 
 	spin_lock_init(&iommu->register_lock);
 
@@ -587,11 +577,11 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 
 	hw = qi->desc;
 
-	spin_lock(&qi->q_lock);
+	spin_lock_irqsave(&qi->q_lock, flags);
 	while (qi->free_cnt < 3) {
-		spin_unlock(&qi->q_lock);
+		spin_unlock_irqrestore(&qi->q_lock, flags);
 		cpu_relax();
-		spin_lock(&qi->q_lock);
+		spin_lock_irqsave(&qi->q_lock, flags);
 	}
 
 	index = qi->free_head;
@@ -612,15 +602,22 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
 	qi->free_cnt -= 2;
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	spin_lock(&iommu->register_lock);
 	/*
 	 * update the HW tail register indicating the presence of
 	 * new descriptors.
 	 */
 	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	spin_unlock(&iommu->register_lock);
 
 	while (qi->desc_status[wait_index] != QI_DONE) {
+		/*
+		 * We will leave the interrupts disabled, to prevent interrupt
+		 * context to queue another cmd while a cmd is already submitted
+		 * and waiting for completion on this cpu. This is to avoid
+		 * a deadlock where the interrupt context can wait indefinitely
+		 * for free slots in the queue.
+		 */
 		spin_unlock(&qi->q_lock);
 		cpu_relax();
 		spin_lock(&qi->q_lock);
@@ -629,7 +626,7 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 	qi->desc_status[index] = QI_DONE;
 
 	reclaim_free_desc(qi);
-	spin_unlock(&qi->q_lock);
+	spin_unlock_irqrestore(&qi->q_lock, flags);
 }
 
 /*
@@ -645,6 +642,62 @@ void qi_global_iec(struct intel_iommu *iommu)
 	qi_submit_sync(&desc, iommu);
 }
 
+int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+		     u64 type, int non_present_entry_flush)
+{
+
+	struct qi_desc desc;
+
+	if (non_present_entry_flush) {
+		if (!cap_caching_mode(iommu->cap))
+			return 1;
+		else
+			did = 0;
+	}
+
+	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
+			| QI_CC_GRAN(type) | QI_CC_TYPE;
+	desc.high = 0;
+
+	qi_submit_sync(&desc, iommu);
+
+	return 0;
+
+}
+
+int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+		   unsigned int size_order, u64 type,
+		   int non_present_entry_flush)
+{
+	u8 dw = 0, dr = 0;
+
+	struct qi_desc desc;
+	int ih = 0;
+
+	if (non_present_entry_flush) {
+		if (!cap_caching_mode(iommu->cap))
+			return 1;
+		else
+			did = 0;
+	}
+
+	if (cap_write_drain(iommu->cap))
+		dw = 1;
+
+	if (cap_read_drain(iommu->cap))
+		dr = 1;
+
+	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
+		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
+	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
+		| QI_IOTLB_AM(size_order);
+
+	qi_submit_sync(&desc, iommu);
+
+	return 0;
+
+}
+
 /*
  * Enable Queued Invalidation interface. This is a must to support
  * interrupt-remapping. Also used by DMA-remapping, which replaces
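
The qi_submit_sync() hunks above switch the queue lock to spin_lock_irqsave() and drop the irqsave on the nested register lock, for the deadlock-avoidance reason given in the new comment. Below is a minimal, hypothetical sketch of that lock-nesting pattern in isolation; the structure and function names are simplified stand-ins, not the kernel's actual definitions.

/*
 * Sketch of the locking pattern adopted in qi_submit_sync(): interrupts are
 * disabled for the whole submit-and-wait sequence so an interrupt handler on
 * this CPU cannot re-enter and spin forever waiting for queue slots, and the
 * nested register lock no longer needs its own irqsave because irqs are
 * already off.  Hypothetical, simplified types for illustration only.
 */
#include <linux/spinlock.h>

struct fake_qi {
	spinlock_t q_lock;	/* protects the invalidation queue */
	int free_cnt;
};

struct fake_iommu {
	spinlock_t register_lock;	/* protects MMIO register writes */
	struct fake_qi *qi;
};

static void fake_submit_sync(struct fake_iommu *iommu)
{
	struct fake_qi *qi = iommu->qi;
	unsigned long flags;

	spin_lock_irqsave(&qi->q_lock, flags);	/* irqs off from here on */

	/* ... reserve descriptor slots in the queue ... */

	spin_lock(&iommu->register_lock);	/* nested; no extra irqsave needed */
	/* ... writel() the new tail index to the hardware ... */
	spin_unlock(&iommu->register_lock);

	/* ... poll for completion, briefly dropping q_lock around cpu_relax() ... */

	spin_unlock_irqrestore(&qi->q_lock, flags);
}
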
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 8b51e10b7783..a2692724b68f 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -18,6 +18,7 @@
  * Author: Ashok Raj <ashok.raj@intel.com>
  * Author: Shaohua Li <shaohua.li@intel.com>
  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ * Author: Fenghua Yu <fenghua.yu@intel.com>
  */
 
 #include <linux/init.h>
@@ -35,11 +36,13 @@
 #include <linux/timer.h>
 #include <linux/iova.h>
 #include <linux/intel-iommu.h>
-#include <asm/proto.h> /* force_iommu in this header in x86-64*/
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 #include "pci.h"
 
+#define ROOT_SIZE	VTD_PAGE_SIZE
+#define CONTEXT_SIZE	VTD_PAGE_SIZE
+
 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
 
@@ -199,7 +202,7 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
 			spin_unlock_irqrestore(&iommu->lock, flags);
 			return NULL;
 		}
-		__iommu_flush_cache(iommu, (void *)context, PAGE_SIZE_4K);
+		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
 		phy_addr = virt_to_phys((void *)context);
 		set_root_value(root, phy_addr);
 		set_root_present(root);
@@ -345,7 +348,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
 				return NULL;
 			}
 			__iommu_flush_cache(domain->iommu, tmp_page,
-				PAGE_SIZE_4K);
+				PAGE_SIZE);
 			dma_set_pte_addr(*pte, virt_to_phys(tmp_page));
 			/*
 			 * high level table always sets r/w, last level page
@@ -408,13 +411,13 @@ static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
 	start &= (((u64)1) << addr_width) - 1;
 	end &= (((u64)1) << addr_width) - 1;
 	/* in case it's partial page */
-	start = PAGE_ALIGN_4K(start);
-	end &= PAGE_MASK_4K;
+	start = PAGE_ALIGN(start);
+	end &= PAGE_MASK;
 
 	/* we don't need lock here, nobody else touches the iova range */
 	while (start < end) {
 		dma_pte_clear_one(domain, start);
-		start += PAGE_SIZE_4K;
+		start += VTD_PAGE_SIZE;
 	}
 }
 
@@ -468,7 +471,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 	if (!root)
 		return -ENOMEM;
 
-	__iommu_flush_cache(iommu, root, PAGE_SIZE_4K);
+	__iommu_flush_cache(iommu, root, ROOT_SIZE);
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	iommu->root_entry = root;
@@ -567,27 +570,6 @@ static int __iommu_flush_context(struct intel_iommu *iommu,
 	return 0;
 }
 
-static int inline iommu_flush_context_global(struct intel_iommu *iommu,
-	int non_present_entry_flush)
-{
-	return __iommu_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
-		non_present_entry_flush);
-}
-
-static int inline iommu_flush_context_domain(struct intel_iommu *iommu, u16 did,
-	int non_present_entry_flush)
-{
-	return __iommu_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL,
-		non_present_entry_flush);
-}
-
-static int inline iommu_flush_context_device(struct intel_iommu *iommu,
-	u16 did, u16 source_id, u8 function_mask, int non_present_entry_flush)
-{
-	return __iommu_flush_context(iommu, did, source_id, function_mask,
-		DMA_CCMD_DEVICE_INVL, non_present_entry_flush);
-}
-
 /* return value determine if we need a write buffer flush */
 static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 	u64 addr, unsigned int size_order, u64 type,
@@ -655,37 +637,25 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 		printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
 	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
 		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
-			DMA_TLB_IIRG(type), DMA_TLB_IAIG(val));
+			(unsigned long long)DMA_TLB_IIRG(type),
+			(unsigned long long)DMA_TLB_IAIG(val));
 	/* flush iotlb entry will implicitly flush write buffer */
 	return 0;
 }
 
-static int inline iommu_flush_iotlb_global(struct intel_iommu *iommu,
-	int non_present_entry_flush)
-{
-	return __iommu_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
-		non_present_entry_flush);
-}
-
-static int inline iommu_flush_iotlb_dsi(struct intel_iommu *iommu, u16 did,
-	int non_present_entry_flush)
-{
-	return __iommu_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
-		non_present_entry_flush);
-}
-
 static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
 	u64 addr, unsigned int pages, int non_present_entry_flush)
 {
 	unsigned int mask;
 
-	BUG_ON(addr & (~PAGE_MASK_4K));
+	BUG_ON(addr & (~VTD_PAGE_MASK));
 	BUG_ON(pages == 0);
 
 	/* Fallback to domain selective flush if no PSI support */
 	if (!cap_pgsel_inv(iommu->cap))
-		return iommu_flush_iotlb_dsi(iommu, did,
-			non_present_entry_flush);
+		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
+						DMA_TLB_DSI_FLUSH,
+						non_present_entry_flush);
 
 	/*
 	 * PSI requires page size to be 2 ^ x, and the base address is naturally
@@ -694,11 +664,12 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
 	mask = ilog2(__roundup_pow_of_two(pages));
 	/* Fallback to domain selective flush if size is too big */
 	if (mask > cap_max_amask_val(iommu->cap))
-		return iommu_flush_iotlb_dsi(iommu, did,
-			non_present_entry_flush);
+		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
+			DMA_TLB_DSI_FLUSH, non_present_entry_flush);
 
-	return __iommu_flush_iotlb(iommu, did, addr, mask,
-		DMA_TLB_PSI_FLUSH, non_present_entry_flush);
+	return iommu->flush.flush_iotlb(iommu, did, addr, mask,
+					DMA_TLB_PSI_FLUSH,
+					non_present_entry_flush);
 }
 
 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -831,7 +802,7 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
 }
 
 static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
-		u8 fault_reason, u16 source_id, u64 addr)
+		u8 fault_reason, u16 source_id, unsigned long long addr)
 {
 	const char *reason;
 
@@ -1084,9 +1055,9 @@ static void dmar_init_reserved_ranges(void)
 			if (!r->flags || !(r->flags & IORESOURCE_MEM))
 				continue;
 			addr = r->start;
-			addr &= PAGE_MASK_4K;
+			addr &= PAGE_MASK;
 			size = r->end - addr;
-			size = PAGE_ALIGN_4K(size);
+			size = PAGE_ALIGN(size);
 			iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
 				IOVA_PFN(size + addr) - 1);
 			if (!iova)
@@ -1148,7 +1119,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
 	if (!domain->pgd)
 		return -ENOMEM;
-	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE_4K);
+	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
 	return 0;
 }
 
@@ -1164,7 +1135,7 @@ static void domain_exit(struct dmar_domain *domain)
 	/* destroy iovas */
 	put_iova_domain(&domain->iovad);
 	end = DOMAIN_MAX_ADDR(domain->gaw);
-	end = end & (~PAGE_MASK_4K);
+	end = end & (~PAGE_MASK);
 
 	/* clear ptes */
 	dma_pte_clear_range(domain, 0, end);
@@ -1204,11 +1175,13 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	__iommu_flush_cache(iommu, context, sizeof(*context));
 
 	/* it's a non-present to present mapping */
-	if (iommu_flush_context_device(iommu, domain->id,
-		(((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, 1))
+	if (iommu->flush.flush_context(iommu, domain->id,
+		(((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
+		DMA_CCMD_DEVICE_INVL, 1))
 		iommu_flush_write_buffer(iommu);
 	else
-		iommu_flush_iotlb_dsi(iommu, 0, 0);
+		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 	return 0;
 }
@@ -1283,22 +1256,25 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
 	u64 start_pfn, end_pfn;
 	struct dma_pte *pte;
 	int index;
+	int addr_width = agaw_to_width(domain->agaw);
+
+	hpa &= (((u64)1) << addr_width) - 1;
 
 	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
 		return -EINVAL;
-	iova &= PAGE_MASK_4K;
-	start_pfn = ((u64)hpa) >> PAGE_SHIFT_4K;
-	end_pfn = (PAGE_ALIGN_4K(((u64)hpa) + size)) >> PAGE_SHIFT_4K;
+	iova &= PAGE_MASK;
+	start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
+	end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
 	index = 0;
 	while (start_pfn < end_pfn) {
-		pte = addr_to_dma_pte(domain, iova + PAGE_SIZE_4K * index);
+		pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
 		if (!pte)
 			return -ENOMEM;
 		/* We don't need lock here, nobody else
 		 * touches the iova range
 		 */
 		BUG_ON(dma_pte_addr(*pte));
-		dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K);
+		dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT);
 		dma_set_pte_prot(*pte, prot);
 		__iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
 		start_pfn++;
@@ -1310,8 +1286,10 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
 static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
 {
 	clear_context_table(domain->iommu, bus, devfn);
-	iommu_flush_context_global(domain->iommu, 0);
-	iommu_flush_iotlb_global(domain->iommu, 0);
+	domain->iommu->flush.flush_context(domain->iommu, 0, 0, 0,
+					   DMA_CCMD_GLOBAL_INVL, 0);
+	domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0,
+					 DMA_TLB_GLOBAL_FLUSH, 0);
 }
 
 static void domain_remove_dev_info(struct dmar_domain *domain)
@@ -1474,11 +1452,13 @@ error:
 	return find_domain(pdev);
 }
 
-static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end)
+static int iommu_prepare_identity_map(struct pci_dev *pdev,
+				      unsigned long long start,
+				      unsigned long long end)
 {
 	struct dmar_domain *domain;
 	unsigned long size;
-	u64 base;
+	unsigned long long base;
 	int ret;
 
 	printk(KERN_INFO
@@ -1490,9 +1470,9 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end)
 		return -ENOMEM;
 
 	/* The address might not be aligned */
-	base = start & PAGE_MASK_4K;
+	base = start & PAGE_MASK;
 	size = end - base;
-	size = PAGE_ALIGN_4K(size);
+	size = PAGE_ALIGN(size);
 	if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
 			IOVA_PFN(base + size) - 1)) {
 		printk(KERN_ERR "IOMMU: reserve iova failed\n");
@@ -1662,6 +1642,28 @@ int __init init_dmars(void)
 		}
 	}
 
+	for_each_drhd_unit(drhd) {
+		if (drhd->ignored)
+			continue;
+
+		iommu = drhd->iommu;
+		if (dmar_enable_qi(iommu)) {
+			/*
+			 * Queued Invalidate not enabled, use Register Based
+			 * Invalidate
+			 */
+			iommu->flush.flush_context = __iommu_flush_context;
+			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
+			printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
+			       "invalidation\n", drhd->reg_base_addr);
+		} else {
+			iommu->flush.flush_context = qi_flush_context;
+			iommu->flush.flush_iotlb = qi_flush_iotlb;
+			printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
+			       "invalidation\n", drhd->reg_base_addr);
+		}
+	}
+
 	/*
 	 * For each rmrr
 	 *   for each dev attached to rmrr
@@ -1714,9 +1716,10 @@ int __init init_dmars(void)
 
 		iommu_set_root_entry(iommu);
 
-		iommu_flush_context_global(iommu, 0);
-		iommu_flush_iotlb_global(iommu, 0);
-
+		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
+					   0);
+		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
+					 0);
 		iommu_disable_protect_mem_regions(iommu);
 
 		ret = iommu_enable_translation(iommu);
@@ -1738,8 +1741,8 @@ error:
 static inline u64 aligned_size(u64 host_addr, size_t size)
 {
 	u64 addr;
-	addr = (host_addr & (~PAGE_MASK_4K)) + size;
-	return PAGE_ALIGN_4K(addr);
+	addr = (host_addr & (~PAGE_MASK)) + size;
+	return PAGE_ALIGN(addr);
 }
 
 struct iova *
@@ -1753,20 +1756,20 @@ iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
 		return NULL;
 
 	piova = alloc_iova(&domain->iovad,
-			size >> PAGE_SHIFT_4K, IOVA_PFN(end), 1);
+			size >> PAGE_SHIFT, IOVA_PFN(end), 1);
 	return piova;
 }
 
 static struct iova *
 __intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
-	size_t size)
+		   size_t size, u64 dma_mask)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct iova *iova = NULL;
 
-	if ((pdev->dma_mask <= DMA_32BIT_MASK) || (dmar_forcedac)) {
-		iova = iommu_alloc_iova(domain, size, pdev->dma_mask);
-	} else {
+	if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac)
+		iova = iommu_alloc_iova(domain, size, dma_mask);
+	else {
 		/*
 		 * First try to allocate an io virtual address in
 		 * DMA_32BIT_MASK and if that fails then try allocating
@@ -1774,7 +1777,7 @@ __intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
 		 */
 		iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
 		if (!iova)
-			iova = iommu_alloc_iova(domain, size, pdev->dma_mask);
+			iova = iommu_alloc_iova(domain, size, dma_mask);
 	}
 
 	if (!iova) {
@@ -1813,12 +1816,12 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
 	return domain;
 }
 
-static dma_addr_t
-intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
+static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
+				     size_t size, int dir, u64 dma_mask)
 {
 	struct pci_dev *pdev = to_pci_dev(hwdev);
 	struct dmar_domain *domain;
-	unsigned long start_paddr;
+	phys_addr_t start_paddr;
 	struct iova *iova;
 	int prot = 0;
 	int ret;
@@ -1833,11 +1836,11 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
 
 	size = aligned_size((u64)paddr, size);
 
-	iova = __intel_alloc_iova(hwdev, domain, size);
+	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
 	if (!iova)
 		goto error;
 
-	start_paddr = iova->pfn_lo << PAGE_SHIFT_4K;
+	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
 
 	/*
 	 * Check if DMAR supports zero-length reads on write only
@@ -1855,30 +1858,33 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
 	 * is not a big problem
 	 */
 	ret = domain_page_mapping(domain, start_paddr,
-		((u64)paddr) & PAGE_MASK_4K, size, prot);
+		((u64)paddr) & PAGE_MASK, size, prot);
 	if (ret)
 		goto error;
 
-	pr_debug("Device %s request: %lx@%llx mapping: %lx@%llx, dir %d\n",
-		pci_name(pdev), size, (u64)paddr,
-		size, (u64)start_paddr, dir);
-
 	/* it's a non-present to present mapping */
 	ret = iommu_flush_iotlb_psi(domain->iommu, domain->id,
-			start_paddr, size >> PAGE_SHIFT_4K, 1);
+			start_paddr, size >> VTD_PAGE_SHIFT, 1);
 	if (ret)
 		iommu_flush_write_buffer(domain->iommu);
 
-	return (start_paddr + ((u64)paddr & (~PAGE_MASK_4K)));
+	return start_paddr + ((u64)paddr & (~PAGE_MASK));
 
 error:
 	if (iova)
 		__free_iova(&domain->iovad, iova);
 	printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n",
-		pci_name(pdev), size, (u64)paddr, dir);
+		pci_name(pdev), size, (unsigned long long)paddr, dir);
 	return 0;
 }
 
+dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr,
+			    size_t size, int dir)
+{
+	return __intel_map_single(hwdev, paddr, size, dir,
+				  to_pci_dev(hwdev)->dma_mask);
+}
+
 static void flush_unmaps(void)
 {
 	int i, j;
@@ -1891,7 +1897,8 @@ static void flush_unmaps(void)
 			struct intel_iommu *iommu =
 				deferred_flush[i].domain[0]->iommu;
 
-			iommu_flush_iotlb_global(iommu, 0);
+			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+						 DMA_TLB_GLOBAL_FLUSH, 0);
 			for (j = 0; j < deferred_flush[i].next; j++) {
 				__free_iova(&deferred_flush[i].domain[j]->iovad,
 					deferred_flush[i].iova[j]);
@@ -1936,8 +1943,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
 	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
 }
 
-static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
-	size_t size, int dir)
+void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
+			int dir)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct dmar_domain *domain;
@@ -1953,11 +1960,11 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
 	if (!iova)
 		return;
 
-	start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+	start_addr = iova->pfn_lo << PAGE_SHIFT;
 	size = aligned_size((u64)dev_addr, size);
 
 	pr_debug("Device %s unmapping: %lx@%llx\n",
-		pci_name(pdev), size, (u64)start_addr);
+		pci_name(pdev), size, (unsigned long long)start_addr);
 
 	/* clear the whole page */
 	dma_pte_clear_range(domain, start_addr, start_addr + size);
@@ -1965,7 +1972,7 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
 	dma_pte_free_pagetable(domain, start_addr, start_addr + size);
 	if (intel_iommu_strict) {
 		if (iommu_flush_iotlb_psi(domain->iommu,
-			domain->id, start_addr, size >> PAGE_SHIFT_4K, 0))
+			domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
 			iommu_flush_write_buffer(domain->iommu);
 		/* free iova */
 		__free_iova(&domain->iovad, iova);
@@ -1978,13 +1985,13 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
 	}
 }
 
-static void * intel_alloc_coherent(struct device *hwdev, size_t size,
+void *intel_alloc_coherent(struct device *hwdev, size_t size,
 			   dma_addr_t *dma_handle, gfp_t flags)
 {
 	void *vaddr;
 	int order;
 
-	size = PAGE_ALIGN_4K(size);
+	size = PAGE_ALIGN(size);
 	order = get_order(size);
 	flags &= ~(GFP_DMA | GFP_DMA32);
 
@@ -1993,19 +2000,21 @@ static void * intel_alloc_coherent(struct device *hwdev, size_t size,
 		return NULL;
 	memset(vaddr, 0, size);
 
-	*dma_handle = intel_map_single(hwdev, virt_to_bus(vaddr), size, DMA_BIDIRECTIONAL);
+	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
+					 DMA_BIDIRECTIONAL,
+					 hwdev->coherent_dma_mask);
 	if (*dma_handle)
 		return vaddr;
 	free_pages((unsigned long)vaddr, order);
 	return NULL;
 }
 
-static void intel_free_coherent(struct device *hwdev, size_t size,
-	void *vaddr, dma_addr_t dma_handle)
+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+			 dma_addr_t dma_handle)
 {
 	int order;
 
-	size = PAGE_ALIGN_4K(size);
+	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
 	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
@@ -2013,8 +2022,9 @@ static void intel_free_coherent(struct device *hwdev, size_t size,
 }
 
 #define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
-	int nelems, int dir)
+
+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
+		    int nelems, int dir)
 {
 	int i;
 	struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -2038,7 +2048,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 		size += aligned_size((u64)addr, sg->length);
 	}
 
-	start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+	start_addr = iova->pfn_lo << PAGE_SHIFT;
 
 	/* clear the whole page */
 	dma_pte_clear_range(domain, start_addr, start_addr + size);
@@ -2046,7 +2056,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	dma_pte_free_pagetable(domain, start_addr, start_addr + size);
 
 	if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
-			size >> PAGE_SHIFT_4K, 0))
+			size >> VTD_PAGE_SHIFT, 0))
 		iommu_flush_write_buffer(domain->iommu);
 
 	/* free iova */
@@ -2067,8 +2077,8 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 	return nelems;
 }
 
-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
-	int nelems, int dir)
+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
+		 int dir)
 {
 	void *addr;
 	int i;
@@ -2096,7 +2106,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
 		size += aligned_size((u64)addr, sg->length);
 	}
 
-	iova = __intel_alloc_iova(hwdev, domain, size);
+	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
 	if (!iova) {
 		sglist->dma_length = 0;
 		return 0;
@@ -2112,14 +2122,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
 	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
 		prot |= DMA_PTE_WRITE;
 
-	start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+	start_addr = iova->pfn_lo << PAGE_SHIFT;
 	offset = 0;
 	for_each_sg(sglist, sg, nelems, i) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
 		addr = (void *)virt_to_phys(addr);
 		size = aligned_size((u64)addr, sg->length);
 		ret = domain_page_mapping(domain, start_addr + offset,
-			((u64)addr) & PAGE_MASK_4K,
+			((u64)addr) & PAGE_MASK,
 			size, prot);
 		if (ret) {
 			/* clear the page */
@@ -2133,14 +2143,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
 			return 0;
 		}
 		sg->dma_address = start_addr + offset +
-				((u64)addr & (~PAGE_MASK_4K));
+				((u64)addr & (~PAGE_MASK));
 		sg->dma_length = sg->length;
 		offset += size;
 	}
 
 	/* it's a non-present to present mapping */
 	if (iommu_flush_iotlb_psi(domain->iommu, domain->id,
-			start_addr, offset >> PAGE_SHIFT_4K, 1))
+			start_addr, offset >> VTD_PAGE_SHIFT, 1))
 		iommu_flush_write_buffer(domain->iommu);
 	return nelems;
 }
@@ -2180,7 +2190,6 @@ static inline int iommu_devinfo_cache_init(void)
 					 sizeof(struct device_domain_info),
 					 0,
 					 SLAB_HWCACHE_ALIGN,
-
 					 NULL);
 	if (!iommu_devinfo_cache) {
 		printk(KERN_ERR "Couldn't create devinfo cache\n");
@@ -2198,7 +2207,6 @@ static inline int iommu_iova_cache_init(void)
 					sizeof(struct iova),
 					0,
 					SLAB_HWCACHE_ALIGN,
-
 					NULL);
 	if (!iommu_iova_cache) {
 		printk(KERN_ERR "Couldn't create iova cache\n");
@@ -2327,7 +2335,7 @@ void intel_iommu_domain_exit(struct dmar_domain *domain)
 		return;
 
 	end = DOMAIN_MAX_ADDR(domain->gaw);
-	end = end & (~PAGE_MASK_4K);
+	end = end & (~VTD_PAGE_MASK);
 
 	/* clear ptes */
 	dma_pte_clear_range(domain, 0, end);
@@ -2423,6 +2431,6 @@ u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova)
 	if (pte)
 		pfn = dma_pte_addr(*pte);
 
-	return pfn >> PAGE_SHIFT_4K;
+	return pfn >> VTD_PAGE_SHIFT;
 }
 EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn);
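
The intel-iommu.c hunks above split the old PAGE_*_4K helpers into the kernel's generic PAGE_* macros (for CPU/IOVA arithmetic) and VT-d-specific VTD_PAGE_* macros (for hardware page-table units, which are always 4KB regardless of the CPU page size). The macro definitions themselves are not part of this diff; the sketch below shows one plausible way they could be defined and is an assumption, not a quote from the header.

/*
 * Hypothetical sketch of the VT-d page helpers used above.  VT-d page
 * tables always work in 4KB units, independent of the kernel's PAGE_SIZE.
 * The authoritative definitions live in the intel-iommu header; these are
 * illustrative only.
 */
#include <linux/types.h>

#define VTD_PAGE_SHIFT		12
#define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK		(((u64)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
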
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index bbf66ea8fd87..96cf8ecd04ce 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -43,6 +43,20 @@ static void __devinit quirk_mellanox_tavor(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor);
 
+/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
+int forbid_dac __read_mostly;
+EXPORT_SYMBOL(forbid_dac);
+
+static __devinit void via_no_dac(struct pci_dev *dev)
+{
+	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
+		dev_info(&dev->dev,
+			 "VIA PCI bridge detected. Disabling DAC.\n");
+		forbid_dac = 1;
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
+
 /* Deal with broken BIOS'es that neglect to enable passive release,
    which can cause problems in combination with the 82441FX/PPro MTRRs */
 static void quirk_passive_release(struct pci_dev *dev)
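
The quirks.c hunk exports a forbid_dac flag that the VIA fixup sets when a VIA PCI bridge is found; the intent is that the arch DMA code consult it before granting a device a 64-bit (double address cycle) DMA mask. How the flag is consumed is outside this diff; the sketch below is a hedged illustration of a dma_supported()-style check, with a made-up function name, not code from this patch.

/*
 * Illustrative consumer of forbid_dac (not part of this patch): an arch
 * dma_supported() hook could refuse a >32-bit mask when a VIA bridge was
 * detected, forcing the device back to 32-bit (single address cycle) DMA.
 */
#include <linux/device.h>
#include <linux/types.h>

extern int forbid_dac;

static int example_dma_supported(struct device *dev, u64 mask)
{
	/* Refuse DAC addressing if the quirk flagged a problematic bridge. */
	if (mask > 0xffffffffULL && forbid_dac > 0) {
		dev_info(dev, "Disallowing DAC for device\n");
		return 0;
	}
	return 1;
}
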