| | | |
|---|---|---|
| author | David Woodhouse <David.Woodhouse@intel.com> | 2008-10-20 15:16:53 -0400 |
| committer | David Woodhouse <David.Woodhouse@intel.com> | 2008-10-20 15:19:36 -0400 |
| commit | b364776ad1208a71f0c53578c84619a395412a8d (patch) | |
| tree | d6050e5db6298095324ccb8af7d477684485d52e | |
| parent | 6da0b38f4433fb0f24615449d7966471b6e5eae0 (diff) | |
| parent | 6c8909b42fee1be67647bcd2518161a0fa8ca533 (diff) | |
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
drivers/pci/intel-iommu.c
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | MAINTAINERS | 7 |
| -rw-r--r-- | arch/x86/kernel/amd_iommu_init.c | 2 |
| -rw-r--r-- | arch/x86/kernel/pci-dma.c | 16 |
| -rw-r--r-- | drivers/pci/dmar.c | 119 |
| -rw-r--r-- | drivers/pci/intel-iommu.c | 250 |
| -rw-r--r-- | drivers/pci/quirks.c | 14 |
| -rw-r--r-- | include/asm-x86/iommu.h | 4 |
| -rw-r--r-- | include/linux/dma_remapping.h | 27 |
| -rw-r--r-- | include/linux/intel-iommu.h | 66 |
9 files changed, 306 insertions, 199 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 355c192d6997..22303e5fe4ce 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
| @@ -2176,6 +2176,13 @@ M: maciej.sosnowski@intel.com | |||
| 2176 | L: linux-kernel@vger.kernel.org | 2176 | L: linux-kernel@vger.kernel.org |
| 2177 | S: Supported | 2177 | S: Supported |
| 2178 | 2178 | ||
| 2179 | INTEL IOMMU (VT-d) | ||
| 2180 | P: David Woodhouse | ||
| 2181 | M: dwmw2@infradead.org | ||
| 2182 | L: iommu@lists.linux-foundation.org | ||
| 2183 | T: git://git.infradead.org/iommu-2.6.git | ||
| 2184 | S: Supported | ||
| 2185 | |||
| 2179 | INTEL IOP-ADMA DMA DRIVER | 2186 | INTEL IOP-ADMA DMA DRIVER |
| 2180 | P: Dan Williams | 2187 | P: Dan Williams |
| 2181 | M: dan.j.williams@intel.com | 2188 | M: dan.j.williams@intel.com |
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 4cd8083c58be..0cdcda35a05f 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
| @@ -212,7 +212,7 @@ static void __init iommu_set_exclusion_range(struct amd_iommu *iommu) | |||
| 212 | /* Programs the physical address of the device table into the IOMMU hardware */ | 212 | /* Programs the physical address of the device table into the IOMMU hardware */ |
| 213 | static void __init iommu_set_device_table(struct amd_iommu *iommu) | 213 | static void __init iommu_set_device_table(struct amd_iommu *iommu) |
| 214 | { | 214 | { |
| 215 | u32 entry; | 215 | u64 entry; |
| 216 | 216 | ||
| 217 | BUG_ON(iommu->mmio_base == NULL); | 217 | BUG_ON(iommu->mmio_base == NULL); |
| 218 | 218 | ||
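The amd_iommu_init.c hunk above widens the local used to program the device-table base register from u32 to u64. A standalone illustration of the failure mode that motivates it (values and names below are made up, not the driver's):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical device-table physical address above 4 GiB. */
	uint64_t devtab_phys = 0x12345678000ULL;

	uint32_t truncated = devtab_phys;	/* upper bits silently dropped */
	uint64_t full      = devtab_phys;	/* what the hardware must be given */

	printf("u32 entry: %#llx\n", (unsigned long long)truncated); /* 0x45678000 */
	printf("u64 entry: %#llx\n", (unsigned long long)full);      /* 0x12345678000 */
	return 0;
}
```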
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 192624820217..1972266e8ba5 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
| @@ -9,8 +9,6 @@ | |||
| 9 | #include <asm/calgary.h> | 9 | #include <asm/calgary.h> |
| 10 | #include <asm/amd_iommu.h> | 10 | #include <asm/amd_iommu.h> |
| 11 | 11 | ||
| 12 | static int forbid_dac __read_mostly; | ||
| 13 | |||
| 14 | struct dma_mapping_ops *dma_ops; | 12 | struct dma_mapping_ops *dma_ops; |
| 15 | EXPORT_SYMBOL(dma_ops); | 13 | EXPORT_SYMBOL(dma_ops); |
| 16 | 14 | ||
| @@ -293,17 +291,3 @@ void pci_iommu_shutdown(void) | |||
| 293 | } | 291 | } |
| 294 | /* Must execute after PCI subsystem */ | 292 | /* Must execute after PCI subsystem */ |
| 295 | fs_initcall(pci_iommu_init); | 293 | fs_initcall(pci_iommu_init); |
| 296 | |||
| 297 | #ifdef CONFIG_PCI | ||
| 298 | /* Many VIA bridges seem to corrupt data for DAC. Disable it here */ | ||
| 299 | |||
| 300 | static __devinit void via_no_dac(struct pci_dev *dev) | ||
| 301 | { | ||
| 302 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) { | ||
| 303 | printk(KERN_INFO "PCI: VIA PCI bridge detected." | ||
| 304 | "Disabling DAC.\n"); | ||
| 305 | forbid_dac = 1; | ||
| 306 | } | ||
| 307 | } | ||
| 308 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac); | ||
| 309 | #endif | ||
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index e842e756308a..7b3751136e63 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
| @@ -188,12 +188,11 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header) | |||
| 188 | return 0; | 188 | return 0; |
| 189 | } | 189 | } |
| 190 | 190 | ||
| 191 | static int __init | 191 | static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru) |
| 192 | dmar_parse_dev(struct dmar_drhd_unit *dmaru) | ||
| 193 | { | 192 | { |
| 194 | struct acpi_dmar_hardware_unit *drhd; | 193 | struct acpi_dmar_hardware_unit *drhd; |
| 195 | static int include_all; | 194 | static int include_all; |
| 196 | int ret; | 195 | int ret = 0; |
| 197 | 196 | ||
| 198 | drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr; | 197 | drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr; |
| 199 | 198 | ||
| @@ -277,14 +276,15 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header) | |||
| 277 | drhd = (struct acpi_dmar_hardware_unit *)header; | 276 | drhd = (struct acpi_dmar_hardware_unit *)header; |
| 278 | printk (KERN_INFO PREFIX | 277 | printk (KERN_INFO PREFIX |
| 279 | "DRHD (flags: 0x%08x)base: 0x%016Lx\n", | 278 | "DRHD (flags: 0x%08x)base: 0x%016Lx\n", |
| 280 | drhd->flags, drhd->address); | 279 | drhd->flags, (unsigned long long)drhd->address); |
| 281 | break; | 280 | break; |
| 282 | case ACPI_DMAR_TYPE_RESERVED_MEMORY: | 281 | case ACPI_DMAR_TYPE_RESERVED_MEMORY: |
| 283 | rmrr = (struct acpi_dmar_reserved_memory *)header; | 282 | rmrr = (struct acpi_dmar_reserved_memory *)header; |
| 284 | 283 | ||
| 285 | printk (KERN_INFO PREFIX | 284 | printk (KERN_INFO PREFIX |
| 286 | "RMRR base: 0x%016Lx end: 0x%016Lx\n", | 285 | "RMRR base: 0x%016Lx end: 0x%016Lx\n", |
| 287 | rmrr->base_address, rmrr->end_address); | 286 | (unsigned long long)rmrr->base_address, |
| 287 | (unsigned long long)rmrr->end_address); | ||
| 288 | break; | 288 | break; |
| 289 | } | 289 | } |
| 290 | } | 290 | } |
| @@ -304,7 +304,7 @@ parse_dmar_table(void) | |||
| 304 | if (!dmar) | 304 | if (!dmar) |
| 305 | return -ENODEV; | 305 | return -ENODEV; |
| 306 | 306 | ||
| 307 | if (dmar->width < PAGE_SHIFT_4K - 1) { | 307 | if (dmar->width < PAGE_SHIFT - 1) { |
| 308 | printk(KERN_WARNING PREFIX "Invalid DMAR haw\n"); | 308 | printk(KERN_WARNING PREFIX "Invalid DMAR haw\n"); |
| 309 | return -EINVAL; | 309 | return -EINVAL; |
| 310 | } | 310 | } |
| @@ -455,8 +455,8 @@ void __init detect_intel_iommu(void) | |||
| 455 | 455 | ||
| 456 | ret = early_dmar_detect(); | 456 | ret = early_dmar_detect(); |
| 457 | 457 | ||
| 458 | #ifdef CONFIG_DMAR | ||
| 459 | { | 458 | { |
| 459 | #ifdef CONFIG_INTR_REMAP | ||
| 460 | struct acpi_table_dmar *dmar; | 460 | struct acpi_table_dmar *dmar; |
| 461 | /* | 461 | /* |
| 462 | * for now we will disable dma-remapping when interrupt | 462 | * for now we will disable dma-remapping when interrupt |
| @@ -465,28 +465,18 @@ void __init detect_intel_iommu(void) | |||
| 465 | * is added, we will not need this any more. | 465 | * is added, we will not need this any more. |
| 466 | */ | 466 | */ |
| 467 | dmar = (struct acpi_table_dmar *) dmar_tbl; | 467 | dmar = (struct acpi_table_dmar *) dmar_tbl; |
| 468 | if (ret && cpu_has_x2apic && dmar->flags & 0x1) { | 468 | if (ret && cpu_has_x2apic && dmar->flags & 0x1) |
| 469 | printk(KERN_INFO | 469 | printk(KERN_INFO |
| 470 | "Queued invalidation will be enabled to support " | 470 | "Queued invalidation will be enabled to support " |
| 471 | "x2apic and Intr-remapping.\n"); | 471 | "x2apic and Intr-remapping.\n"); |
| 472 | printk(KERN_INFO | 472 | #endif |
| 473 | "Disabling IOMMU detection, because of missing " | ||
| 474 | "queued invalidation support for IOTLB " | ||
| 475 | "invalidation\n"); | ||
| 476 | printk(KERN_INFO | ||
| 477 | "Use \"nox2apic\", if you want to use Intel " | ||
| 478 | " IOMMU for DMA-remapping and don't care about " | ||
| 479 | " x2apic support\n"); | ||
| 480 | |||
| 481 | dmar_disabled = 1; | ||
| 482 | return; | ||
| 483 | } | ||
| 484 | 473 | ||
| 474 | #ifdef CONFIG_DMAR | ||
| 485 | if (ret && !no_iommu && !iommu_detected && !swiotlb && | 475 | if (ret && !no_iommu && !iommu_detected && !swiotlb && |
| 486 | !dmar_disabled) | 476 | !dmar_disabled) |
| 487 | iommu_detected = 1; | 477 | iommu_detected = 1; |
| 488 | } | ||
| 489 | #endif | 478 | #endif |
| 479 | } | ||
| 490 | } | 480 | } |
| 491 | 481 | ||
| 492 | 482 | ||
| @@ -503,7 +493,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) | |||
| 503 | 493 | ||
| 504 | iommu->seq_id = iommu_allocated++; | 494 | iommu->seq_id = iommu_allocated++; |
| 505 | 495 | ||
| 506 | iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K); | 496 | iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE); |
| 507 | if (!iommu->reg) { | 497 | if (!iommu->reg) { |
| 508 | printk(KERN_ERR "IOMMU: can't map the region\n"); | 498 | printk(KERN_ERR "IOMMU: can't map the region\n"); |
| 509 | goto error; | 499 | goto error; |
| @@ -514,8 +504,8 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) | |||
| 514 | /* the registers might be more than one page */ | 504 | /* the registers might be more than one page */ |
| 515 | map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), | 505 | map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), |
| 516 | cap_max_fault_reg_offset(iommu->cap)); | 506 | cap_max_fault_reg_offset(iommu->cap)); |
| 517 | map_size = PAGE_ALIGN_4K(map_size); | 507 | map_size = VTD_PAGE_ALIGN(map_size); |
| 518 | if (map_size > PAGE_SIZE_4K) { | 508 | if (map_size > VTD_PAGE_SIZE) { |
| 519 | iounmap(iommu->reg); | 509 | iounmap(iommu->reg); |
| 520 | iommu->reg = ioremap(drhd->reg_base_addr, map_size); | 510 | iommu->reg = ioremap(drhd->reg_base_addr, map_size); |
| 521 | if (!iommu->reg) { | 511 | if (!iommu->reg) { |
| @@ -526,8 +516,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) | |||
| 526 | 516 | ||
| 527 | ver = readl(iommu->reg + DMAR_VER_REG); | 517 | ver = readl(iommu->reg + DMAR_VER_REG); |
| 528 | pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n", | 518 | pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n", |
| 529 | drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver), | 519 | (unsigned long long)drhd->reg_base_addr, |
| 530 | iommu->cap, iommu->ecap); | 520 | DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver), |
| 521 | (unsigned long long)iommu->cap, | ||
| 522 | (unsigned long long)iommu->ecap); | ||
| 531 | 523 | ||
| 532 | spin_lock_init(&iommu->register_lock); | 524 | spin_lock_init(&iommu->register_lock); |
| 533 | 525 | ||
| @@ -580,11 +572,11 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu) | |||
| 580 | 572 | ||
| 581 | hw = qi->desc; | 573 | hw = qi->desc; |
| 582 | 574 | ||
| 583 | spin_lock(&qi->q_lock); | 575 | spin_lock_irqsave(&qi->q_lock, flags); |
| 584 | while (qi->free_cnt < 3) { | 576 | while (qi->free_cnt < 3) { |
| 585 | spin_unlock(&qi->q_lock); | 577 | spin_unlock_irqrestore(&qi->q_lock, flags); |
| 586 | cpu_relax(); | 578 | cpu_relax(); |
| 587 | spin_lock(&qi->q_lock); | 579 | spin_lock_irqsave(&qi->q_lock, flags); |
| 588 | } | 580 | } |
| 589 | 581 | ||
| 590 | index = qi->free_head; | 582 | index = qi->free_head; |
| @@ -605,15 +597,22 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu) | |||
| 605 | qi->free_head = (qi->free_head + 2) % QI_LENGTH; | 597 | qi->free_head = (qi->free_head + 2) % QI_LENGTH; |
| 606 | qi->free_cnt -= 2; | 598 | qi->free_cnt -= 2; |
| 607 | 599 | ||
| 608 | spin_lock_irqsave(&iommu->register_lock, flags); | 600 | spin_lock(&iommu->register_lock); |
| 609 | /* | 601 | /* |
| 610 | * update the HW tail register indicating the presence of | 602 | * update the HW tail register indicating the presence of |
| 611 | * new descriptors. | 603 | * new descriptors. |
| 612 | */ | 604 | */ |
| 613 | writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG); | 605 | writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG); |
| 614 | spin_unlock_irqrestore(&iommu->register_lock, flags); | 606 | spin_unlock(&iommu->register_lock); |
| 615 | 607 | ||
| 616 | while (qi->desc_status[wait_index] != QI_DONE) { | 608 | while (qi->desc_status[wait_index] != QI_DONE) { |
| 609 | /* | ||
| 610 | * We will leave the interrupts disabled, to prevent interrupt | ||
| 611 | * context to queue another cmd while a cmd is already submitted | ||
| 612 | * and waiting for completion on this cpu. This is to avoid | ||
| 613 | * a deadlock where the interrupt context can wait indefinitely | ||
| 614 | * for free slots in the queue. | ||
| 615 | */ | ||
| 617 | spin_unlock(&qi->q_lock); | 616 | spin_unlock(&qi->q_lock); |
| 618 | cpu_relax(); | 617 | cpu_relax(); |
| 619 | spin_lock(&qi->q_lock); | 618 | spin_lock(&qi->q_lock); |
| @@ -622,7 +621,7 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu) | |||
| 622 | qi->desc_status[index] = QI_DONE; | 621 | qi->desc_status[index] = QI_DONE; |
| 623 | 622 | ||
| 624 | reclaim_free_desc(qi); | 623 | reclaim_free_desc(qi); |
| 625 | spin_unlock(&qi->q_lock); | 624 | spin_unlock_irqrestore(&qi->q_lock, flags); |
| 626 | } | 625 | } |
| 627 | 626 | ||
| 628 | /* | 627 | /* |
| @@ -638,6 +637,62 @@ void qi_global_iec(struct intel_iommu *iommu) | |||
| 638 | qi_submit_sync(&desc, iommu); | 637 | qi_submit_sync(&desc, iommu); |
| 639 | } | 638 | } |
| 640 | 639 | ||
| 640 | int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, | ||
| 641 | u64 type, int non_present_entry_flush) | ||
| 642 | { | ||
| 643 | |||
| 644 | struct qi_desc desc; | ||
| 645 | |||
| 646 | if (non_present_entry_flush) { | ||
| 647 | if (!cap_caching_mode(iommu->cap)) | ||
| 648 | return 1; | ||
| 649 | else | ||
| 650 | did = 0; | ||
| 651 | } | ||
| 652 | |||
| 653 | desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did) | ||
| 654 | | QI_CC_GRAN(type) | QI_CC_TYPE; | ||
| 655 | desc.high = 0; | ||
| 656 | |||
| 657 | qi_submit_sync(&desc, iommu); | ||
| 658 | |||
| 659 | return 0; | ||
| 660 | |||
| 661 | } | ||
| 662 | |||
| 663 | int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, | ||
| 664 | unsigned int size_order, u64 type, | ||
| 665 | int non_present_entry_flush) | ||
| 666 | { | ||
| 667 | u8 dw = 0, dr = 0; | ||
| 668 | |||
| 669 | struct qi_desc desc; | ||
| 670 | int ih = 0; | ||
| 671 | |||
| 672 | if (non_present_entry_flush) { | ||
| 673 | if (!cap_caching_mode(iommu->cap)) | ||
| 674 | return 1; | ||
| 675 | else | ||
| 676 | did = 0; | ||
| 677 | } | ||
| 678 | |||
| 679 | if (cap_write_drain(iommu->cap)) | ||
| 680 | dw = 1; | ||
| 681 | |||
| 682 | if (cap_read_drain(iommu->cap)) | ||
| 683 | dr = 1; | ||
| 684 | |||
| 685 | desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw) | ||
| 686 | | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE; | ||
| 687 | desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih) | ||
| 688 | | QI_IOTLB_AM(size_order); | ||
| 689 | |||
| 690 | qi_submit_sync(&desc, iommu); | ||
| 691 | |||
| 692 | return 0; | ||
| 693 | |||
| 694 | } | ||
| 695 | |||
| 641 | /* | 696 | /* |
| 642 | * Enable Queued Invalidation interface. This is a must to support | 697 | * Enable Queued Invalidation interface. This is a must to support |
| 643 | * interrupt-remapping. Also used by DMA-remapping, which replaces | 698 | * interrupt-remapping. Also used by DMA-remapping, which replaces |
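The qi_submit_sync() changes above keep interrupts disabled (via the queue lock's irqsave) across the whole submit-and-wait sequence, for the reason spelled out in the new comment. A minimal sketch of that locking shape, with a simplified hypothetical structure rather than the actual driver types:

```c
#include <linux/spinlock.h>
#include <asm/processor.h>	/* cpu_relax() */

struct qi_sketch {
	spinlock_t q_lock;
	int done;		/* stand-in for qi->desc_status[wait_index] */
};

static void qi_submit_sync_sketch(struct qi_sketch *qi)
{
	unsigned long flags;

	/* Interrupts stay off for the whole call, so an interrupt handler on
	 * this CPU cannot re-enter and queue another descriptor while we are
	 * still spinning below -- that re-entry is the deadlock the comment
	 * in the hunk describes. */
	spin_lock_irqsave(&qi->q_lock, flags);

	/* ... reserve slots, write the descriptor plus a wait descriptor,
	 *     then update the hardware tail register ... */

	while (!qi->done) {
		spin_unlock(&qi->q_lock);	/* drop the lock, keep irqs off */
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	spin_unlock_irqrestore(&qi->q_lock, flags);
}
```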
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 8b51e10b7783..a2692724b68f 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
| @@ -18,6 +18,7 @@ | |||
| 18 | * Author: Ashok Raj <ashok.raj@intel.com> | 18 | * Author: Ashok Raj <ashok.raj@intel.com> |
| 19 | * Author: Shaohua Li <shaohua.li@intel.com> | 19 | * Author: Shaohua Li <shaohua.li@intel.com> |
| 20 | * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> | 20 | * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> |
| 21 | * Author: Fenghua Yu <fenghua.yu@intel.com> | ||
| 21 | */ | 22 | */ |
| 22 | 23 | ||
| 23 | #include <linux/init.h> | 24 | #include <linux/init.h> |
| @@ -35,11 +36,13 @@ | |||
| 35 | #include <linux/timer.h> | 36 | #include <linux/timer.h> |
| 36 | #include <linux/iova.h> | 37 | #include <linux/iova.h> |
| 37 | #include <linux/intel-iommu.h> | 38 | #include <linux/intel-iommu.h> |
| 38 | #include <asm/proto.h> /* force_iommu in this header in x86-64*/ | ||
| 39 | #include <asm/cacheflush.h> | 39 | #include <asm/cacheflush.h> |
| 40 | #include <asm/iommu.h> | 40 | #include <asm/iommu.h> |
| 41 | #include "pci.h" | 41 | #include "pci.h" |
| 42 | 42 | ||
| 43 | #define ROOT_SIZE VTD_PAGE_SIZE | ||
| 44 | #define CONTEXT_SIZE VTD_PAGE_SIZE | ||
| 45 | |||
| 43 | #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) | 46 | #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) |
| 44 | #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) | 47 | #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) |
| 45 | 48 | ||
| @@ -199,7 +202,7 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, | |||
| 199 | spin_unlock_irqrestore(&iommu->lock, flags); | 202 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 200 | return NULL; | 203 | return NULL; |
| 201 | } | 204 | } |
| 202 | __iommu_flush_cache(iommu, (void *)context, PAGE_SIZE_4K); | 205 | __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE); |
| 203 | phy_addr = virt_to_phys((void *)context); | 206 | phy_addr = virt_to_phys((void *)context); |
| 204 | set_root_value(root, phy_addr); | 207 | set_root_value(root, phy_addr); |
| 205 | set_root_present(root); | 208 | set_root_present(root); |
| @@ -345,7 +348,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr) | |||
| 345 | return NULL; | 348 | return NULL; |
| 346 | } | 349 | } |
| 347 | __iommu_flush_cache(domain->iommu, tmp_page, | 350 | __iommu_flush_cache(domain->iommu, tmp_page, |
| 348 | PAGE_SIZE_4K); | 351 | PAGE_SIZE); |
| 349 | dma_set_pte_addr(*pte, virt_to_phys(tmp_page)); | 352 | dma_set_pte_addr(*pte, virt_to_phys(tmp_page)); |
| 350 | /* | 353 | /* |
| 351 | * high level table always sets r/w, last level page | 354 | * high level table always sets r/w, last level page |
| @@ -408,13 +411,13 @@ static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end) | |||
| 408 | start &= (((u64)1) << addr_width) - 1; | 411 | start &= (((u64)1) << addr_width) - 1; |
| 409 | end &= (((u64)1) << addr_width) - 1; | 412 | end &= (((u64)1) << addr_width) - 1; |
| 410 | /* in case it's partial page */ | 413 | /* in case it's partial page */ |
| 411 | start = PAGE_ALIGN_4K(start); | 414 | start = PAGE_ALIGN(start); |
| 412 | end &= PAGE_MASK_4K; | 415 | end &= PAGE_MASK; |
| 413 | 416 | ||
| 414 | /* we don't need lock here, nobody else touches the iova range */ | 417 | /* we don't need lock here, nobody else touches the iova range */ |
| 415 | while (start < end) { | 418 | while (start < end) { |
| 416 | dma_pte_clear_one(domain, start); | 419 | dma_pte_clear_one(domain, start); |
| 417 | start += PAGE_SIZE_4K; | 420 | start += VTD_PAGE_SIZE; |
| 418 | } | 421 | } |
| 419 | } | 422 | } |
| 420 | 423 | ||
| @@ -468,7 +471,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu) | |||
| 468 | if (!root) | 471 | if (!root) |
| 469 | return -ENOMEM; | 472 | return -ENOMEM; |
| 470 | 473 | ||
| 471 | __iommu_flush_cache(iommu, root, PAGE_SIZE_4K); | 474 | __iommu_flush_cache(iommu, root, ROOT_SIZE); |
| 472 | 475 | ||
| 473 | spin_lock_irqsave(&iommu->lock, flags); | 476 | spin_lock_irqsave(&iommu->lock, flags); |
| 474 | iommu->root_entry = root; | 477 | iommu->root_entry = root; |
| @@ -567,27 +570,6 @@ static int __iommu_flush_context(struct intel_iommu *iommu, | |||
| 567 | return 0; | 570 | return 0; |
| 568 | } | 571 | } |
| 569 | 572 | ||
| 570 | static int inline iommu_flush_context_global(struct intel_iommu *iommu, | ||
| 571 | int non_present_entry_flush) | ||
| 572 | { | ||
| 573 | return __iommu_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL, | ||
| 574 | non_present_entry_flush); | ||
| 575 | } | ||
| 576 | |||
| 577 | static int inline iommu_flush_context_domain(struct intel_iommu *iommu, u16 did, | ||
| 578 | int non_present_entry_flush) | ||
| 579 | { | ||
| 580 | return __iommu_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL, | ||
| 581 | non_present_entry_flush); | ||
| 582 | } | ||
| 583 | |||
| 584 | static int inline iommu_flush_context_device(struct intel_iommu *iommu, | ||
| 585 | u16 did, u16 source_id, u8 function_mask, int non_present_entry_flush) | ||
| 586 | { | ||
| 587 | return __iommu_flush_context(iommu, did, source_id, function_mask, | ||
| 588 | DMA_CCMD_DEVICE_INVL, non_present_entry_flush); | ||
| 589 | } | ||
| 590 | |||
| 591 | /* return value determine if we need a write buffer flush */ | 573 | /* return value determine if we need a write buffer flush */ |
| 592 | static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, | 574 | static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, |
| 593 | u64 addr, unsigned int size_order, u64 type, | 575 | u64 addr, unsigned int size_order, u64 type, |
| @@ -655,37 +637,25 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, | |||
| 655 | printk(KERN_ERR"IOMMU: flush IOTLB failed\n"); | 637 | printk(KERN_ERR"IOMMU: flush IOTLB failed\n"); |
| 656 | if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type)) | 638 | if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type)) |
| 657 | pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n", | 639 | pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n", |
| 658 | DMA_TLB_IIRG(type), DMA_TLB_IAIG(val)); | 640 | (unsigned long long)DMA_TLB_IIRG(type), |
| 641 | (unsigned long long)DMA_TLB_IAIG(val)); | ||
| 659 | /* flush iotlb entry will implicitly flush write buffer */ | 642 | /* flush iotlb entry will implicitly flush write buffer */ |
| 660 | return 0; | 643 | return 0; |
| 661 | } | 644 | } |
| 662 | 645 | ||
| 663 | static int inline iommu_flush_iotlb_global(struct intel_iommu *iommu, | ||
| 664 | int non_present_entry_flush) | ||
| 665 | { | ||
| 666 | return __iommu_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH, | ||
| 667 | non_present_entry_flush); | ||
| 668 | } | ||
| 669 | |||
| 670 | static int inline iommu_flush_iotlb_dsi(struct intel_iommu *iommu, u16 did, | ||
| 671 | int non_present_entry_flush) | ||
| 672 | { | ||
| 673 | return __iommu_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, | ||
| 674 | non_present_entry_flush); | ||
| 675 | } | ||
| 676 | |||
| 677 | static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, | 646 | static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, |
| 678 | u64 addr, unsigned int pages, int non_present_entry_flush) | 647 | u64 addr, unsigned int pages, int non_present_entry_flush) |
| 679 | { | 648 | { |
| 680 | unsigned int mask; | 649 | unsigned int mask; |
| 681 | 650 | ||
| 682 | BUG_ON(addr & (~PAGE_MASK_4K)); | 651 | BUG_ON(addr & (~VTD_PAGE_MASK)); |
| 683 | BUG_ON(pages == 0); | 652 | BUG_ON(pages == 0); |
| 684 | 653 | ||
| 685 | /* Fallback to domain selective flush if no PSI support */ | 654 | /* Fallback to domain selective flush if no PSI support */ |
| 686 | if (!cap_pgsel_inv(iommu->cap)) | 655 | if (!cap_pgsel_inv(iommu->cap)) |
| 687 | return iommu_flush_iotlb_dsi(iommu, did, | 656 | return iommu->flush.flush_iotlb(iommu, did, 0, 0, |
| 688 | non_present_entry_flush); | 657 | DMA_TLB_DSI_FLUSH, |
| 658 | non_present_entry_flush); | ||
| 689 | 659 | ||
| 690 | /* | 660 | /* |
| 691 | * PSI requires page size to be 2 ^ x, and the base address is naturally | 661 | * PSI requires page size to be 2 ^ x, and the base address is naturally |
| @@ -694,11 +664,12 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, | |||
| 694 | mask = ilog2(__roundup_pow_of_two(pages)); | 664 | mask = ilog2(__roundup_pow_of_two(pages)); |
| 695 | /* Fallback to domain selective flush if size is too big */ | 665 | /* Fallback to domain selective flush if size is too big */ |
| 696 | if (mask > cap_max_amask_val(iommu->cap)) | 666 | if (mask > cap_max_amask_val(iommu->cap)) |
| 697 | return iommu_flush_iotlb_dsi(iommu, did, | 667 | return iommu->flush.flush_iotlb(iommu, did, 0, 0, |
| 698 | non_present_entry_flush); | 668 | DMA_TLB_DSI_FLUSH, non_present_entry_flush); |
| 699 | 669 | ||
| 700 | return __iommu_flush_iotlb(iommu, did, addr, mask, | 670 | return iommu->flush.flush_iotlb(iommu, did, addr, mask, |
| 701 | DMA_TLB_PSI_FLUSH, non_present_entry_flush); | 671 | DMA_TLB_PSI_FLUSH, |
| 672 | non_present_entry_flush); | ||
| 702 | } | 673 | } |
| 703 | 674 | ||
| 704 | static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) | 675 | static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) |
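The PSI path above rounds the page count up to a power of two before issuing a page-selective flush, as the "PSI requires page size to be 2 ^ x" comment notes. A small standalone example of that mask computation (illustrative only, not driver code):

```c
#include <stdio.h>

static unsigned int psi_mask(unsigned int pages)
{
	unsigned int mask = 0;

	/* Equivalent to ilog2(__roundup_pow_of_two(pages)): the flush
	 * covers 2^mask naturally aligned pages. */
	while ((1u << mask) < pages)
		mask++;
	return mask;
}

int main(void)
{
	/* 5 pages -> rounded up to 8 -> mask 3, i.e. the IOTLB flush
	 * covers an 8-page aligned region. */
	printf("pages=5 -> mask=%u\n", psi_mask(5));
	return 0;
}
```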
| @@ -831,7 +802,7 @@ void dmar_msi_read(int irq, struct msi_msg *msg) | |||
| 831 | } | 802 | } |
| 832 | 803 | ||
| 833 | static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type, | 804 | static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type, |
| 834 | u8 fault_reason, u16 source_id, u64 addr) | 805 | u8 fault_reason, u16 source_id, unsigned long long addr) |
| 835 | { | 806 | { |
| 836 | const char *reason; | 807 | const char *reason; |
| 837 | 808 | ||
| @@ -1084,9 +1055,9 @@ static void dmar_init_reserved_ranges(void) | |||
| 1084 | if (!r->flags || !(r->flags & IORESOURCE_MEM)) | 1055 | if (!r->flags || !(r->flags & IORESOURCE_MEM)) |
| 1085 | continue; | 1056 | continue; |
| 1086 | addr = r->start; | 1057 | addr = r->start; |
| 1087 | addr &= PAGE_MASK_4K; | 1058 | addr &= PAGE_MASK; |
| 1088 | size = r->end - addr; | 1059 | size = r->end - addr; |
| 1089 | size = PAGE_ALIGN_4K(size); | 1060 | size = PAGE_ALIGN(size); |
| 1090 | iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr), | 1061 | iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr), |
| 1091 | IOVA_PFN(size + addr) - 1); | 1062 | IOVA_PFN(size + addr) - 1); |
| 1092 | if (!iova) | 1063 | if (!iova) |
| @@ -1148,7 +1119,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width) | |||
| 1148 | domain->pgd = (struct dma_pte *)alloc_pgtable_page(); | 1119 | domain->pgd = (struct dma_pte *)alloc_pgtable_page(); |
| 1149 | if (!domain->pgd) | 1120 | if (!domain->pgd) |
| 1150 | return -ENOMEM; | 1121 | return -ENOMEM; |
| 1151 | __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE_4K); | 1122 | __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); |
| 1152 | return 0; | 1123 | return 0; |
| 1153 | } | 1124 | } |
| 1154 | 1125 | ||
| @@ -1164,7 +1135,7 @@ static void domain_exit(struct dmar_domain *domain) | |||
| 1164 | /* destroy iovas */ | 1135 | /* destroy iovas */ |
| 1165 | put_iova_domain(&domain->iovad); | 1136 | put_iova_domain(&domain->iovad); |
| 1166 | end = DOMAIN_MAX_ADDR(domain->gaw); | 1137 | end = DOMAIN_MAX_ADDR(domain->gaw); |
| 1167 | end = end & (~PAGE_MASK_4K); | 1138 | end = end & (~PAGE_MASK); |
| 1168 | 1139 | ||
| 1169 | /* clear ptes */ | 1140 | /* clear ptes */ |
| 1170 | dma_pte_clear_range(domain, 0, end); | 1141 | dma_pte_clear_range(domain, 0, end); |
| @@ -1204,11 +1175,13 @@ static int domain_context_mapping_one(struct dmar_domain *domain, | |||
| 1204 | __iommu_flush_cache(iommu, context, sizeof(*context)); | 1175 | __iommu_flush_cache(iommu, context, sizeof(*context)); |
| 1205 | 1176 | ||
| 1206 | /* it's a non-present to present mapping */ | 1177 | /* it's a non-present to present mapping */ |
| 1207 | if (iommu_flush_context_device(iommu, domain->id, | 1178 | if (iommu->flush.flush_context(iommu, domain->id, |
| 1208 | (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, 1)) | 1179 | (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, |
| 1180 | DMA_CCMD_DEVICE_INVL, 1)) | ||
| 1209 | iommu_flush_write_buffer(iommu); | 1181 | iommu_flush_write_buffer(iommu); |
| 1210 | else | 1182 | else |
| 1211 | iommu_flush_iotlb_dsi(iommu, 0, 0); | 1183 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0); |
| 1184 | |||
| 1212 | spin_unlock_irqrestore(&iommu->lock, flags); | 1185 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 1213 | return 0; | 1186 | return 0; |
| 1214 | } | 1187 | } |
| @@ -1283,22 +1256,25 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, | |||
| 1283 | u64 start_pfn, end_pfn; | 1256 | u64 start_pfn, end_pfn; |
| 1284 | struct dma_pte *pte; | 1257 | struct dma_pte *pte; |
| 1285 | int index; | 1258 | int index; |
| 1259 | int addr_width = agaw_to_width(domain->agaw); | ||
| 1260 | |||
| 1261 | hpa &= (((u64)1) << addr_width) - 1; | ||
| 1286 | 1262 | ||
| 1287 | if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) | 1263 | if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) |
| 1288 | return -EINVAL; | 1264 | return -EINVAL; |
| 1289 | iova &= PAGE_MASK_4K; | 1265 | iova &= PAGE_MASK; |
| 1290 | start_pfn = ((u64)hpa) >> PAGE_SHIFT_4K; | 1266 | start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT; |
| 1291 | end_pfn = (PAGE_ALIGN_4K(((u64)hpa) + size)) >> PAGE_SHIFT_4K; | 1267 | end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT; |
| 1292 | index = 0; | 1268 | index = 0; |
| 1293 | while (start_pfn < end_pfn) { | 1269 | while (start_pfn < end_pfn) { |
| 1294 | pte = addr_to_dma_pte(domain, iova + PAGE_SIZE_4K * index); | 1270 | pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index); |
| 1295 | if (!pte) | 1271 | if (!pte) |
| 1296 | return -ENOMEM; | 1272 | return -ENOMEM; |
| 1297 | /* We don't need lock here, nobody else | 1273 | /* We don't need lock here, nobody else |
| 1298 | * touches the iova range | 1274 | * touches the iova range |
| 1299 | */ | 1275 | */ |
| 1300 | BUG_ON(dma_pte_addr(*pte)); | 1276 | BUG_ON(dma_pte_addr(*pte)); |
| 1301 | dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K); | 1277 | dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT); |
| 1302 | dma_set_pte_prot(*pte, prot); | 1278 | dma_set_pte_prot(*pte, prot); |
| 1303 | __iommu_flush_cache(domain->iommu, pte, sizeof(*pte)); | 1279 | __iommu_flush_cache(domain->iommu, pte, sizeof(*pte)); |
| 1304 | start_pfn++; | 1280 | start_pfn++; |
| @@ -1310,8 +1286,10 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, | |||
| 1310 | static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn) | 1286 | static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn) |
| 1311 | { | 1287 | { |
| 1312 | clear_context_table(domain->iommu, bus, devfn); | 1288 | clear_context_table(domain->iommu, bus, devfn); |
| 1313 | iommu_flush_context_global(domain->iommu, 0); | 1289 | domain->iommu->flush.flush_context(domain->iommu, 0, 0, 0, |
| 1314 | iommu_flush_iotlb_global(domain->iommu, 0); | 1290 | DMA_CCMD_GLOBAL_INVL, 0); |
| 1291 | domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0, | ||
| 1292 | DMA_TLB_GLOBAL_FLUSH, 0); | ||
| 1315 | } | 1293 | } |
| 1316 | 1294 | ||
| 1317 | static void domain_remove_dev_info(struct dmar_domain *domain) | 1295 | static void domain_remove_dev_info(struct dmar_domain *domain) |
| @@ -1474,11 +1452,13 @@ error: | |||
| 1474 | return find_domain(pdev); | 1452 | return find_domain(pdev); |
| 1475 | } | 1453 | } |
| 1476 | 1454 | ||
| 1477 | static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end) | 1455 | static int iommu_prepare_identity_map(struct pci_dev *pdev, |
| 1456 | unsigned long long start, | ||
| 1457 | unsigned long long end) | ||
| 1478 | { | 1458 | { |
| 1479 | struct dmar_domain *domain; | 1459 | struct dmar_domain *domain; |
| 1480 | unsigned long size; | 1460 | unsigned long size; |
| 1481 | u64 base; | 1461 | unsigned long long base; |
| 1482 | int ret; | 1462 | int ret; |
| 1483 | 1463 | ||
| 1484 | printk(KERN_INFO | 1464 | printk(KERN_INFO |
| @@ -1490,9 +1470,9 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end) | |||
| 1490 | return -ENOMEM; | 1470 | return -ENOMEM; |
| 1491 | 1471 | ||
| 1492 | /* The address might not be aligned */ | 1472 | /* The address might not be aligned */ |
| 1493 | base = start & PAGE_MASK_4K; | 1473 | base = start & PAGE_MASK; |
| 1494 | size = end - base; | 1474 | size = end - base; |
| 1495 | size = PAGE_ALIGN_4K(size); | 1475 | size = PAGE_ALIGN(size); |
| 1496 | if (!reserve_iova(&domain->iovad, IOVA_PFN(base), | 1476 | if (!reserve_iova(&domain->iovad, IOVA_PFN(base), |
| 1497 | IOVA_PFN(base + size) - 1)) { | 1477 | IOVA_PFN(base + size) - 1)) { |
| 1498 | printk(KERN_ERR "IOMMU: reserve iova failed\n"); | 1478 | printk(KERN_ERR "IOMMU: reserve iova failed\n"); |
| @@ -1662,6 +1642,28 @@ int __init init_dmars(void) | |||
| 1662 | } | 1642 | } |
| 1663 | } | 1643 | } |
| 1664 | 1644 | ||
| 1645 | for_each_drhd_unit(drhd) { | ||
| 1646 | if (drhd->ignored) | ||
| 1647 | continue; | ||
| 1648 | |||
| 1649 | iommu = drhd->iommu; | ||
| 1650 | if (dmar_enable_qi(iommu)) { | ||
| 1651 | /* | ||
| 1652 | * Queued Invalidate not enabled, use Register Based | ||
| 1653 | * Invalidate | ||
| 1654 | */ | ||
| 1655 | iommu->flush.flush_context = __iommu_flush_context; | ||
| 1656 | iommu->flush.flush_iotlb = __iommu_flush_iotlb; | ||
| 1657 | printk(KERN_INFO "IOMMU 0x%Lx: using Register based " | ||
| 1658 | "invalidation\n", drhd->reg_base_addr); | ||
| 1659 | } else { | ||
| 1660 | iommu->flush.flush_context = qi_flush_context; | ||
| 1661 | iommu->flush.flush_iotlb = qi_flush_iotlb; | ||
| 1662 | printk(KERN_INFO "IOMMU 0x%Lx: using Queued " | ||
| 1663 | "invalidation\n", drhd->reg_base_addr); | ||
| 1664 | } | ||
| 1665 | } | ||
| 1666 | |||
| 1665 | /* | 1667 | /* |
| 1666 | * For each rmrr | 1668 | * For each rmrr |
| 1667 | * for each dev attached to rmrr | 1669 | * for each dev attached to rmrr |
| @@ -1714,9 +1716,10 @@ int __init init_dmars(void) | |||
| 1714 | 1716 | ||
| 1715 | iommu_set_root_entry(iommu); | 1717 | iommu_set_root_entry(iommu); |
| 1716 | 1718 | ||
| 1717 | iommu_flush_context_global(iommu, 0); | 1719 | iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL, |
| 1718 | iommu_flush_iotlb_global(iommu, 0); | 1720 | 0); |
| 1719 | 1721 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH, | |
| 1722 | 0); | ||
| 1720 | iommu_disable_protect_mem_regions(iommu); | 1723 | iommu_disable_protect_mem_regions(iommu); |
| 1721 | 1724 | ||
| 1722 | ret = iommu_enable_translation(iommu); | 1725 | ret = iommu_enable_translation(iommu); |
| @@ -1738,8 +1741,8 @@ error: | |||
| 1738 | static inline u64 aligned_size(u64 host_addr, size_t size) | 1741 | static inline u64 aligned_size(u64 host_addr, size_t size) |
| 1739 | { | 1742 | { |
| 1740 | u64 addr; | 1743 | u64 addr; |
| 1741 | addr = (host_addr & (~PAGE_MASK_4K)) + size; | 1744 | addr = (host_addr & (~PAGE_MASK)) + size; |
| 1742 | return PAGE_ALIGN_4K(addr); | 1745 | return PAGE_ALIGN(addr); |
| 1743 | } | 1746 | } |
| 1744 | 1747 | ||
| 1745 | struct iova * | 1748 | struct iova * |
| @@ -1753,20 +1756,20 @@ iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end) | |||
| 1753 | return NULL; | 1756 | return NULL; |
| 1754 | 1757 | ||
| 1755 | piova = alloc_iova(&domain->iovad, | 1758 | piova = alloc_iova(&domain->iovad, |
| 1756 | size >> PAGE_SHIFT_4K, IOVA_PFN(end), 1); | 1759 | size >> PAGE_SHIFT, IOVA_PFN(end), 1); |
| 1757 | return piova; | 1760 | return piova; |
| 1758 | } | 1761 | } |
| 1759 | 1762 | ||
| 1760 | static struct iova * | 1763 | static struct iova * |
| 1761 | __intel_alloc_iova(struct device *dev, struct dmar_domain *domain, | 1764 | __intel_alloc_iova(struct device *dev, struct dmar_domain *domain, |
| 1762 | size_t size) | 1765 | size_t size, u64 dma_mask) |
| 1763 | { | 1766 | { |
| 1764 | struct pci_dev *pdev = to_pci_dev(dev); | 1767 | struct pci_dev *pdev = to_pci_dev(dev); |
| 1765 | struct iova *iova = NULL; | 1768 | struct iova *iova = NULL; |
| 1766 | 1769 | ||
| 1767 | if ((pdev->dma_mask <= DMA_32BIT_MASK) || (dmar_forcedac)) { | 1770 | if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac) |
| 1768 | iova = iommu_alloc_iova(domain, size, pdev->dma_mask); | 1771 | iova = iommu_alloc_iova(domain, size, dma_mask); |
| 1769 | } else { | 1772 | else { |
| 1770 | /* | 1773 | /* |
| 1771 | * First try to allocate an io virtual address in | 1774 | * First try to allocate an io virtual address in |
| 1772 | * DMA_32BIT_MASK and if that fails then try allocating | 1775 | * DMA_32BIT_MASK and if that fails then try allocating |
| @@ -1774,7 +1777,7 @@ __intel_alloc_iova(struct device *dev, struct dmar_domain *domain, | |||
| 1774 | */ | 1777 | */ |
| 1775 | iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK); | 1778 | iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK); |
| 1776 | if (!iova) | 1779 | if (!iova) |
| 1777 | iova = iommu_alloc_iova(domain, size, pdev->dma_mask); | 1780 | iova = iommu_alloc_iova(domain, size, dma_mask); |
| 1778 | } | 1781 | } |
| 1779 | 1782 | ||
| 1780 | if (!iova) { | 1783 | if (!iova) { |
| @@ -1813,12 +1816,12 @@ get_valid_domain_for_dev(struct pci_dev *pdev) | |||
| 1813 | return domain; | 1816 | return domain; |
| 1814 | } | 1817 | } |
| 1815 | 1818 | ||
| 1816 | static dma_addr_t | 1819 | static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, |
| 1817 | intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) | 1820 | size_t size, int dir, u64 dma_mask) |
| 1818 | { | 1821 | { |
| 1819 | struct pci_dev *pdev = to_pci_dev(hwdev); | 1822 | struct pci_dev *pdev = to_pci_dev(hwdev); |
| 1820 | struct dmar_domain *domain; | 1823 | struct dmar_domain *domain; |
| 1821 | unsigned long start_paddr; | 1824 | phys_addr_t start_paddr; |
| 1822 | struct iova *iova; | 1825 | struct iova *iova; |
| 1823 | int prot = 0; | 1826 | int prot = 0; |
| 1824 | int ret; | 1827 | int ret; |
| @@ -1833,11 +1836,11 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) | |||
| 1833 | 1836 | ||
| 1834 | size = aligned_size((u64)paddr, size); | 1837 | size = aligned_size((u64)paddr, size); |
| 1835 | 1838 | ||
| 1836 | iova = __intel_alloc_iova(hwdev, domain, size); | 1839 | iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); |
| 1837 | if (!iova) | 1840 | if (!iova) |
| 1838 | goto error; | 1841 | goto error; |
| 1839 | 1842 | ||
| 1840 | start_paddr = iova->pfn_lo << PAGE_SHIFT_4K; | 1843 | start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT; |
| 1841 | 1844 | ||
| 1842 | /* | 1845 | /* |
| 1843 | * Check if DMAR supports zero-length reads on write only | 1846 | * Check if DMAR supports zero-length reads on write only |
| @@ -1855,30 +1858,33 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) | |||
| 1855 | * is not a big problem | 1858 | * is not a big problem |
| 1856 | */ | 1859 | */ |
| 1857 | ret = domain_page_mapping(domain, start_paddr, | 1860 | ret = domain_page_mapping(domain, start_paddr, |
| 1858 | ((u64)paddr) & PAGE_MASK_4K, size, prot); | 1861 | ((u64)paddr) & PAGE_MASK, size, prot); |
| 1859 | if (ret) | 1862 | if (ret) |
| 1860 | goto error; | 1863 | goto error; |
| 1861 | 1864 | ||
| 1862 | pr_debug("Device %s request: %lx@%llx mapping: %lx@%llx, dir %d\n", | ||
| 1863 | pci_name(pdev), size, (u64)paddr, | ||
| 1864 | size, (u64)start_paddr, dir); | ||
| 1865 | |||
| 1866 | /* it's a non-present to present mapping */ | 1865 | /* it's a non-present to present mapping */ |
| 1867 | ret = iommu_flush_iotlb_psi(domain->iommu, domain->id, | 1866 | ret = iommu_flush_iotlb_psi(domain->iommu, domain->id, |
| 1868 | start_paddr, size >> PAGE_SHIFT_4K, 1); | 1867 | start_paddr, size >> VTD_PAGE_SHIFT, 1); |
| 1869 | if (ret) | 1868 | if (ret) |
| 1870 | iommu_flush_write_buffer(domain->iommu); | 1869 | iommu_flush_write_buffer(domain->iommu); |
| 1871 | 1870 | ||
| 1872 | return (start_paddr + ((u64)paddr & (~PAGE_MASK_4K))); | 1871 | return start_paddr + ((u64)paddr & (~PAGE_MASK)); |
| 1873 | 1872 | ||
| 1874 | error: | 1873 | error: |
| 1875 | if (iova) | 1874 | if (iova) |
| 1876 | __free_iova(&domain->iovad, iova); | 1875 | __free_iova(&domain->iovad, iova); |
| 1877 | printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n", | 1876 | printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n", |
| 1878 | pci_name(pdev), size, (u64)paddr, dir); | 1877 | pci_name(pdev), size, (unsigned long long)paddr, dir); |
| 1879 | return 0; | 1878 | return 0; |
| 1880 | } | 1879 | } |
| 1881 | 1880 | ||
| 1881 | dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr, | ||
| 1882 | size_t size, int dir) | ||
| 1883 | { | ||
| 1884 | return __intel_map_single(hwdev, paddr, size, dir, | ||
| 1885 | to_pci_dev(hwdev)->dma_mask); | ||
| 1886 | } | ||
| 1887 | |||
| 1882 | static void flush_unmaps(void) | 1888 | static void flush_unmaps(void) |
| 1883 | { | 1889 | { |
| 1884 | int i, j; | 1890 | int i, j; |
| @@ -1891,7 +1897,8 @@ static void flush_unmaps(void) | |||
| 1891 | struct intel_iommu *iommu = | 1897 | struct intel_iommu *iommu = |
| 1892 | deferred_flush[i].domain[0]->iommu; | 1898 | deferred_flush[i].domain[0]->iommu; |
| 1893 | 1899 | ||
| 1894 | iommu_flush_iotlb_global(iommu, 0); | 1900 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, |
| 1901 | DMA_TLB_GLOBAL_FLUSH, 0); | ||
| 1895 | for (j = 0; j < deferred_flush[i].next; j++) { | 1902 | for (j = 0; j < deferred_flush[i].next; j++) { |
| 1896 | __free_iova(&deferred_flush[i].domain[j]->iovad, | 1903 | __free_iova(&deferred_flush[i].domain[j]->iovad, |
| 1897 | deferred_flush[i].iova[j]); | 1904 | deferred_flush[i].iova[j]); |
| @@ -1936,8 +1943,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova) | |||
| 1936 | spin_unlock_irqrestore(&async_umap_flush_lock, flags); | 1943 | spin_unlock_irqrestore(&async_umap_flush_lock, flags); |
| 1937 | } | 1944 | } |
| 1938 | 1945 | ||
| 1939 | static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, | 1946 | void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, |
| 1940 | size_t size, int dir) | 1947 | int dir) |
| 1941 | { | 1948 | { |
| 1942 | struct pci_dev *pdev = to_pci_dev(dev); | 1949 | struct pci_dev *pdev = to_pci_dev(dev); |
| 1943 | struct dmar_domain *domain; | 1950 | struct dmar_domain *domain; |
| @@ -1953,11 +1960,11 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, | |||
| 1953 | if (!iova) | 1960 | if (!iova) |
| 1954 | return; | 1961 | return; |
| 1955 | 1962 | ||
| 1956 | start_addr = iova->pfn_lo << PAGE_SHIFT_4K; | 1963 | start_addr = iova->pfn_lo << PAGE_SHIFT; |
| 1957 | size = aligned_size((u64)dev_addr, size); | 1964 | size = aligned_size((u64)dev_addr, size); |
| 1958 | 1965 | ||
| 1959 | pr_debug("Device %s unmapping: %lx@%llx\n", | 1966 | pr_debug("Device %s unmapping: %lx@%llx\n", |
| 1960 | pci_name(pdev), size, (u64)start_addr); | 1967 | pci_name(pdev), size, (unsigned long long)start_addr); |
| 1961 | 1968 | ||
| 1962 | /* clear the whole page */ | 1969 | /* clear the whole page */ |
| 1963 | dma_pte_clear_range(domain, start_addr, start_addr + size); | 1970 | dma_pte_clear_range(domain, start_addr, start_addr + size); |
| @@ -1965,7 +1972,7 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, | |||
| 1965 | dma_pte_free_pagetable(domain, start_addr, start_addr + size); | 1972 | dma_pte_free_pagetable(domain, start_addr, start_addr + size); |
| 1966 | if (intel_iommu_strict) { | 1973 | if (intel_iommu_strict) { |
| 1967 | if (iommu_flush_iotlb_psi(domain->iommu, | 1974 | if (iommu_flush_iotlb_psi(domain->iommu, |
| 1968 | domain->id, start_addr, size >> PAGE_SHIFT_4K, 0)) | 1975 | domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0)) |
| 1969 | iommu_flush_write_buffer(domain->iommu); | 1976 | iommu_flush_write_buffer(domain->iommu); |
| 1970 | /* free iova */ | 1977 | /* free iova */ |
| 1971 | __free_iova(&domain->iovad, iova); | 1978 | __free_iova(&domain->iovad, iova); |
| @@ -1978,13 +1985,13 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, | |||
| 1978 | } | 1985 | } |
| 1979 | } | 1986 | } |
| 1980 | 1987 | ||
| 1981 | static void * intel_alloc_coherent(struct device *hwdev, size_t size, | 1988 | void *intel_alloc_coherent(struct device *hwdev, size_t size, |
| 1982 | dma_addr_t *dma_handle, gfp_t flags) | 1989 | dma_addr_t *dma_handle, gfp_t flags) |
| 1983 | { | 1990 | { |
| 1984 | void *vaddr; | 1991 | void *vaddr; |
| 1985 | int order; | 1992 | int order; |
| 1986 | 1993 | ||
| 1987 | size = PAGE_ALIGN_4K(size); | 1994 | size = PAGE_ALIGN(size); |
| 1988 | order = get_order(size); | 1995 | order = get_order(size); |
| 1989 | flags &= ~(GFP_DMA | GFP_DMA32); | 1996 | flags &= ~(GFP_DMA | GFP_DMA32); |
| 1990 | 1997 | ||
| @@ -1993,19 +2000,21 @@ static void * intel_alloc_coherent(struct device *hwdev, size_t size, | |||
| 1993 | return NULL; | 2000 | return NULL; |
| 1994 | memset(vaddr, 0, size); | 2001 | memset(vaddr, 0, size); |
| 1995 | 2002 | ||
| 1996 | *dma_handle = intel_map_single(hwdev, virt_to_bus(vaddr), size, DMA_BIDIRECTIONAL); | 2003 | *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size, |
| 2004 | DMA_BIDIRECTIONAL, | ||
| 2005 | hwdev->coherent_dma_mask); | ||
| 1997 | if (*dma_handle) | 2006 | if (*dma_handle) |
| 1998 | return vaddr; | 2007 | return vaddr; |
| 1999 | free_pages((unsigned long)vaddr, order); | 2008 | free_pages((unsigned long)vaddr, order); |
| 2000 | return NULL; | 2009 | return NULL; |
| 2001 | } | 2010 | } |
| 2002 | 2011 | ||
| 2003 | static void intel_free_coherent(struct device *hwdev, size_t size, | 2012 | void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, |
| 2004 | void *vaddr, dma_addr_t dma_handle) | 2013 | dma_addr_t dma_handle) |
| 2005 | { | 2014 | { |
| 2006 | int order; | 2015 | int order; |
| 2007 | 2016 | ||
| 2008 | size = PAGE_ALIGN_4K(size); | 2017 | size = PAGE_ALIGN(size); |
| 2009 | order = get_order(size); | 2018 | order = get_order(size); |
| 2010 | 2019 | ||
| 2011 | intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL); | 2020 | intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL); |
| @@ -2013,8 +2022,9 @@ static void intel_free_coherent(struct device *hwdev, size_t size, | |||
| 2013 | } | 2022 | } |
| 2014 | 2023 | ||
| 2015 | #define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg))) | 2024 | #define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg))) |
| 2016 | static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, | 2025 | |
| 2017 | int nelems, int dir) | 2026 | void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, |
| 2027 | int nelems, int dir) | ||
| 2018 | { | 2028 | { |
| 2019 | int i; | 2029 | int i; |
| 2020 | struct pci_dev *pdev = to_pci_dev(hwdev); | 2030 | struct pci_dev *pdev = to_pci_dev(hwdev); |
| @@ -2038,7 +2048,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, | |||
| 2038 | size += aligned_size((u64)addr, sg->length); | 2048 | size += aligned_size((u64)addr, sg->length); |
| 2039 | } | 2049 | } |
| 2040 | 2050 | ||
| 2041 | start_addr = iova->pfn_lo << PAGE_SHIFT_4K; | 2051 | start_addr = iova->pfn_lo << PAGE_SHIFT; |
| 2042 | 2052 | ||
| 2043 | /* clear the whole page */ | 2053 | /* clear the whole page */ |
| 2044 | dma_pte_clear_range(domain, start_addr, start_addr + size); | 2054 | dma_pte_clear_range(domain, start_addr, start_addr + size); |
| @@ -2046,7 +2056,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, | |||
| 2046 | dma_pte_free_pagetable(domain, start_addr, start_addr + size); | 2056 | dma_pte_free_pagetable(domain, start_addr, start_addr + size); |
| 2047 | 2057 | ||
| 2048 | if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr, | 2058 | if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr, |
| 2049 | size >> PAGE_SHIFT_4K, 0)) | 2059 | size >> VTD_PAGE_SHIFT, 0)) |
| 2050 | iommu_flush_write_buffer(domain->iommu); | 2060 | iommu_flush_write_buffer(domain->iommu); |
| 2051 | 2061 | ||
| 2052 | /* free iova */ | 2062 | /* free iova */ |
| @@ -2067,8 +2077,8 @@ static int intel_nontranslate_map_sg(struct device *hddev, | |||
| 2067 | return nelems; | 2077 | return nelems; |
| 2068 | } | 2078 | } |
| 2069 | 2079 | ||
| 2070 | static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, | 2080 | int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, |
| 2071 | int nelems, int dir) | 2081 | int dir) |
| 2072 | { | 2082 | { |
| 2073 | void *addr; | 2083 | void *addr; |
| 2074 | int i; | 2084 | int i; |
| @@ -2096,7 +2106,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, | |||
| 2096 | size += aligned_size((u64)addr, sg->length); | 2106 | size += aligned_size((u64)addr, sg->length); |
| 2097 | } | 2107 | } |
| 2098 | 2108 | ||
| 2099 | iova = __intel_alloc_iova(hwdev, domain, size); | 2109 | iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); |
| 2100 | if (!iova) { | 2110 | if (!iova) { |
| 2101 | sglist->dma_length = 0; | 2111 | sglist->dma_length = 0; |
| 2102 | return 0; | 2112 | return 0; |
| @@ -2112,14 +2122,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, | |||
| 2112 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) | 2122 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) |
| 2113 | prot |= DMA_PTE_WRITE; | 2123 | prot |= DMA_PTE_WRITE; |
| 2114 | 2124 | ||
| 2115 | start_addr = iova->pfn_lo << PAGE_SHIFT_4K; | 2125 | start_addr = iova->pfn_lo << PAGE_SHIFT; |
| 2116 | offset = 0; | 2126 | offset = 0; |
| 2117 | for_each_sg(sglist, sg, nelems, i) { | 2127 | for_each_sg(sglist, sg, nelems, i) { |
| 2118 | addr = SG_ENT_VIRT_ADDRESS(sg); | 2128 | addr = SG_ENT_VIRT_ADDRESS(sg); |
| 2119 | addr = (void *)virt_to_phys(addr); | 2129 | addr = (void *)virt_to_phys(addr); |
| 2120 | size = aligned_size((u64)addr, sg->length); | 2130 | size = aligned_size((u64)addr, sg->length); |
| 2121 | ret = domain_page_mapping(domain, start_addr + offset, | 2131 | ret = domain_page_mapping(domain, start_addr + offset, |
| 2122 | ((u64)addr) & PAGE_MASK_4K, | 2132 | ((u64)addr) & PAGE_MASK, |
| 2123 | size, prot); | 2133 | size, prot); |
| 2124 | if (ret) { | 2134 | if (ret) { |
| 2125 | /* clear the page */ | 2135 | /* clear the page */ |
| @@ -2133,14 +2143,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, | |||
| 2133 | return 0; | 2143 | return 0; |
| 2134 | } | 2144 | } |
| 2135 | sg->dma_address = start_addr + offset + | 2145 | sg->dma_address = start_addr + offset + |
| 2136 | ((u64)addr & (~PAGE_MASK_4K)); | 2146 | ((u64)addr & (~PAGE_MASK)); |
| 2137 | sg->dma_length = sg->length; | 2147 | sg->dma_length = sg->length; |
| 2138 | offset += size; | 2148 | offset += size; |
| 2139 | } | 2149 | } |
| 2140 | 2150 | ||
| 2141 | /* it's a non-present to present mapping */ | 2151 | /* it's a non-present to present mapping */ |
| 2142 | if (iommu_flush_iotlb_psi(domain->iommu, domain->id, | 2152 | if (iommu_flush_iotlb_psi(domain->iommu, domain->id, |
| 2143 | start_addr, offset >> PAGE_SHIFT_4K, 1)) | 2153 | start_addr, offset >> VTD_PAGE_SHIFT, 1)) |
| 2144 | iommu_flush_write_buffer(domain->iommu); | 2154 | iommu_flush_write_buffer(domain->iommu); |
| 2145 | return nelems; | 2155 | return nelems; |
| 2146 | } | 2156 | } |
| @@ -2180,7 +2190,6 @@ static inline int iommu_devinfo_cache_init(void) | |||
| 2180 | sizeof(struct device_domain_info), | 2190 | sizeof(struct device_domain_info), |
| 2181 | 0, | 2191 | 0, |
| 2182 | SLAB_HWCACHE_ALIGN, | 2192 | SLAB_HWCACHE_ALIGN, |
| 2183 | |||
| 2184 | NULL); | 2193 | NULL); |
| 2185 | if (!iommu_devinfo_cache) { | 2194 | if (!iommu_devinfo_cache) { |
| 2186 | printk(KERN_ERR "Couldn't create devinfo cache\n"); | 2195 | printk(KERN_ERR "Couldn't create devinfo cache\n"); |
| @@ -2198,7 +2207,6 @@ static inline int iommu_iova_cache_init(void) | |||
| 2198 | sizeof(struct iova), | 2207 | sizeof(struct iova), |
| 2199 | 0, | 2208 | 0, |
| 2200 | SLAB_HWCACHE_ALIGN, | 2209 | SLAB_HWCACHE_ALIGN, |
| 2201 | |||
| 2202 | NULL); | 2210 | NULL); |
| 2203 | if (!iommu_iova_cache) { | 2211 | if (!iommu_iova_cache) { |
| 2204 | printk(KERN_ERR "Couldn't create iova cache\n"); | 2212 | printk(KERN_ERR "Couldn't create iova cache\n"); |
| @@ -2327,7 +2335,7 @@ void intel_iommu_domain_exit(struct dmar_domain *domain) | |||
| 2327 | return; | 2335 | return; |
| 2328 | 2336 | ||
| 2329 | end = DOMAIN_MAX_ADDR(domain->gaw); | 2337 | end = DOMAIN_MAX_ADDR(domain->gaw); |
| 2330 | end = end & (~PAGE_MASK_4K); | 2338 | end = end & (~VTD_PAGE_MASK); |
| 2331 | 2339 | ||
| 2332 | /* clear ptes */ | 2340 | /* clear ptes */ |
| 2333 | dma_pte_clear_range(domain, 0, end); | 2341 | dma_pte_clear_range(domain, 0, end); |
| @@ -2423,6 +2431,6 @@ u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova) | |||
| 2423 | if (pte) | 2431 | if (pte) |
| 2424 | pfn = dma_pte_addr(*pte); | 2432 | pfn = dma_pte_addr(*pte); |
| 2425 | 2433 | ||
| 2426 | return pfn >> PAGE_SHIFT_4K; | 2434 | return pfn >> VTD_PAGE_SHIFT; |
| 2427 | } | 2435 | } |
| 2428 | EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn); | 2436 | EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn); |
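Throughout intel-iommu.c the fixed iommu_flush_*_global/dsi/device wrappers are replaced by per-IOMMU callbacks (iommu->flush.flush_context / flush_iotlb) that init_dmars() binds to either the register-based routines or the queued-invalidation ones. A sketch of that dispatch pattern, using hypothetical simplified types rather than the real struct intel_iommu:

```c
#include <linux/types.h>

struct sketch_iommu;

struct sketch_flush_ops {
	int (*flush_context)(struct sketch_iommu *iommu, u16 did, u16 sid,
			     u8 fm, u64 type, int non_present_entry_flush);
	int (*flush_iotlb)(struct sketch_iommu *iommu, u16 did, u64 addr,
			   unsigned int size_order, u64 type,
			   int non_present_entry_flush);
};

struct sketch_iommu {
	bool qi_enabled;		/* queued invalidation usable? */
	struct sketch_flush_ops flush;	/* bound once at init, used everywhere */
};

/* Mirrors the init_dmars() loop in the hunks above: prefer the queued
 * implementations, fall back to the register-based ones. */
static void sketch_bind_flush_ops(struct sketch_iommu *iommu,
				  const struct sketch_flush_ops *reg_ops,
				  const struct sketch_flush_ops *qi_ops)
{
	iommu->flush = iommu->qi_enabled ? *qi_ops : *reg_ops;
}
```

Callers then invoke iommu->flush.flush_iotlb(...) without caring which mechanism is active, which is what the flush_unmaps(), detach_domain_for_dev() and domain_context_mapping_one() hunks above switch to.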
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index e872ac925b4b..832175d9ca25 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
| @@ -35,6 +35,20 @@ static void __devinit quirk_mellanox_tavor(struct pci_dev *dev) | |||
| 35 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor); | 35 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor); |
| 36 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor); | 36 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor); |
| 37 | 37 | ||
| 38 | /* Many VIA bridges seem to corrupt data for DAC. Disable it here */ | ||
| 39 | int forbid_dac __read_mostly; | ||
| 40 | EXPORT_SYMBOL(forbid_dac); | ||
| 41 | |||
| 42 | static __devinit void via_no_dac(struct pci_dev *dev) | ||
| 43 | { | ||
| 44 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) { | ||
| 45 | dev_info(&dev->dev, | ||
| 46 | "VIA PCI bridge detected. Disabling DAC.\n"); | ||
| 47 | forbid_dac = 1; | ||
| 48 | } | ||
| 49 | } | ||
| 50 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac); | ||
| 51 | |||
| 38 | /* Deal with broken BIOS'es that neglect to enable passive release, | 52 | /* Deal with broken BIOS'es that neglect to enable passive release, |
| 39 | which can cause problems in combination with the 82441FX/PPro MTRRs */ | 53 | which can cause problems in combination with the 82441FX/PPro MTRRs */ |
| 40 | static void quirk_passive_release(struct pci_dev *dev) | 54 | static void quirk_passive_release(struct pci_dev *dev) |
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index 961e746da977..2daaffcda52f 100644
--- a/include/asm-x86/iommu.h
+++ b/include/asm-x86/iommu.h
| @@ -7,9 +7,13 @@ extern struct dma_mapping_ops nommu_dma_ops; | |||
| 7 | extern int force_iommu, no_iommu; | 7 | extern int force_iommu, no_iommu; |
| 8 | extern int iommu_detected; | 8 | extern int iommu_detected; |
| 9 | extern int dmar_disabled; | 9 | extern int dmar_disabled; |
| 10 | extern int forbid_dac; | ||
| 10 | 11 | ||
| 11 | extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len); | 12 | extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len); |
| 12 | 13 | ||
| 14 | /* 10 seconds */ | ||
| 15 | #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) | ||
| 16 | |||
| 13 | #ifdef CONFIG_GART_IOMMU | 17 | #ifdef CONFIG_GART_IOMMU |
| 14 | extern int gart_iommu_aperture; | 18 | extern int gart_iommu_aperture; |
| 15 | extern int gart_iommu_aperture_allowed; | 19 | extern int gart_iommu_aperture_allowed; |
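DMAR_OPERATION_TIMEOUT above is expressed in TSC cycles: tsc_khz * 1000 ticks per second, times ten, gives roughly a ten-second budget. A hedged sketch of how such a cycles-based timeout is typically consumed when polling hardware (illustrative helper, not the driver's actual wait macro):

```c
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/timex.h>	/* get_cycles(), cycles_t */
#include <asm/processor.h>	/* cpu_relax() */

/* Poll until done(arg) reports completion or roughly `timeout` TSC cycles
 * elapse, returning -ETIMEDOUT in the latter case. */
static int sketch_wait_op(bool (*done)(void *arg), void *arg, cycles_t timeout)
{
	cycles_t start = get_cycles();

	while (!done(arg)) {
		if (get_cycles() - start > timeout)
			return -ETIMEDOUT;
		cpu_relax();
	}
	return 0;
}
```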
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index bff5c65f81dc..952df39c989d 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
| @@ -2,15 +2,14 @@ | |||
| 2 | #define _DMA_REMAPPING_H | 2 | #define _DMA_REMAPPING_H |
| 3 | 3 | ||
| 4 | /* | 4 | /* |
| 5 | * We need a fixed PAGE_SIZE of 4K irrespective of | 5 | * VT-d hardware uses 4KiB page size regardless of host page size. |
| 6 | * arch PAGE_SIZE for IOMMU page tables. | ||
| 7 | */ | 6 | */ |
| 8 | #define PAGE_SHIFT_4K (12) | 7 | #define VTD_PAGE_SHIFT (12) |
| 9 | #define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K) | 8 | #define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT) |
| 10 | #define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K) | 9 | #define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT) |
| 11 | #define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K) | 10 | #define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK) |
| 12 | 11 | ||
| 13 | #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT_4K) | 12 | #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) |
| 14 | #define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK) | 13 | #define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK) |
| 15 | #define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK) | 14 | #define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK) |
| 16 | 15 | ||
| @@ -25,7 +24,7 @@ struct root_entry { | |||
| 25 | u64 val; | 24 | u64 val; |
| 26 | u64 rsvd1; | 25 | u64 rsvd1; |
| 27 | }; | 26 | }; |
| 28 | #define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry)) | 27 | #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry)) |
| 29 | static inline bool root_present(struct root_entry *root) | 28 | static inline bool root_present(struct root_entry *root) |
| 30 | { | 29 | { |
| 31 | return (root->val & 1); | 30 | return (root->val & 1); |
| @@ -36,7 +35,7 @@ static inline void set_root_present(struct root_entry *root) | |||
| 36 | } | 35 | } |
| 37 | static inline void set_root_value(struct root_entry *root, unsigned long value) | 36 | static inline void set_root_value(struct root_entry *root, unsigned long value) |
| 38 | { | 37 | { |
| 39 | root->val |= value & PAGE_MASK_4K; | 38 | root->val |= value & VTD_PAGE_MASK; |
| 40 | } | 39 | } |
| 41 | 40 | ||
| 42 | struct context_entry; | 41 | struct context_entry; |
| @@ -45,7 +44,7 @@ get_context_addr_from_root(struct root_entry *root) | |||
| 45 | { | 44 | { |
| 46 | return (struct context_entry *) | 45 | return (struct context_entry *) |
| 47 | (root_present(root)?phys_to_virt( | 46 | (root_present(root)?phys_to_virt( |
| 48 | root->val & PAGE_MASK_4K): | 47 | root->val & VTD_PAGE_MASK) : |
| 49 | NULL); | 48 | NULL); |
| 50 | } | 49 | } |
| 51 | 50 | ||
| @@ -67,7 +66,7 @@ struct context_entry { | |||
| 67 | #define context_present(c) ((c).lo & 1) | 66 | #define context_present(c) ((c).lo & 1) |
| 68 | #define context_fault_disable(c) (((c).lo >> 1) & 1) | 67 | #define context_fault_disable(c) (((c).lo >> 1) & 1) |
| 69 | #define context_translation_type(c) (((c).lo >> 2) & 3) | 68 | #define context_translation_type(c) (((c).lo >> 2) & 3) |
| 70 | #define context_address_root(c) ((c).lo & PAGE_MASK_4K) | 69 | #define context_address_root(c) ((c).lo & VTD_PAGE_MASK) |
| 71 | #define context_address_width(c) ((c).hi & 7) | 70 | #define context_address_width(c) ((c).hi & 7) |
| 72 | #define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1)) | 71 | #define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1)) |
| 73 | 72 | ||
| @@ -81,7 +80,7 @@ struct context_entry { | |||
| 81 | } while (0) | 80 | } while (0) |
| 82 | #define CONTEXT_TT_MULTI_LEVEL 0 | 81 | #define CONTEXT_TT_MULTI_LEVEL 0 |
| 83 | #define context_set_address_root(c, val) \ | 82 | #define context_set_address_root(c, val) \ |
| 84 | do {(c).lo |= (val) & PAGE_MASK_4K;} while (0) | 83 | do {(c).lo |= (val) & VTD_PAGE_MASK; } while (0) |
| 85 | #define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0) | 84 | #define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0) |
| 86 | #define context_set_domain_id(c, val) \ | 85 | #define context_set_domain_id(c, val) \ |
| 87 | do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0) | 86 | do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0) |
| @@ -107,9 +106,9 @@ struct dma_pte { | |||
| 107 | #define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0) | 106 | #define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0) |
| 108 | #define dma_set_pte_prot(p, prot) \ | 107 | #define dma_set_pte_prot(p, prot) \ |
| 109 | do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0) | 108 | do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0) |
| 110 | #define dma_pte_addr(p) ((p).val & PAGE_MASK_4K) | 109 | #define dma_pte_addr(p) ((p).val & VTD_PAGE_MASK) |
| 111 | #define dma_set_pte_addr(p, addr) do {\ | 110 | #define dma_set_pte_addr(p, addr) do {\ |
| 112 | (p).val |= ((addr) & PAGE_MASK_4K); } while (0) | 111 | (p).val |= ((addr) & VTD_PAGE_MASK); } while (0) |
| 113 | #define dma_pte_present(p) (((p).val & 3) != 0) | 112 | #define dma_pte_present(p) (((p).val & 3) != 0) |
| 114 | 113 | ||
| 115 | struct intel_iommu; | 114 | struct intel_iommu; |
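The PAGE_*_4K macros are renamed to VTD_PAGE_* to make clear they describe VT-d's fixed 4KiB page geometry rather than the host PAGE_SIZE. A standalone illustration of the round-up helper, with the definitions copied here so it builds in userspace:

	#include <stdio.h>
	#include <stdint.h>

	#define VTD_PAGE_SHIFT      12
	#define VTD_PAGE_SIZE       (1UL << VTD_PAGE_SHIFT)
	#define VTD_PAGE_MASK       (((uint64_t)-1) << VTD_PAGE_SHIFT)
	#define VTD_PAGE_ALIGN(a)   (((a) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)

	int main(void)
	{
		uint64_t addr = 0x12345;	/* not 4KiB aligned */

		/* Rounds up to the next VT-d page boundary: prints 0x13000. */
		printf("%#llx -> %#llx\n",
		       (unsigned long long)addr,
		       (unsigned long long)VTD_PAGE_ALIGN(addr));
		return 0;
	}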
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 2e117f30a76c..3d017cfd245b 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
| @@ -29,6 +29,7 @@ | |||
| 29 | #include <linux/io.h> | 29 | #include <linux/io.h> |
| 30 | #include <linux/dma_remapping.h> | 30 | #include <linux/dma_remapping.h> |
| 31 | #include <asm/cacheflush.h> | 31 | #include <asm/cacheflush.h> |
| 32 | #include <asm/iommu.h> | ||
| 32 | 33 | ||
| 33 | /* | 34 | /* |
| 34 | * Intel IOMMU register specification per version 1.0 public spec. | 35 | * Intel IOMMU register specification per version 1.0 public spec. |
| @@ -127,6 +128,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
| 127 | 128 | ||
| 128 | 129 | ||
| 129 | /* IOTLB_REG */ | 130 | /* IOTLB_REG */ |
| 131 | #define DMA_TLB_FLUSH_GRANU_OFFSET 60 | ||
| 130 | #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60) | 132 | #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60) |
| 131 | #define DMA_TLB_DSI_FLUSH (((u64)2) << 60) | 133 | #define DMA_TLB_DSI_FLUSH (((u64)2) << 60) |
| 132 | #define DMA_TLB_PSI_FLUSH (((u64)3) << 60) | 134 | #define DMA_TLB_PSI_FLUSH (((u64)3) << 60) |
| @@ -140,6 +142,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
| 140 | #define DMA_TLB_MAX_SIZE (0x3f) | 142 | #define DMA_TLB_MAX_SIZE (0x3f) |
| 141 | 143 | ||
| 142 | /* INVALID_DESC */ | 144 | /* INVALID_DESC */ |
| 145 | #define DMA_CCMD_INVL_GRANU_OFFSET 61 | ||
| 143 | #define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3) | 146 | #define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3) |
| 144 | #define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3) | 147 | #define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3) |
| 145 | #define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3) | 148 | #define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3) |
| @@ -200,22 +203,21 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
| 200 | #define dma_frcd_type(d) ((d >> 30) & 1) | 203 | #define dma_frcd_type(d) ((d >> 30) & 1) |
| 201 | #define dma_frcd_fault_reason(c) (c & 0xff) | 204 | #define dma_frcd_fault_reason(c) (c & 0xff) |
| 202 | #define dma_frcd_source_id(c) (c & 0xffff) | 205 | #define dma_frcd_source_id(c) (c & 0xffff) |
| 203 | #define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */ | 206 | /* low 64 bit */ |
| 204 | 207 | #define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT)) | |
| 205 | #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */ | 208 | |
| 206 | 209 | #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ | |
| 207 | #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ | 210 | do { \ |
| 208 | {\ | 211 | cycles_t start_time = get_cycles(); \ |
| 209 | cycles_t start_time = get_cycles();\ | 212 | while (1) { \ |
| 210 | while (1) {\ | 213 | sts = op(iommu->reg + offset); \ |
| 211 | sts = op (iommu->reg + offset);\ | 214 | if (cond) \ |
| 212 | if (cond)\ | 215 | break; \ |
| 213 | break;\ | ||
| 214 | if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\ | 216 | if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\ |
| 215 | panic("DMAR hardware is malfunctioning\n");\ | 217 | panic("DMAR hardware is malfunctioning\n"); \ |
| 216 | cpu_relax();\ | 218 | cpu_relax(); \ |
| 217 | }\ | 219 | } \ |
| 218 | } | 220 | } while (0) |
| 219 | 221 | ||
| 220 | #define QI_LENGTH 256 /* queue length */ | 222 | #define QI_LENGTH 256 /* queue length */ |
| 221 | 223 | ||
| @@ -238,6 +240,19 @@ enum { | |||
| 238 | #define QI_IWD_STATUS_DATA(d) (((u64)d) << 32) | 240 | #define QI_IWD_STATUS_DATA(d) (((u64)d) << 32) |
| 239 | #define QI_IWD_STATUS_WRITE (((u64)1) << 5) | 241 | #define QI_IWD_STATUS_WRITE (((u64)1) << 5) |
| 240 | 242 | ||
| 243 | #define QI_IOTLB_DID(did) (((u64)did) << 16) | ||
| 244 | #define QI_IOTLB_DR(dr) (((u64)dr) << 7) | ||
| 245 | #define QI_IOTLB_DW(dw) (((u64)dw) << 6) | ||
| 246 | #define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4)) | ||
| 247 | #define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK) | ||
| 248 | #define QI_IOTLB_IH(ih) (((u64)ih) << 6) | ||
| 249 | #define QI_IOTLB_AM(am) (((u8)am)) | ||
| 250 | |||
| 251 | #define QI_CC_FM(fm) (((u64)fm) << 48) | ||
| 252 | #define QI_CC_SID(sid) (((u64)sid) << 32) | ||
| 253 | #define QI_CC_DID(did) (((u64)did) << 16) | ||
| 254 | #define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4)) | ||
| 255 | |||
| 241 | struct qi_desc { | 256 | struct qi_desc { |
| 242 | u64 low, high; | 257 | u64 low, high; |
| 243 | }; | 258 | }; |
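The new QI_IOTLB_*/QI_CC_* helpers pack fields of a queued-invalidation descriptor. A sketch of how a domain-selective IOTLB invalidation could be assembled from them — this mirrors what qi_flush_iotlb() is expected to do, and QI_IOTLB_TYPE (the descriptor-type constant from the enum above) is assumed here:

	/* Sketch: build and submit a domain-selective IOTLB invalidation. */
	static void example_qi_dsi_flush(struct intel_iommu *iommu, u16 did,
					 u64 addr, unsigned int size_order)
	{
		struct qi_desc desc;

		desc.low  = QI_IOTLB_DID(did) | QI_IOTLB_DR(0) | QI_IOTLB_DW(0) |
			    QI_IOTLB_GRAN(DMA_TLB_DSI_FLUSH) | QI_IOTLB_TYPE;
		desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(0) |
			    QI_IOTLB_AM(size_order);

		qi_submit_sync(&desc, iommu);
	}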
| @@ -263,6 +278,13 @@ struct ir_table { | |||
| 263 | }; | 278 | }; |
| 264 | #endif | 279 | #endif |
| 265 | 280 | ||
| 281 | struct iommu_flush { | ||
| 282 | int (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, | ||
| 283 | u64 type, int non_present_entry_flush); | ||
| 284 | int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr, | ||
| 285 | unsigned int size_order, u64 type, int non_present_entry_flush); | ||
| 286 | }; | ||
| 287 | |||
| 266 | struct intel_iommu { | 288 | struct intel_iommu { |
| 267 | void __iomem *reg; /* Pointer to hardware regs, virtual addr */ | 289 | void __iomem *reg; /* Pointer to hardware regs, virtual addr */ |
| 268 | u64 cap; | 290 | u64 cap; |
| @@ -282,6 +304,7 @@ struct intel_iommu { | |||
| 282 | unsigned char name[7]; /* Device Name */ | 304 | unsigned char name[7]; /* Device Name */ |
| 283 | struct msi_msg saved_msg; | 305 | struct msi_msg saved_msg; |
| 284 | struct sys_device sysdev; | 306 | struct sys_device sysdev; |
| 307 | struct iommu_flush flush; | ||
| 285 | #endif | 308 | #endif |
| 286 | struct q_inval *qi; /* Queued invalidation info */ | 309 | struct q_inval *qi; /* Queued invalidation info */ |
| 287 | #ifdef CONFIG_INTR_REMAP | 310 | #ifdef CONFIG_INTR_REMAP |
| @@ -303,6 +326,12 @@ extern void free_iommu(struct intel_iommu *iommu); | |||
| 303 | extern int dmar_enable_qi(struct intel_iommu *iommu); | 326 | extern int dmar_enable_qi(struct intel_iommu *iommu); |
| 304 | extern void qi_global_iec(struct intel_iommu *iommu); | 327 | extern void qi_global_iec(struct intel_iommu *iommu); |
| 305 | 328 | ||
| 329 | extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, | ||
| 330 | u8 fm, u64 type, int non_present_entry_flush); | ||
| 331 | extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, | ||
| 332 | unsigned int size_order, u64 type, | ||
| 333 | int non_present_entry_flush); | ||
| 334 | |||
| 306 | extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); | 335 | extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); |
| 307 | 336 | ||
| 308 | void intel_iommu_domain_exit(struct dmar_domain *domain); | 337 | void intel_iommu_domain_exit(struct dmar_domain *domain); |
| @@ -324,4 +353,11 @@ static inline int intel_iommu_found(void) | |||
| 324 | } | 353 | } |
| 325 | #endif /* CONFIG_DMAR */ | 354 | #endif /* CONFIG_DMAR */ |
| 326 | 355 | ||
| 356 | extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t); | ||
| 357 | extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t); | ||
| 358 | extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int); | ||
| 359 | extern void intel_unmap_single(struct device *, dma_addr_t, size_t, int); | ||
| 360 | extern int intel_map_sg(struct device *, struct scatterlist *, int, int); | ||
| 361 | extern void intel_unmap_sg(struct device *, struct scatterlist *, int, int); | ||
| 362 | |||
| 327 | #endif | 363 | #endif |
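The newly exported intel_* entry points are the VT-d implementations of the generic DMA-mapping hooks. A sketch of how they might be wired up; the dma_mapping_ops field names follow the x86 layer of this era and are an assumption here:

	/* Sketch: plug the VT-d entry points into the generic x86 DMA layer. */
	static struct dma_mapping_ops intel_dma_ops = {
		.alloc_coherent	= intel_alloc_coherent,
		.free_coherent	= intel_free_coherent,
		.map_single	= intel_map_single,
		.unmap_single	= intel_unmap_single,
		.map_sg		= intel_map_sg,
		.unmap_sg	= intel_unmap_sg,
	};

	/* Somewhere in VT-d init, once the hardware is up: */
	dma_ops = &intel_dma_ops;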
