Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--  arch/x86/kernel/amd_iommu.c  337
1 files changed, 272 insertions(+), 65 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 042fdc27bc92..a8fd9ebdc8e2 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -33,6 +33,10 @@
 
 static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 
+/* A list of preallocated protection domains */
+static LIST_HEAD(iommu_pd_list);
+static DEFINE_SPINLOCK(iommu_pd_list_lock);
+
 /*
  * general struct to manage commands send to an IOMMU
  */
@@ -51,6 +55,102 @@ static int iommu_has_npcache(struct amd_iommu *iommu)
 
 /****************************************************************************
  *
+ * Interrupt handling functions
+ *
+ ****************************************************************************/
+
+static void iommu_print_event(void *__evt)
+{
+        u32 *event = __evt;
+        int type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
+        int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+        int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
+        int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+        u64 address = (u64)(((u64)event[3]) << 32) | event[2];
+
+        printk(KERN_ERR "AMD IOMMU: Event logged [");
+
+        switch (type) {
+        case EVENT_TYPE_ILL_DEV:
+                printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
+                       "address=0x%016llx flags=0x%04x]\n",
+                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+                       address, flags);
+                break;
+        case EVENT_TYPE_IO_FAULT:
+                printk("IO_PAGE_FAULT device=%02x:%02x.%x "
+                       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
+                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+                       domid, address, flags);
+                break;
+        case EVENT_TYPE_DEV_TAB_ERR:
+                printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
+                       "address=0x%016llx flags=0x%04x]\n",
+                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+                       address, flags);
+                break;
+        case EVENT_TYPE_PAGE_TAB_ERR:
+                printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
+                       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
+                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+                       domid, address, flags);
+                break;
+        case EVENT_TYPE_ILL_CMD:
+                printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
+                break;
+        case EVENT_TYPE_CMD_HARD_ERR:
+                printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
+                       "flags=0x%04x]\n", address, flags);
+                break;
+        case EVENT_TYPE_IOTLB_INV_TO:
+                printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
+                       "address=0x%016llx]\n",
+                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+                       address);
+                break;
+        case EVENT_TYPE_INV_DEV_REQ:
+                printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
+                       "address=0x%016llx flags=0x%04x]\n",
+                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+                       address, flags);
+                break;
+        default:
+                printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
+        }
+}
+
+static void iommu_poll_events(struct amd_iommu *iommu)
+{
+        u32 head, tail;
+        unsigned long flags;
+
+        spin_lock_irqsave(&iommu->lock, flags);
+
+        head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
+        tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
+
+        while (head != tail) {
+                iommu_print_event(iommu->evt_buf + head);
+                head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
+        }
+
+        writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
+
+        spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+irqreturn_t amd_iommu_int_handler(int irq, void *data)
+{
+        struct amd_iommu *iommu;
+
+        list_for_each_entry(iommu, &amd_iommu_list, list)
+                iommu_poll_events(iommu);
+
+        return IRQ_HANDLED;
+}
+
+/****************************************************************************
+ *
  * IOMMU command queuing functions
  *
  ****************************************************************************/
@@ -195,7 +295,7 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
                 u64 address, size_t size)
 {
         int s = 0;
-        unsigned pages = iommu_num_pages(address, size);
+        unsigned pages = iommu_num_pages(address, size, PAGE_SIZE);
 
         address &= PAGE_MASK;
 
@@ -213,6 +313,14 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
         return 0;
 }
 
+/* Flush the whole IO/TLB for a given protection domain */
+static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
+{
+        u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+
+        iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
+}
+
 /****************************************************************************
  *
  * The functions below are used the create the page table mappings for
@@ -372,11 +480,6 @@ static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
  * efficient allocator.
  *
  ****************************************************************************/
-static unsigned long dma_mask_to_pages(unsigned long mask)
-{
-        return (mask >> PAGE_SHIFT) +
-                (PAGE_ALIGN(mask & ~PAGE_MASK) >> PAGE_SHIFT);
-}
 
 /*
  * The address allocator core function.
@@ -385,25 +488,31 @@ static unsigned long dma_mask_to_pages(unsigned long mask)
  */
 static unsigned long dma_ops_alloc_addresses(struct device *dev,
                                              struct dma_ops_domain *dom,
-                                             unsigned int pages)
+                                             unsigned int pages,
+                                             unsigned long align_mask,
+                                             u64 dma_mask)
 {
-        unsigned long limit = dma_mask_to_pages(*dev->dma_mask);
+        unsigned long limit;
         unsigned long address;
-        unsigned long size = dom->aperture_size >> PAGE_SHIFT;
         unsigned long boundary_size;
 
         boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                         PAGE_SIZE) >> PAGE_SHIFT;
-        limit = limit < size ? limit : size;
+        limit = iommu_device_max_index(dom->aperture_size >> PAGE_SHIFT, 0,
+                                       dma_mask >> PAGE_SHIFT);
 
-        if (dom->next_bit >= limit)
+        if (dom->next_bit >= limit) {
                 dom->next_bit = 0;
+                dom->need_flush = true;
+        }
 
         address = iommu_area_alloc(dom->bitmap, limit, dom->next_bit, pages,
-                        0 , boundary_size, 0);
-        if (address == -1)
+                        0 , boundary_size, align_mask);
+        if (address == -1) {
                 address = iommu_area_alloc(dom->bitmap, limit, 0, pages,
-                                0, boundary_size, 0);
+                                0, boundary_size, align_mask);
+                dom->need_flush = true;
+        }
 
         if (likely(address != -1)) {
                 dom->next_bit = address + pages;
@@ -469,7 +578,7 @@ static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
         if (start_page + pages > last_page)
                 pages = last_page - start_page;
 
-        set_bit_string(dom->bitmap, start_page, pages);
+        iommu_area_reserve(dom->bitmap, start_page, pages);
 }
 
 static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
@@ -563,12 +672,16 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
         dma_dom->bitmap[0] = 1;
         dma_dom->next_bit = 0;
 
+        dma_dom->need_flush = false;
+        dma_dom->target_dev = 0xffff;
+
         /* Intialize the exclusion range if necessary */
         if (iommu->exclusion_start &&
             iommu->exclusion_start < dma_dom->aperture_size) {
                 unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
                 int pages = iommu_num_pages(iommu->exclusion_start,
-                                            iommu->exclusion_length);
+                                            iommu->exclusion_length,
+                                            PAGE_SIZE);
                 dma_ops_reserve_addresses(dma_dom, startpage, pages);
         }
 
@@ -633,12 +746,13 @@ static void set_device_domain(struct amd_iommu *iommu,
 
         u64 pte_root = virt_to_phys(domain->pt_root);
 
-        pte_root |= (domain->mode & 0x07) << 9;
-        pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | 2;
+        pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
+                    << DEV_ENTRY_MODE_SHIFT;
+        pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
 
         write_lock_irqsave(&amd_iommu_devtable_lock, flags);
-        amd_iommu_dev_table[devid].data[0] = pte_root;
-        amd_iommu_dev_table[devid].data[1] = pte_root >> 32;
+        amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
+        amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
         amd_iommu_dev_table[devid].data[2] = domain->id;
 
         amd_iommu_pd_table[devid] = domain;
@@ -656,6 +770,45 @@ static void set_device_domain(struct amd_iommu *iommu,
 *****************************************************************************/
 
 /*
+ * This function checks if the driver got a valid device from the caller to
+ * avoid dereferencing invalid pointers.
+ */
+static bool check_device(struct device *dev)
+{
+        if (!dev || !dev->dma_mask)
+                return false;
+
+        return true;
+}
+
+/*
+ * In this function the list of preallocated protection domains is traversed to
+ * find the domain for a specific device
+ */
+static struct dma_ops_domain *find_protection_domain(u16 devid)
+{
+        struct dma_ops_domain *entry, *ret = NULL;
+        unsigned long flags;
+
+        if (list_empty(&iommu_pd_list))
+                return NULL;
+
+        spin_lock_irqsave(&iommu_pd_list_lock, flags);
+
+        list_for_each_entry(entry, &iommu_pd_list, list) {
+                if (entry->target_dev == devid) {
+                        ret = entry;
+                        list_del(&ret->list);
+                        break;
+                }
+        }
+
+        spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+
+        return ret;
+}
+
+/*
  * In the dma_ops path we only have the struct device. This function
  * finds the corresponding IOMMU, the protection domain and the
  * requestor id for a given device.
@@ -671,27 +824,30 @@ static int get_device_resources(struct device *dev,
         struct pci_dev *pcidev;
         u16 _bdf;
 
-        BUG_ON(!dev || dev->bus != &pci_bus_type || !dev->dma_mask);
+        *iommu = NULL;
+        *domain = NULL;
+        *bdf = 0xffff;
+
+        if (dev->bus != &pci_bus_type)
+                return 0;
 
         pcidev = to_pci_dev(dev);
         _bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
 
         /* device not translated by any IOMMU in the system? */
-        if (_bdf > amd_iommu_last_bdf) {
-                *iommu = NULL;
-                *domain = NULL;
-                *bdf = 0xffff;
+        if (_bdf > amd_iommu_last_bdf)
                 return 0;
-        }
 
         *bdf = amd_iommu_alias_table[_bdf];
 
         *iommu = amd_iommu_rlookup_table[*bdf];
         if (*iommu == NULL)
                 return 0;
-        dma_dom = (*iommu)->default_dom;
         *domain = domain_for_device(*bdf);
         if (*domain == NULL) {
+                dma_dom = find_protection_domain(*bdf);
+                if (!dma_dom)
+                        dma_dom = (*iommu)->default_dom;
                 *domain = &dma_dom->domain;
                 set_device_domain(*iommu, *domain, *bdf);
                 printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
@@ -770,17 +926,24 @@ static dma_addr_t __map_single(struct device *dev,
                                struct dma_ops_domain *dma_dom,
                                phys_addr_t paddr,
                                size_t size,
-                               int dir)
+                               int dir,
+                               bool align,
+                               u64 dma_mask)
 {
         dma_addr_t offset = paddr & ~PAGE_MASK;
         dma_addr_t address, start;
         unsigned int pages;
+        unsigned long align_mask = 0;
         int i;
 
-        pages = iommu_num_pages(paddr, size);
+        pages = iommu_num_pages(paddr, size, PAGE_SIZE);
         paddr &= PAGE_MASK;
 
-        address = dma_ops_alloc_addresses(dev, dma_dom, pages);
+        if (align)
+                align_mask = (1UL << get_order(size)) - 1;
+
+        address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
+                                          dma_mask);
         if (unlikely(address == bad_dma_address))
                 goto out;
 
@@ -792,6 +955,12 @@ static dma_addr_t __map_single(struct device *dev,
         }
         address += offset;
 
+        if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
+                iommu_flush_tlb(iommu, dma_dom->domain.id);
+                dma_dom->need_flush = false;
+        } else if (unlikely(iommu_has_npcache(iommu)))
+                iommu_flush_pages(iommu, dma_dom->domain.id, address, size);
+
 out:
         return address;
 }
@@ -812,7 +981,7 @@ static void __unmap_single(struct amd_iommu *iommu,
         if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
                 return;
 
-        pages = iommu_num_pages(dma_addr, size);
+        pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
         dma_addr &= PAGE_MASK;
         start = dma_addr;
 
@@ -822,6 +991,9 @@ static void __unmap_single(struct amd_iommu *iommu,
         }
 
         dma_ops_free_addresses(dma_dom, dma_addr, pages);
+
+        if (amd_iommu_unmap_flush)
+                iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
 }
 
 /*
@@ -835,6 +1007,12 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
         struct protection_domain *domain;
         u16 devid;
         dma_addr_t addr;
+        u64 dma_mask;
+
+        if (!check_device(dev))
+                return bad_dma_address;
+
+        dma_mask = *dev->dma_mask;
 
         get_device_resources(dev, &iommu, &domain, &devid);
 
@@ -843,14 +1021,12 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
                 return (dma_addr_t)paddr;
 
         spin_lock_irqsave(&domain->lock, flags);
-        addr = __map_single(dev, iommu, domain->priv, paddr, size, dir);
+        addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
+                            dma_mask);
         if (addr == bad_dma_address)
                 goto out;
 
-        if (iommu_has_npcache(iommu))
-                iommu_flush_pages(iommu, domain->id, addr, size);
-
-        if (iommu->need_sync)
+        if (unlikely(iommu->need_sync))
                 iommu_completion_wait(iommu);
 
 out:
@@ -870,7 +1046,8 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
         struct protection_domain *domain;
         u16 devid;
 
-        if (!get_device_resources(dev, &iommu, &domain, &devid))
+        if (!check_device(dev) ||
+            !get_device_resources(dev, &iommu, &domain, &devid))
                 /* device not handled by any AMD IOMMU */
                 return;
 
@@ -878,9 +1055,7 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
 
         __unmap_single(iommu, domain->priv, dma_addr, size, dir);
 
-        iommu_flush_pages(iommu, domain->id, dma_addr, size);
-
-        if (iommu->need_sync)
+        if (unlikely(iommu->need_sync))
                 iommu_completion_wait(iommu);
 
         spin_unlock_irqrestore(&domain->lock, flags);
@@ -919,6 +1094,12 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
         struct scatterlist *s;
         phys_addr_t paddr;
         int mapped_elems = 0;
+        u64 dma_mask;
+
+        if (!check_device(dev))
+                return 0;
+
+        dma_mask = *dev->dma_mask;
 
         get_device_resources(dev, &iommu, &domain, &devid);
 
@@ -931,19 +1112,17 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
                 paddr = sg_phys(s);
 
                 s->dma_address = __map_single(dev, iommu, domain->priv,
-                                              paddr, s->length, dir);
+                                              paddr, s->length, dir, false,
+                                              dma_mask);
 
                 if (s->dma_address) {
                         s->dma_length = s->length;
                         mapped_elems++;
                 } else
                         goto unmap;
-                if (iommu_has_npcache(iommu))
-                        iommu_flush_pages(iommu, domain->id, s->dma_address,
-                                          s->dma_length);
         }
 
-        if (iommu->need_sync)
+        if (unlikely(iommu->need_sync))
                 iommu_completion_wait(iommu);
 
 out:
@@ -977,7 +1156,8 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
         u16 devid;
         int i;
 
-        if (!get_device_resources(dev, &iommu, &domain, &devid))
+        if (!check_device(dev) ||
+            !get_device_resources(dev, &iommu, &domain, &devid))
                 return;
 
         spin_lock_irqsave(&domain->lock, flags);
@@ -985,12 +1165,10 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
         for_each_sg(sglist, s, nelems, i) {
                 __unmap_single(iommu, domain->priv, s->dma_address,
                                s->dma_length, dir);
-                iommu_flush_pages(iommu, domain->id, s->dma_address,
-                                  s->dma_length);
                 s->dma_address = s->dma_length = 0;
         }
 
-        if (iommu->need_sync)
+        if (unlikely(iommu->need_sync))
                 iommu_completion_wait(iommu);
 
         spin_unlock_irqrestore(&domain->lock, flags);
@@ -1008,25 +1186,33 @@ static void *alloc_coherent(struct device *dev, size_t size,
         struct protection_domain *domain;
         u16 devid;
         phys_addr_t paddr;
+        u64 dma_mask = dev->coherent_dma_mask;
+
+        if (!check_device(dev))
+                return NULL;
+
+        if (!get_device_resources(dev, &iommu, &domain, &devid))
+                flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
 
+        flag |= __GFP_ZERO;
         virt_addr = (void *)__get_free_pages(flag, get_order(size));
         if (!virt_addr)
                 return 0;
 
-        memset(virt_addr, 0, size);
         paddr = virt_to_phys(virt_addr);
 
-        get_device_resources(dev, &iommu, &domain, &devid);
-
         if (!iommu || !domain) {
                 *dma_addr = (dma_addr_t)paddr;
                 return virt_addr;
         }
 
+        if (!dma_mask)
+                dma_mask = *dev->dma_mask;
+
         spin_lock_irqsave(&domain->lock, flags);
 
         *dma_addr = __map_single(dev, iommu, domain->priv, paddr,
-                                 size, DMA_BIDIRECTIONAL);
+                                 size, DMA_BIDIRECTIONAL, true, dma_mask);
 
         if (*dma_addr == bad_dma_address) {
                 free_pages((unsigned long)virt_addr, get_order(size));
@@ -1034,10 +1220,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
                 goto out;
         }
 
-        if (iommu_has_npcache(iommu))
-                iommu_flush_pages(iommu, domain->id, *dma_addr, size);
-
-        if (iommu->need_sync)
+        if (unlikely(iommu->need_sync))
                 iommu_completion_wait(iommu);
 
 out:
@@ -1048,8 +1231,6 @@ out:
 
 /*
  * The exported free_coherent function for dma_ops.
- * FIXME: fix the generic x86 DMA layer so that it actually calls that
- * function.
  */
 static void free_coherent(struct device *dev, size_t size,
                           void *virt_addr, dma_addr_t dma_addr)
@@ -1059,6 +1240,9 @@ static void free_coherent(struct device *dev, size_t size,
         struct protection_domain *domain;
         u16 devid;
 
+        if (!check_device(dev))
+                return;
+
         get_device_resources(dev, &iommu, &domain, &devid);
 
         if (!iommu || !domain)
@@ -1067,9 +1251,8 @@ static void free_coherent(struct device *dev, size_t size,
         spin_lock_irqsave(&domain->lock, flags);
 
         __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
-        iommu_flush_pages(iommu, domain->id, dma_addr, size);
 
-        if (iommu->need_sync)
+        if (unlikely(iommu->need_sync))
                 iommu_completion_wait(iommu);
 
         spin_unlock_irqrestore(&domain->lock, flags);
@@ -1079,6 +1262,30 @@ free_mem:
 }
 
 /*
+ * This function is called by the DMA layer to find out if we can handle a
+ * particular device. It is part of the dma_ops.
+ */
+static int amd_iommu_dma_supported(struct device *dev, u64 mask)
+{
+        u16 bdf;
+        struct pci_dev *pcidev;
+
+        /* No device or no PCI device */
+        if (!dev || dev->bus != &pci_bus_type)
+                return 0;
+
+        pcidev = to_pci_dev(dev);
+
+        bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
+
+        /* Out of our scope? */
+        if (bdf > amd_iommu_last_bdf)
+                return 0;
+
+        return 1;
+}
+
+/*
  * The function for pre-allocating protection domains.
  *
  * If the driver core informs the DMA layer if a driver grabs a device
@@ -1107,10 +1314,9 @@ void prealloc_protection_domains(void)
                 if (!dma_dom)
                         continue;
                 init_unity_mappings_for_device(dma_dom, devid);
-                set_device_domain(iommu, &dma_dom->domain, devid);
-                printk(KERN_INFO "AMD IOMMU: Allocated domain %d for device ",
-                       dma_dom->domain.id);
-                print_devid(devid, 1);
+                dma_dom->target_dev = devid;
+
+                list_add_tail(&dma_dom->list, &iommu_pd_list);
         }
 }
 
@@ -1121,6 +1327,7 @@ static struct dma_mapping_ops amd_iommu_dma_ops = {
         .unmap_single = unmap_single,
         .map_sg = map_sg,
         .unmap_sg = unmap_sg,
+        .dma_supported = amd_iommu_dma_supported,
 };
 
 /*