Diffstat (limited to 'drivers/pci/dmar.c')
-rw-r--r--  drivers/pci/dmar.c  235
1 file changed, 194 insertions(+), 41 deletions(-)
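
Note on the new qi_flush_dev_iotlb() in this patch: a 2^mask-page invalidation is encoded in the low bits of the address field rather than in a separate length field. The following standalone sketch is not part of the patch; it assumes a 4 KiB VT-d page size (VTD_PAGE_SHIFT = 12) and reduces the descriptor packing to plain bit arithmetic, just to show what the mask manipulation computes:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT	12	/* assumed: 4 KiB VT-d page size */

/*
 * Model of the encoding done in qi_flush_dev_iotlb(): for mask > 0 the
 * request covers 2^mask pages, the base address must be aligned to that
 * size, and the size is conveyed by setting the (mask - 1) address bits
 * just above VTD_PAGE_SHIFT (bits below VTD_PAGE_SHIFT are masked off
 * again when the address is packed into the descriptor).
 */
static uint64_t dev_iotlb_addr_bits(uint64_t addr, unsigned int mask)
{
	if (mask) {
		/* same invariant the patch enforces with BUG_ON() */
		assert((addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1)) == 0);
		addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
	}
	return addr & ~((1ULL << VTD_PAGE_SHIFT) - 1);	/* keep page-address bits */
}

int main(void)
{
	/* a 4-page (mask = 2) invalidation at 0x10000 sets one extra low bit */
	printf("%#llx\n", (unsigned long long)dev_iotlb_addr_bits(0x10000, 2));
	return 0;
}

With mask = 2 and base 0x10000 this prints 0x11000: the base address plus one extra set address bit, which together with the descriptor's size flag (QI_DEV_IOTLB_SIZE in the patch) tells the device to invalidate four pages.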
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index fa3a11365ec3..7b287cb38b7a 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -267,6 +267,84 @@ rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
 	}
 	return ret;
 }
+
+static LIST_HEAD(dmar_atsr_units);
+
+static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
+{
+	struct acpi_dmar_atsr *atsr;
+	struct dmar_atsr_unit *atsru;
+
+	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
+	if (!atsru)
+		return -ENOMEM;
+
+	atsru->hdr = hdr;
+	atsru->include_all = atsr->flags & 0x1;
+
+	list_add(&atsru->list, &dmar_atsr_units);
+
+	return 0;
+}
+
+static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
+{
+	int rc;
+	struct acpi_dmar_atsr *atsr;
+
+	if (atsru->include_all)
+		return 0;
+
+	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
+	rc = dmar_parse_dev_scope((void *)(atsr + 1),
+				  (void *)atsr + atsr->header.length,
+				  &atsru->devices_cnt, &atsru->devices,
+				  atsr->segment);
+	if (rc || !atsru->devices_cnt) {
+		list_del(&atsru->list);
+		kfree(atsru);
+	}
+
+	return rc;
+}
+
+int dmar_find_matched_atsr_unit(struct pci_dev *dev)
+{
+	int i;
+	struct pci_bus *bus;
+	struct acpi_dmar_atsr *atsr;
+	struct dmar_atsr_unit *atsru;
+
+	list_for_each_entry(atsru, &dmar_atsr_units, list) {
+		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
+		if (atsr->segment == pci_domain_nr(dev->bus))
+			goto found;
+	}
+
+	return 0;
+
+found:
+	for (bus = dev->bus; bus; bus = bus->parent) {
+		struct pci_dev *bridge = bus->self;
+
+		if (!bridge || !bridge->is_pcie ||
+		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+			return 0;
+
+		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
+			for (i = 0; i < atsru->devices_cnt; i++)
+				if (atsru->devices[i] == bridge)
+					return 1;
+			break;
+		}
+	}
+
+	if (atsru->include_all)
+		return 1;
+
+	return 0;
+}
 #endif
 
 static void __init
@@ -274,22 +352,28 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
 {
 	struct acpi_dmar_hardware_unit *drhd;
 	struct acpi_dmar_reserved_memory *rmrr;
+	struct acpi_dmar_atsr *atsr;
 
 	switch (header->type) {
 	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
-		drhd = (struct acpi_dmar_hardware_unit *)header;
+		drhd = container_of(header, struct acpi_dmar_hardware_unit,
+				    header);
 		printk (KERN_INFO PREFIX
-			"DRHD (flags: 0x%08x)base: 0x%016Lx\n",
-			drhd->flags, (unsigned long long)drhd->address);
+			"DRHD base: %#016Lx flags: %#x\n",
+			(unsigned long long)drhd->address, drhd->flags);
 		break;
 	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
-		rmrr = (struct acpi_dmar_reserved_memory *)header;
-
+		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
+				    header);
 		printk (KERN_INFO PREFIX
-			"RMRR base: 0x%016Lx end: 0x%016Lx\n",
+			"RMRR base: %#016Lx end: %#016Lx\n",
 			(unsigned long long)rmrr->base_address,
 			(unsigned long long)rmrr->end_address);
 		break;
+	case ACPI_DMAR_TYPE_ATSR:
+		atsr = container_of(header, struct acpi_dmar_atsr, header);
+		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
+		break;
 	}
 }
 
@@ -363,6 +447,11 @@ parse_dmar_table(void)
 			ret = dmar_parse_one_rmrr(entry_header);
 #endif
 			break;
+		case ACPI_DMAR_TYPE_ATSR:
+#ifdef CONFIG_DMAR
+			ret = dmar_parse_one_atsr(entry_header);
+#endif
+			break;
 		default:
 			printk(KERN_WARNING PREFIX
 				"Unknown DMAR structure type\n");
@@ -431,11 +520,19 @@ int __init dmar_dev_scope_init(void)
 #ifdef CONFIG_DMAR
 	{
 		struct dmar_rmrr_unit *rmrr, *rmrr_n;
+		struct dmar_atsr_unit *atsr, *atsr_n;
+
 		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
 			ret = rmrr_parse_dev(rmrr);
 			if (ret)
 				return ret;
 		}
+
+		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
+			ret = atsr_parse_dev(atsr);
+			if (ret)
+				return ret;
+		}
 	}
 #endif
 
@@ -468,6 +565,9 @@ int __init dmar_table_init(void)
 #ifdef CONFIG_DMAR
 	if (list_empty(&dmar_rmrr_units))
 		printk(KERN_INFO PREFIX "No RMRR found\n");
+
+	if (list_empty(&dmar_atsr_units))
+		printk(KERN_INFO PREFIX "No ATSR found\n");
 #endif
 
 #ifdef CONFIG_INTR_REMAP
@@ -515,6 +615,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	u32 ver;
 	static int iommu_allocated = 0;
 	int agaw = 0;
+	int msagaw = 0;
 
 	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
 	if (!iommu)
@@ -535,12 +636,20 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	agaw = iommu_calculate_agaw(iommu);
 	if (agaw < 0) {
 		printk(KERN_ERR
 			"Cannot get a valid agaw for iommu (seq_id = %d)\n",
+			iommu->seq_id);
+		goto error;
+	}
+	msagaw = iommu_calculate_max_sagaw(iommu);
+	if (msagaw < 0) {
+		printk(KERN_ERR
+			"Cannot get a valid max agaw for iommu (seq_id = %d)\n",
 			iommu->seq_id);
 		goto error;
 	}
 #endif
 	iommu->agaw = agaw;
+	iommu->msagaw = msagaw;
 
 	/* the registers might be more than one page */
 	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
@@ -590,7 +699,8 @@ void free_iommu(struct intel_iommu *iommu)
  */
 static inline void reclaim_free_desc(struct q_inval *qi)
 {
-	while (qi->desc_status[qi->free_tail] == QI_DONE) {
+	while (qi->desc_status[qi->free_tail] == QI_DONE ||
+	       qi->desc_status[qi->free_tail] == QI_ABORT) {
 		qi->desc_status[qi->free_tail] = QI_FREE;
 		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
 		qi->free_cnt++;
@@ -600,10 +710,13 @@ static inline void reclaim_free_desc(struct q_inval *qi)
 static int qi_check_fault(struct intel_iommu *iommu, int index)
 {
 	u32 fault;
-	int head;
+	int head, tail;
 	struct q_inval *qi = iommu->qi;
 	int wait_index = (index + 1) % QI_LENGTH;
 
+	if (qi->desc_status[wait_index] == QI_ABORT)
+		return -EAGAIN;
+
 	fault = readl(iommu->reg + DMAR_FSTS_REG);
 
 	/*
@@ -613,7 +726,11 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
 	 */
 	if (fault & DMA_FSTS_IQE) {
 		head = readl(iommu->reg + DMAR_IQH_REG);
-		if ((head >> 4) == index) {
+		if ((head >> DMAR_IQ_SHIFT) == index) {
+			printk(KERN_ERR "VT-d detected invalid descriptor: "
+				"low=%llx, high=%llx\n",
+				(unsigned long long)qi->desc[index].low,
+				(unsigned long long)qi->desc[index].high);
 			memcpy(&qi->desc[index], &qi->desc[wait_index],
 				sizeof(struct qi_desc));
 			__iommu_flush_cache(iommu, &qi->desc[index],
@@ -623,6 +740,32 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
 		}
 	}
 
+	/*
+	 * If ITE happens, all pending wait_desc commands are aborted.
+	 * No new descriptors are fetched until the ITE is cleared.
+	 */
+	if (fault & DMA_FSTS_ITE) {
+		head = readl(iommu->reg + DMAR_IQH_REG);
+		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
+		head |= 1;
+		tail = readl(iommu->reg + DMAR_IQT_REG);
+		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
+
+		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
+
+		do {
+			if (qi->desc_status[head] == QI_IN_USE)
+				qi->desc_status[head] = QI_ABORT;
+			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
+		} while (head != tail);
+
+		if (qi->desc_status[wait_index] == QI_ABORT)
+			return -EAGAIN;
+	}
+
+	if (fault & DMA_FSTS_ICE)
+		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
+
 	return 0;
 }
 
@@ -632,7 +775,7 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
  */
 int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 {
-	int rc = 0;
+	int rc;
 	struct q_inval *qi = iommu->qi;
 	struct qi_desc *hw, wait_desc;
 	int wait_index, index;
@@ -643,6 +786,9 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 
 	hw = qi->desc;
 
+restart:
+	rc = 0;
+
 	spin_lock_irqsave(&qi->q_lock, flags);
 	while (qi->free_cnt < 3) {
 		spin_unlock_irqrestore(&qi->q_lock, flags);
@@ -673,7 +819,7 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 	 * update the HW tail register indicating the presence of
 	 * new descriptors.
 	 */
-	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
+	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
 
 	while (qi->desc_status[wait_index] != QI_DONE) {
 		/*
@@ -685,18 +831,21 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 		 */
 		rc = qi_check_fault(iommu, index);
 		if (rc)
-			goto out;
+			break;
 
 		spin_unlock(&qi->q_lock);
 		cpu_relax();
 		spin_lock(&qi->q_lock);
 	}
-out:
-	qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;
+
+	qi->desc_status[index] = QI_DONE;
 
 	reclaim_free_desc(qi);
 	spin_unlock_irqrestore(&qi->q_lock, flags);
 
+	if (rc == -EAGAIN)
+		goto restart;
+
 	return rc;
 }
 
@@ -714,41 +863,26 @@ void qi_global_iec(struct intel_iommu *iommu)
 	qi_submit_sync(&desc, iommu);
 }
 
-int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
-		   u64 type, int non_present_entry_flush)
+void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+		      u64 type)
 {
 	struct qi_desc desc;
 
-	if (non_present_entry_flush) {
-		if (!cap_caching_mode(iommu->cap))
-			return 1;
-		else
-			did = 0;
-	}
-
 	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
 			| QI_CC_GRAN(type) | QI_CC_TYPE;
 	desc.high = 0;
 
-	return qi_submit_sync(&desc, iommu);
+	qi_submit_sync(&desc, iommu);
 }
 
-int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
-	unsigned int size_order, u64 type,
-	int non_present_entry_flush)
+void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+		    unsigned int size_order, u64 type)
 {
 	u8 dw = 0, dr = 0;
 
 	struct qi_desc desc;
 	int ih = 0;
 
-	if (non_present_entry_flush) {
-		if (!cap_caching_mode(iommu->cap))
-			return 1;
-		else
-			did = 0;
-	}
-
 	if (cap_write_drain(iommu->cap))
 		dw = 1;
 
@@ -760,7 +894,28 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
 		| QI_IOTLB_AM(size_order);
 
-	return qi_submit_sync(&desc, iommu);
+	qi_submit_sync(&desc, iommu);
+}
+
+void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
+			u64 addr, unsigned mask)
+{
+	struct qi_desc desc;
+
+	if (mask) {
+		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
+		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
+		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
+	} else
+		desc.high = QI_DEV_IOTLB_ADDR(addr);
+
+	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
+		qdep = 0;
+
+	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
+		   QI_DIOTLB_TYPE;
+
+	qi_submit_sync(&desc, iommu);
 }
 
 /*
@@ -790,7 +945,6 @@ void dmar_disable_qi(struct intel_iommu *iommu)
 		cpu_relax();
 
 	iommu->gcmd &= ~DMA_GCMD_QIE;
-
 	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
 
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
@@ -804,7 +958,7 @@ end:
  */
 static void __dmar_enable_qi(struct intel_iommu *iommu)
 {
-	u32 cmd, sts;
+	u32 sts;
 	unsigned long flags;
 	struct q_inval *qi = iommu->qi;
 
@@ -818,9 +972,8 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
 
 	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
 
-	cmd = iommu->gcmd | DMA_GCMD_QIE;
 	iommu->gcmd |= DMA_GCMD_QIE;
-	writel(cmd, iommu->reg + DMAR_GCMD_REG);
+	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
 
 	/* Make sure hardware complete it */
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
@@ -1096,7 +1249,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
 		set_irq_data(irq, NULL);
 		iommu->irq = 0;
 		destroy_irq(irq);
-		return 0;
+		return ret;
 	}
 
 	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
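
A closing note on the queued-invalidation changes above: qi_submit_sync() now resets rc and replays the whole submit-and-wait sequence whenever qi_check_fault() reports -EAGAIN, i.e. when an Invalidation Time-out Error (ITE) has marked the in-flight descriptors QI_ABORT. A minimal control-flow model of that retry follows; submit_once() is a hypothetical stand-in for the real queue and hardware interaction, not a kernel function:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for one submit-and-wait pass: reports -EAGAIN
 * twice (as qi_check_fault() would after an ITE) and then succeeds. */
static int submit_once(int *attempts)
{
	return (*attempts)++ < 2 ? -EAGAIN : 0;
}

/* Mirrors the restart shape of qi_submit_sync(): rc is reset at the top
 * of each attempt and the sequence is replayed until no abort occurs. */
static int submit_sync(void)
{
	int attempts = 0;
	int rc;

restart:
	rc = submit_once(&attempts);
	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

int main(void)
{
	printf("rc = %d after retries\n", submit_sync());	/* rc = 0 */
	return 0;
}

The point of the shape is that only an abort triggers a replay; a genuine invalidation-queue error (IQE) path still returns its error to the caller, exactly as in the patched qi_submit_sync().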