about · summary · refs · log · tree · commit · diff · stats
path: root/arch
diff options
context:
space:
mode:
authorJoerg Roedel <joerg.roedel@amd.com>2011-04-05 05:00:53 -0400
committerJoerg Roedel <joerg.roedel@amd.com>2011-04-11 03:04:03 -0400
commitcb41ed85efa01e633388314c03a4f3004c6b783b (patch)
treeaa132590ca4cce26cbd1bd3ae7ca557ec71d3a87 /arch
parent9844b4e5dd1932e175a23d84ce09702bdf4b5689 (diff)
x86/amd-iommu: Flush device IOTLB if ATS is enabled
This patch implements a function to flush the IOTLB on devices supporting ATS, and makes sure that this TLB is also flushed if necessary.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/include/asm/amd_iommu_types.h3
-rw-r--r--arch/x86/kernel/amd_iommu.c74
2 files changed, 75 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 878ae008eb04..f5d184e7d5be 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -113,7 +113,8 @@
 /* command specific defines */
 #define CMD_COMPL_WAIT		0x01
 #define CMD_INV_DEV_ENTRY	0x02
 #define CMD_INV_IOMMU_PAGES	0x03
+#define CMD_INV_IOTLB_PAGES	0x04
 
 #define CMD_COMPL_WAIT_STORE_MASK	0x01
 #define CMD_COMPL_WAIT_INT_MASK		0x02
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index bcf58ea55cfa..f3ce4338dade 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -18,6 +18,7 @@
  */
 
 #include <linux/pci.h>
+#include <linux/pci-ats.h>
 #include <linux/bitmap.h>
 #include <linux/slab.h>
 #include <linux/debugfs.h>
@@ -463,6 +464,37 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
 		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
 }
 
+static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
+				  u64 address, size_t size)
+{
+	u64 pages;
+	int s;
+
+	pages = iommu_num_pages(address, size, PAGE_SIZE);
+	s     = 0;
+
+	if (pages > 1) {
+		/*
+		 * If we have to flush more than one page, flush all
+		 * TLB entries for this domain
+		 */
+		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+		s = 1;
+	}
+
+	address &= PAGE_MASK;
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->data[0]  = devid;
+	cmd->data[0] |= (qdep & 0xff) << 24;
+	cmd->data[1]  = devid;
+	cmd->data[2]  = lower_32_bits(address);
+	cmd->data[3]  = upper_32_bits(address);
+	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
+	if (s)
+		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+}
+
 /*
  * Writes the command to the IOMMUs command buffer and informs the
  * hardware about the new command.
466/* 498/*
467 * Writes the command to the IOMMUs command buffer and informs the 499 * Writes the command to the IOMMUs command buffer and informs the
468 * hardware about the new command. 500 * hardware about the new command.
@@ -574,17 +606,47 @@ void iommu_flush_all_caches(struct amd_iommu *iommu)
 }
 
 /*
+ * Command send function for flushing on-device TLB
+ */
+static int device_flush_iotlb(struct device *dev, u64 address, size_t size)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct amd_iommu *iommu;
+	struct iommu_cmd cmd;
+	u16 devid;
+	int qdep;
+
+	qdep  = pci_ats_queue_depth(pdev);
+	devid = get_device_id(dev);
+	iommu = amd_iommu_rlookup_table[devid];
+
+	build_inv_iotlb_pages(&cmd, devid, qdep, address, size);
+
+	return iommu_queue_command(iommu, &cmd);
+}
+
+/*
  * Command send function for invalidating a device table entry
  */
 static int device_flush_dte(struct device *dev)
 {
 	struct amd_iommu *iommu;
+	struct pci_dev *pdev;
 	u16 devid;
+	int ret;
 
+	pdev  = to_pci_dev(dev);
 	devid = get_device_id(dev);
 	iommu = amd_iommu_rlookup_table[devid];
 
-	return iommu_flush_dte(iommu, devid);
+	ret = iommu_flush_dte(iommu, devid);
+	if (ret)
+		return ret;
+
+	if (pci_ats_enabled(pdev))
+		ret = device_flush_iotlb(dev, 0, ~0UL);
+
+	return ret;
 }
 
 /*
@@ -595,6 +657,7 @@ static int device_flush_dte(struct device *dev)
 static void __domain_flush_pages(struct protection_domain *domain,
 				 u64 address, size_t size, int pde)
 {
+	struct iommu_dev_data *dev_data;
 	struct iommu_cmd cmd;
 	int ret = 0, i;
 
@@ -611,6 +674,15 @@ static void __domain_flush_pages(struct protection_domain *domain,
 		ret |= iommu_queue_command(amd_iommus[i], &cmd);
 	}
 
+	list_for_each_entry(dev_data, &domain->dev_list, list) {
+		struct pci_dev *pdev = to_pci_dev(dev_data->dev);
+
+		if (!pci_ats_enabled(pdev))
+			continue;
+
+		ret |= device_flush_iotlb(dev_data->dev, address, size);
+	}
+
 	WARN_ON(ret);
 }
 