author		Joerg Roedel <jroedel@suse.de>	2016-07-06 05:55:37 -0400
committer	Joerg Roedel <jroedel@suse.de>	2016-07-13 06:48:35 -0400
commit		c5b5da9c79bb2d88fa3c5163ccf1a7a9e89cfa49 (patch)
tree		22a7e8fabeb6181d56841d8c52bb5f0fb99a129b
parent		bda350dbdbc1ad8655ece0ec3d41bebc3ee7a77b (diff)
iommu/amd: Set up data structures for flush queue
The flush queue is the equivalent of deferred flushing in the Intel
VT-d driver. This patch sets up the data structures needed for this.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
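To make the idea concrete before the diff: deferred flushing trades an
immediate IOTLB invalidation on every unmap for one batched flush per
full queue. Below is a minimal user-space model of that behaviour — an
illustrative sketch, not driver code: flush_iotlb() merely stands in
for the real invalidation, and the entries array is inlined here
instead of being kzalloc'd as in the patch.

#include <stdio.h>

#define FLUSH_QUEUE_SIZE 256

struct flush_queue_entry {
        unsigned long iova_pfn;
        unsigned long pages;
};

struct flush_queue {
        unsigned next;
        struct flush_queue_entry entries[FLUSH_QUEUE_SIZE];
};

/* Stand-in for the real IOTLB flush; here it only reports the batch. */
static void flush_iotlb(struct flush_queue *queue)
{
        printf("one IOTLB flush covers %u queued ranges\n", queue->next);
        queue->next = 0;        /* queued IOVA ranges may be reused now */
}

/* Called on unmap: defer the flush, batching entries until the queue fills. */
static void queue_add(struct flush_queue *queue,
                      unsigned long iova_pfn, unsigned long pages)
{
        if (queue->next == FLUSH_QUEUE_SIZE)
                flush_iotlb(queue);

        queue->entries[queue->next].iova_pfn = iova_pfn;
        queue->entries[queue->next].pages    = pages;
        queue->next++;
}

int main(void)
{
        static struct flush_queue queue;
        unsigned long pfn;

        /* 1000 unmaps collapse into three batch flushes plus a drain. */
        for (pfn = 0; pfn < 1000; pfn++)
                queue_add(&queue, pfn, 1);

        flush_iotlb(&queue);    /* drain the remainder */
        return 0;
}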
-rw-r--r--	drivers/iommu/amd_iommu.c	40
1 file changed, 39 insertions(+), 1 deletion(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index d218e35ed3e3..38f8a5e461fc 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -89,6 +89,22 @@ LIST_HEAD(ioapic_map);
 LIST_HEAD(hpet_map);
 LIST_HEAD(acpihid_map);
 
+#define FLUSH_QUEUE_SIZE 256
+
+struct flush_queue_entry {
+        unsigned long iova_pfn;
+        unsigned long pages;
+        struct dma_ops_domain *dma_dom;
+};
+
+struct flush_queue {
+        spinlock_t lock;
+        unsigned next;
+        struct flush_queue_entry *entries;
+};
+
+DEFINE_PER_CPU(struct flush_queue, flush_queue);
+
 /*
  * Domain for untranslated devices - only allocated
  * if iommu=pt passed on kernel cmd line.
@@ -2508,7 +2524,7 @@ static int init_reserved_iova_ranges(void)
 
 int __init amd_iommu_init_api(void)
 {
-        int ret, err = 0;
+        int ret, cpu, err = 0;
 
         ret = iova_cache_get();
         if (ret)
@@ -2518,6 +2534,18 @@ int __init amd_iommu_init_api(void)
         if (ret)
                 return ret;
 
+        for_each_possible_cpu(cpu) {
+                struct flush_queue *queue = per_cpu_ptr(&flush_queue, cpu);
+
+                queue->entries = kzalloc(FLUSH_QUEUE_SIZE *
+                                         sizeof(*queue->entries),
+                                         GFP_KERNEL);
+                if (!queue->entries)
+                        goto out_put_iova;
+
+                spin_lock_init(&queue->lock);
+        }
+
         err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
         if (err)
                 return err;
@@ -2530,6 +2558,15 @@ int __init amd_iommu_init_api(void)
         if (err)
                 return err;
         return 0;
+
+out_put_iova:
+        for_each_possible_cpu(cpu) {
+                struct flush_queue *queue = per_cpu_ptr(&flush_queue, cpu);
+
+                kfree(queue->entries);
+        }
+
+        return -ENOMEM;
 }
 
 int __init amd_iommu_init_dma_ops(void)
@@ -2552,6 +2589,7 @@ int __init amd_iommu_init_dma_ops(void)
         pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n");
 
         return 0;
+
 }
 
 /*****************************************************************************
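The patch above only allocates and initializes the per-CPU queues; the
code that actually enqueues unmapped ranges and drains them lands in
follow-up patches of this series. As a rough sketch of how these
structures could be consumed — assuming a hypothetical
queue_flush_entry() helper standing in for the per-domain IOTLB flush
and IOVA release:

/* Sketch only -- not part of this patch. */
static void queue_flush_entry(struct flush_queue_entry *entry);

/* Drain all queued ranges; caller must hold queue->lock. */
static void queue_flush(struct flush_queue *queue)
{
        unsigned idx;

        for (idx = 0; idx < queue->next; ++idx)
                queue_flush_entry(queue->entries + idx);

        queue->next = 0;
}

/* Called on unmap instead of flushing the IOTLB immediately. */
static void queue_add(struct dma_ops_domain *dma_dom,
                      unsigned long address, unsigned long pages)
{
        struct flush_queue_entry *entry;
        struct flush_queue *queue;
        unsigned long flags;

        /* Preemption stays disabled while we hold the CPU-local queue. */
        queue = get_cpu_ptr(&flush_queue);
        spin_lock_irqsave(&queue->lock, flags);

        /* Queue full: flush everything queued so far in one batch. */
        if (queue->next == FLUSH_QUEUE_SIZE)
                queue_flush(queue);

        entry = queue->entries + queue->next++;
        entry->iova_pfn = address >> PAGE_SHIFT;
        entry->pages    = pages;
        entry->dma_dom  = dma_dom;

        spin_unlock_irqrestore(&queue->lock, flags);
        put_cpu_ptr(&flush_queue);
}

One queue per CPU keeps queue_add() on a CPU-local lock in the common
case, which is why amd_iommu_init_api() above allocates the entries
arrays with for_each_possible_cpu() rather than sharing one global
queue.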