author     Kay, Allen M <allen.m.kay@intel.com>    2008-09-09 11:37:29 -0400
committer  Avi Kivity <avi@redhat.com>             2008-10-15 08:24:08 -0400
commit     387179464257921eb9aa3d15cc3ff194f6945a7c
tree       a7f06903688df8a1d3231faf1ab68bf80e032ea6 /drivers/pci/intel-iommu.c
parent     aa3a816b6d0bd59e1a9c548cc7d2dd829f26534f
VT-d: Changes to support KVM
This patch extends the VT-d driver to support KVM device assignment: it
exports helpers for domain allocation and teardown, device context and page
mapping, and IOVA-to-PFN lookup.

[Ben: fixed memory pinning]
[avi: move dma_remapping.h as well]

Signed-off-by: Kay, Allen M <allen.m.kay@intel.com>
Signed-off-by: Weidong Han <weidong.han@intel.com>
Signed-off-by: Ben-Ami Yassour <benami@il.ibm.com>
Signed-off-by: Amit Shah <amit.shah@qumranet.com>
Acked-by: Mark Gross <mgross@linux.intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
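The new exports form a small domain-management API for an external consumer
such as KVM: allocate a domain for an assigned device, set up its context
entry, install IOVA-to-host-physical page mappings, and tear everything down
again. The following is a minimal caller-side sketch, not part of this diff:
the wrapper name example_assign_and_map() is hypothetical, error handling is
abbreviated, and the DMA_PTE_READ/DMA_PTE_WRITE protection bits and header
locations are assumed from this series (dma_remapping.h / intel-iommu.h as
moved to include/linux).

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/intel-iommu.h>
#include <linux/dma_remapping.h>	/* DMA_PTE_READ/DMA_PTE_WRITE (assumed location) */

/* Illustrative only: map one page of guest memory for an assigned device. */
static int example_assign_and_map(struct pci_dev *pdev, u64 iova, u64 hpa)
{
	struct dmar_domain *domain;
	int rc;

	/* find the matching DRHD unit, allocate and init a domain */
	domain = intel_iommu_domain_alloc(pdev);
	if (!domain)
		return -ENODEV;

	/* point the device's context entry at the new domain */
	rc = intel_iommu_context_mapping(domain, pdev);
	if (rc)
		goto out_exit;

	/* install a single IOVA -> host-physical page mapping */
	rc = intel_iommu_page_mapping(domain, iova, hpa, PAGE_SIZE,
				      DMA_PTE_READ | DMA_PTE_WRITE);
	if (rc)
		goto out_detach;

	return 0;

out_detach:
	intel_iommu_detach_dev(domain, pdev->bus->number, pdev->devfn);
out_exit:
	/* clears PTEs, frees page tables, releases the domain */
	intel_iommu_domain_exit(domain);
	return rc;
}

Teardown mirrors the error path above: detach the device's context entry,
then intel_iommu_domain_exit() clears the PTEs and frees the page tables.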
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--	drivers/pci/intel-iommu.c	116
1 file changed, 112 insertions(+), 4 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 389fdd6f4a9f..fc5f2dbf5323 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -33,8 +33,8 @@
 #include <linux/dma-mapping.h>
 #include <linux/mempool.h>
 #include <linux/timer.h>
-#include "iova.h"
-#include "intel-iommu.h"
+#include <linux/iova.h>
+#include <linux/intel-iommu.h>
 #include <asm/proto.h> /* force_iommu in this header in x86-64*/
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
@@ -156,7 +156,7 @@ static inline void *alloc_domain_mem(void)
 	return iommu_kmem_cache_alloc(iommu_domain_cache);
 }
 
-static inline void free_domain_mem(void *vaddr)
+static void free_domain_mem(void *vaddr)
 {
 	kmem_cache_free(iommu_domain_cache, vaddr);
 }
@@ -1341,7 +1341,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
  * find_domain
  * Note: we use struct pci_dev->dev.archdata.iommu stores the info
  */
-struct dmar_domain *
+static struct dmar_domain *
 find_domain(struct pci_dev *pdev)
 {
 	struct device_domain_info *info;
@@ -2318,3 +2318,111 @@ int __init intel_iommu_init(void)
 	return 0;
 }
 
+void intel_iommu_domain_exit(struct dmar_domain *domain)
+{
+	u64 end;
+
+	/* Domain 0 is reserved, so dont process it */
+	if (!domain)
+		return;
+
+	end = DOMAIN_MAX_ADDR(domain->gaw);
+	end = end & (~PAGE_MASK_4K);
+
+	/* clear ptes */
+	dma_pte_clear_range(domain, 0, end);
+
+	/* free page tables */
+	dma_pte_free_pagetable(domain, 0, end);
+
+	iommu_free_domain(domain);
+	free_domain_mem(domain);
+}
+EXPORT_SYMBOL_GPL(intel_iommu_domain_exit);
+
+struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev)
+{
+	struct dmar_drhd_unit *drhd;
+	struct dmar_domain *domain;
+	struct intel_iommu *iommu;
+
+	drhd = dmar_find_matched_drhd_unit(pdev);
+	if (!drhd) {
+		printk(KERN_ERR "intel_iommu_domain_alloc: drhd == NULL\n");
+		return NULL;
+	}
+
+	iommu = drhd->iommu;
+	if (!iommu) {
+		printk(KERN_ERR
+			"intel_iommu_domain_alloc: iommu == NULL\n");
+		return NULL;
+	}
+	domain = iommu_alloc_domain(iommu);
+	if (!domain) {
+		printk(KERN_ERR
+			"intel_iommu_domain_alloc: domain == NULL\n");
+		return NULL;
+	}
+	if (domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+		printk(KERN_ERR
+			"intel_iommu_domain_alloc: domain_init() failed\n");
+		intel_iommu_domain_exit(domain);
+		return NULL;
+	}
+	return domain;
+}
+EXPORT_SYMBOL_GPL(intel_iommu_domain_alloc);
+
+int intel_iommu_context_mapping(
+	struct dmar_domain *domain, struct pci_dev *pdev)
+{
+	int rc;
+	rc = domain_context_mapping(domain, pdev);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(intel_iommu_context_mapping);
+
+int intel_iommu_page_mapping(
+	struct dmar_domain *domain, dma_addr_t iova,
+	u64 hpa, size_t size, int prot)
+{
+	int rc;
+	rc = domain_page_mapping(domain, iova, hpa, size, prot);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(intel_iommu_page_mapping);
+
+void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
+{
+	detach_domain_for_dev(domain, bus, devfn);
+}
+EXPORT_SYMBOL_GPL(intel_iommu_detach_dev);
+
+struct dmar_domain *
+intel_iommu_find_domain(struct pci_dev *pdev)
+{
+	return find_domain(pdev);
+}
+EXPORT_SYMBOL_GPL(intel_iommu_find_domain);
+
+int intel_iommu_found(void)
+{
+	return g_num_of_iommus;
+}
+EXPORT_SYMBOL_GPL(intel_iommu_found);
+
+u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova)
+{
+	struct dma_pte *pte;
+	u64 pfn;
+
+	pfn = 0;
+	pte = addr_to_dma_pte(domain, iova);
+
+	if (pte)
+		pfn = dma_pte_addr(*pte);
+
+	return pfn >> PAGE_SHIFT_4K;
+}
+EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn);
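
The last two exports let a caller probe whether any VT-d units were
initialized and read back an existing translation. A short caller-side
sketch follows; it is illustrative only and not part of this diff, and the
helper name is hypothetical.

#include <linux/types.h>
#include <linux/intel-iommu.h>

/* Illustrative only: report whether an IOVA currently has a PTE in the domain. */
static bool example_iova_is_mapped(struct dmar_domain *domain, u64 iova)
{
	/* g_num_of_iommus == 0 means no DMAR units were set up at boot */
	if (!intel_iommu_found())
		return false;

	/* intel_iommu_iova_to_pfn() returns 0 when no PTE is present */
	return intel_iommu_iova_to_pfn(domain, iova) != 0;
}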