Diffstat (limited to 'drivers/iommu/intel-svm.c')
 drivers/iommu/intel-svm.c | 28 +++++++++++++++++-----------
 1 file changed, 17 insertions(+), 11 deletions(-)
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 8ebb3530afa7..cb72e0011310 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -39,10 +39,18 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
 	struct page *pages;
 	int order;
 
-	order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
-	if (order < 0)
-		order = 0;
-
+	/* Start at 2 because it's defined as 2^(1+PSS) */
+	iommu->pasid_max = 2 << ecap_pss(iommu->ecap);
+
+	/* Eventually I'm promised we will get a multi-level PASID table
+	 * and it won't have to be physically contiguous. Until then,
+	 * limit the size because 8MiB contiguous allocations can be hard
+	 * to come by. The limit of 0x20000, which is 1MiB for each of
+	 * the PASID and PASID-state tables, is somewhat arbitrary. */
+	if (iommu->pasid_max > 0x20000)
+		iommu->pasid_max = 0x20000;
+
+	order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
 	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!pages) {
 		pr_warn("IOMMU: %s: Failed to allocate PASID table\n",
@@ -53,6 +61,8 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
 	pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order);
 
 	if (ecap_dis(iommu->ecap)) {
+		/* Just making it explicit... */
+		BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry));
 		pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
 		if (pages)
 			iommu->pasid_state_table = page_address(pages);
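The new BUILD_BUG_ON() documents why a single `order` can serve both tables: the PASID-state table is allocated with the order computed from sizeof(struct pasid_entry), so the build breaks if the two entry types ever diverge in size. A rough userspace equivalent of the macro, for illustration only (the kernel's real definition differs):

    /* Compiles only while cond is false: a true cond yields a
     * negative array size, which is a hard compile-time error. */
    #define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))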
@@ -68,11 +78,7 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
 
 int intel_svm_free_pasid_tables(struct intel_iommu *iommu)
 {
-	int order;
-
-	order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
-	if (order < 0)
-		order = 0;
+	int order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
 
 	if (iommu->pasid_table) {
 		free_pages((unsigned long)iommu->pasid_table, order);
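Allocation and free now compute `order` identically from pasid_max, so the free side can no longer drift from the alloc side. For reference, get_order(size) yields the smallest n such that 2^n pages cover size; a simplified sketch assuming PAGE_SHIFT == 12, the kernel helper being the authoritative version:

    static int my_get_order(unsigned long size)
    {
    	int order = 0;

    	size = (size - 1) >> 12;	/* whole pages minus one */
    	while (size) {
    		order++;
    		size >>= 1;
    	}
    	return order;			/* e.g. 1 MiB -> order 8 */
    }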
@@ -371,8 +377,8 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
 	}
 	svm->iommu = iommu;
 
-	if (pasid_max > 2 << ecap_pss(iommu->ecap))
-		pasid_max = 2 << ecap_pss(iommu->ecap);
+	if (pasid_max > iommu->pasid_max)
+		pasid_max = iommu->pasid_max;
 
 	/* Do not use PASID 0 in caching mode (virtualised IOMMU) */
 	ret = idr_alloc(&iommu->pasid_idr, svm,
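The clamp feeds directly into the PASID allocation that follows; the idr_alloc() call is truncated above, so the trailing arguments in this sketch are assumptions, not the driver's exact code. idr_alloc(idr, ptr, start, end, gfp) returns the lowest free ID in [start, end) and binds it to ptr:

    /* Hedged sketch only; the real trailing arguments are not shown above.
     * Caching-mode (virtualised) IOMMUs reserve PASID 0, hence start at 1. */
    int start = cap_caching_mode(iommu->cap) ? 1 : 0;
    int pasid = idr_alloc(&iommu->pasid_idr, svm, start, pasid_max, GFP_KERNEL);

    if (pasid < 0)
    	return pasid;	/* -ENOMEM, or -ENOSPC if the range is exhausted */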