Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/amd_iommu_init.c   | 140
-rw-r--r--  drivers/iommu/amd_iommu_proto.h  |   7
-rw-r--r--  drivers/iommu/amd_iommu_types.h  |  15
-rw-r--r--  drivers/iommu/irq_remapping.c    |  12
-rw-r--r--  drivers/iommu/msm_iommu_dev.c    |  20
5 files changed, 168 insertions, 26 deletions
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index bf51abb78dee..7acbf351e9af 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -99,7 +99,7 @@ struct ivhd_header {
 	u64 mmio_phys;
 	u16 pci_seg;
 	u16 info;
-	u32 reserved;
+	u32 efr;
 } __attribute__((packed));
 
 /*
@@ -154,6 +154,7 @@ bool amd_iommu_iotlb_sup __read_mostly = true;
 u32 amd_iommu_max_pasids __read_mostly = ~0;
 
 bool amd_iommu_v2_present __read_mostly;
+bool amd_iommu_pc_present __read_mostly;
 
 bool amd_iommu_force_isolation __read_mostly;
 
@@ -369,23 +370,23 @@ static void iommu_disable(struct amd_iommu *iommu)
  * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
  * the system has one.
  */
-static u8 __iomem * __init iommu_map_mmio_space(u64 address)
+static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
 {
-	if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
-		pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
-			address);
+	if (!request_mem_region(address, end, "amd_iommu")) {
+		pr_err("AMD-Vi: Can not reserve memory region %llx-%llx for mmio\n",
+			address, end);
 		pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
 		return NULL;
 	}
 
-	return (u8 __iomem *)ioremap_nocache(address, MMIO_REGION_LENGTH);
+	return (u8 __iomem *)ioremap_nocache(address, end);
 }
 
 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
 {
 	if (iommu->mmio_base)
 		iounmap(iommu->mmio_base);
-	release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
+	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
 }
 
 /****************************************************************************
@@ -1085,7 +1086,18 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 	iommu->cap_ptr = h->cap_ptr;
 	iommu->pci_seg = h->pci_seg;
 	iommu->mmio_phys = h->mmio_phys;
-	iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
+
+	/* Check if IVHD EFR contains proper max banks/counters */
+	if ((h->efr != 0) &&
+	    ((h->efr & (0xF << 13)) != 0) &&
+	    ((h->efr & (0x3F << 17)) != 0)) {
+		iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
+	} else {
+		iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
+	}
+
+	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
+						iommu->mmio_phys_end);
 	if (!iommu->mmio_base)
 		return -ENOMEM;
 
@@ -1160,6 +1172,33 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 	return 0;
 }
 
+
+static void init_iommu_perf_ctr(struct amd_iommu *iommu)
+{
+	u64 val = 0xabcd, val2 = 0;
+
+	if (!iommu_feature(iommu, FEATURE_PC))
+		return;
+
+	amd_iommu_pc_present = true;
+
+	/* Check if the performance counters can be written to */
+	if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) ||
+	    (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) ||
+	    (val != val2)) {
+		pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
+		amd_iommu_pc_present = false;
+		return;
+	}
+
+	pr_info("AMD-Vi: IOMMU performance counters supported\n");
+
+	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
+	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
+	iommu->max_counters = (u8) ((val >> 7) & 0xf);
+}
+
+
 static int iommu_init_pci(struct amd_iommu *iommu)
 {
 	int cap_ptr = iommu->cap_ptr;
@@ -1226,6 +1265,8 @@ static int iommu_init_pci(struct amd_iommu *iommu)
 	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
 		amd_iommu_np_cache = true;
 
+	init_iommu_perf_ctr(iommu);
+
 	if (is_rd890_iommu(iommu->dev)) {
 		int i, j;
 
@@ -1278,7 +1319,7 @@ static void print_iommu_info(void)
 				if (iommu_feature(iommu, (1ULL << i)))
 					pr_cont(" %s", feat_str[i]);
 			}
-		pr_cont("\n");
+			pr_cont("\n");
 		}
 	}
 	if (irq_remapping_enabled)
@@ -2232,3 +2273,84 @@ bool amd_iommu_v2_supported(void)
 	return amd_iommu_v2_present;
 }
 EXPORT_SYMBOL(amd_iommu_v2_supported);
+
+/****************************************************************************
+ *
+ * IOMMU EFR Performance Counter support functionality. This code allows
+ * access to the IOMMU PC functionality.
+ *
+ ****************************************************************************/
+
+u8 amd_iommu_pc_get_max_banks(u16 devid)
+{
+	struct amd_iommu *iommu;
+	u8 ret = 0;
+
+	/* locate the iommu governing the devid */
+	iommu = amd_iommu_rlookup_table[devid];
+	if (iommu)
+		ret = iommu->max_banks;
+
+	return ret;
+}
+EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
+
+bool amd_iommu_pc_supported(void)
+{
+	return amd_iommu_pc_present;
+}
+EXPORT_SYMBOL(amd_iommu_pc_supported);
+
+u8 amd_iommu_pc_get_max_counters(u16 devid)
+{
+	struct amd_iommu *iommu;
+	u8 ret = 0;
+
+	/* locate the iommu governing the devid */
+	iommu = amd_iommu_rlookup_table[devid];
+	if (iommu)
+		ret = iommu->max_counters;
+
+	return ret;
+}
+EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
+
+int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
+				 u64 *value, bool is_write)
+{
+	struct amd_iommu *iommu;
+	u32 offset;
+	u32 max_offset_lim;
+
+	/* Make sure the IOMMU PC resource is available */
+	if (!amd_iommu_pc_present)
+		return -ENODEV;
+
+	/* Locate the iommu associated with the device ID */
+	iommu = amd_iommu_rlookup_table[devid];
+
+	/* Check for valid iommu and pc register indexing */
+	if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7)))
+		return -ENODEV;
+
+	offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);
+
+	/* Limit the offset to the hw defined mmio region aperture */
+	max_offset_lim = (u32)(((0x40|iommu->max_banks) << 12) |
+				(iommu->max_counters << 8) | 0x28);
+	if ((offset < MMIO_CNTR_REG_OFFSET) ||
+	    (offset > max_offset_lim))
+		return -EINVAL;
+
+	if (is_write) {
+		writel((u32)*value, iommu->mmio_base + offset);
+		writel((*value >> 32), iommu->mmio_base + offset + 4);
+	} else {
+		*value = readl(iommu->mmio_base + offset + 4);
+		*value <<= 32;
+		*value = readl(iommu->mmio_base + offset);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index c294961bdd36..95ed6deae47f 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -56,6 +56,13 @@ extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
 extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid);
 extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);
 
+/* IOMMU Performance Counter functions */
+extern bool amd_iommu_pc_supported(void);
+extern u8 amd_iommu_pc_get_max_banks(u16 devid);
+extern u8 amd_iommu_pc_get_max_counters(u16 devid);
+extern int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
+					u64 *value, bool is_write);
+
 #define PPR_SUCCESS			0x0
 #define PPR_INVALID			0x1
 #define PPR_FAILURE			0xf
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 0285a215df16..e400fbe411de 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -38,9 +38,6 @@
 #define ALIAS_TABLE_ENTRY_SIZE		2
 #define RLOOKUP_TABLE_ENTRY_SIZE	(sizeof(void *))
 
-/* Length of the MMIO region for the AMD IOMMU */
-#define MMIO_REGION_LENGTH	0x4000
-
 /* Capability offsets used by the driver */
 #define MMIO_CAP_HDR_OFFSET	0x00
 #define MMIO_RANGE_OFFSET	0x0c
@@ -78,6 +75,10 @@
 #define MMIO_STATUS_OFFSET	0x2020
 #define MMIO_PPR_HEAD_OFFSET	0x2030
 #define MMIO_PPR_TAIL_OFFSET	0x2038
+#define MMIO_CNTR_CONF_OFFSET	0x4000
+#define MMIO_CNTR_REG_OFFSET	0x40000
+#define MMIO_REG_END_OFFSET	0x80000
+
 
 
 /* Extended Feature Bits */
@@ -507,6 +508,10 @@ struct amd_iommu {
 
 	/* physical address of MMIO space */
 	u64 mmio_phys;
+
+	/* physical end address of MMIO space */
+	u64 mmio_phys_end;
+
 	/* virtual address of MMIO space */
 	u8 __iomem *mmio_base;
 
@@ -584,6 +589,10 @@ struct amd_iommu {
 
 	/* The l2 indirect registers */
 	u32 stored_l2[0x83];
+
+	/* The maximum PC banks and counters/bank (PCSup=1) */
+	u8 max_banks;
+	u8 max_counters;
 };
 
 struct devid_map {
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index dcfea4e39be7..39f81aeefcd6 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -51,26 +51,27 @@ static void irq_remapping_disable_io_apic(void)
 
 static int do_setup_msi_irqs(struct pci_dev *dev, int nvec)
 {
-	int node, ret, sub_handle, index = 0;
+	int node, ret, sub_handle, nvec_pow2, index = 0;
 	unsigned int irq;
 	struct msi_desc *msidesc;
 
-	nvec = __roundup_pow_of_two(nvec);
-
 	WARN_ON(!list_is_singular(&dev->msi_list));
 	msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
 	WARN_ON(msidesc->irq);
 	WARN_ON(msidesc->msi_attrib.multiple);
+	WARN_ON(msidesc->nvec_used);
 
 	node = dev_to_node(&dev->dev);
 	irq = __create_irqs(get_nr_irqs_gsi(), nvec, node);
 	if (irq == 0)
 		return -ENOSPC;
 
-	msidesc->msi_attrib.multiple = ilog2(nvec);
+	nvec_pow2 = __roundup_pow_of_two(nvec);
+	msidesc->nvec_used = nvec;
+	msidesc->msi_attrib.multiple = ilog2(nvec_pow2);
 	for (sub_handle = 0; sub_handle < nvec; sub_handle++) {
 		if (!sub_handle) {
-			index = msi_alloc_remapped_irq(dev, irq, nvec);
+			index = msi_alloc_remapped_irq(dev, irq, nvec_pow2);
 			if (index < 0) {
 				ret = index;
 				goto error;
@@ -95,6 +96,7 @@ error:
 	 * IRQs from tearing down again in default_teardown_msi_irqs()
 	 */
 	msidesc->irq = 0;
+	msidesc->nvec_used = 0;
 	msidesc->msi_attrib.multiple = 0;
 
 	return ret;
diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c
index 8e8fb079852d..9144a6beed92 100644
--- a/drivers/iommu/msm_iommu_dev.c
+++ b/drivers/iommu/msm_iommu_dev.c
@@ -29,7 +29,6 @@
 
 #include <mach/iommu_hw-8xxx.h>
 #include <mach/iommu.h>
-#include <mach/clk.h>
 
 struct iommu_ctx_iter_data {
 	/* input */
@@ -160,7 +159,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
 		goto fail;
 	}
 
-	ret = clk_enable(iommu_pclk);
+	ret = clk_prepare_enable(iommu_pclk);
 	if (ret)
 		goto fail_enable;
 
@@ -168,9 +167,9 @@ static int msm_iommu_probe(struct platform_device *pdev)
 
 	if (!IS_ERR(iommu_clk)) {
 		if (clk_get_rate(iommu_clk) == 0)
-			clk_set_min_rate(iommu_clk, 1);
+			clk_set_rate(iommu_clk, 1);
 
-		ret = clk_enable(iommu_clk);
+		ret = clk_prepare_enable(iommu_clk);
 		if (ret) {
 			clk_put(iommu_clk);
 			goto fail_pclk;
@@ -261,7 +260,7 @@ fail_clk:
 		clk_put(iommu_clk);
 	}
 fail_pclk:
-	clk_disable(iommu_pclk);
+	clk_disable_unprepare(iommu_pclk);
 fail_enable:
 	clk_put(iommu_pclk);
 fail:
@@ -275,8 +274,11 @@ static int msm_iommu_remove(struct platform_device *pdev)
 
 	drv = platform_get_drvdata(pdev);
 	if (drv) {
-		if (drv->clk)
+		if (drv->clk) {
+			clk_unprepare(drv->clk);
 			clk_put(drv->clk);
+		}
+		clk_unprepare(drv->pclk);
 		clk_put(drv->pclk);
 		memset(drv, 0, sizeof(*drv));
 		kfree(drv);
@@ -314,14 +316,14 @@ static int msm_iommu_ctx_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&ctx_drvdata->attached_elm);
 	platform_set_drvdata(pdev, ctx_drvdata);
 
-	ret = clk_enable(drvdata->pclk);
+	ret = clk_prepare_enable(drvdata->pclk);
 	if (ret)
 		goto fail;
 
 	if (drvdata->clk) {
-		ret = clk_enable(drvdata->clk);
+		ret = clk_prepare_enable(drvdata->clk);
 		if (ret) {
-			clk_disable(drvdata->pclk);
+			clk_disable_unprepare(drvdata->pclk);
 			goto fail;
 		}
 	}