author	Joerg Roedel <joerg.roedel@amd.com>	2009-11-23 09:26:46 -0500
committer	Joerg Roedel <joerg.roedel@amd.com>	2009-11-27 08:20:32 -0500
commit	657cbb6b6cba0f9c98c5299e0c803b2c0e67ea0a (patch)
tree	529b149cbb5d1d80ea0b7cd37e0a638441aaf69e /arch
parent	8793abeb783c12cc37f92f6133fd6468152b98df (diff)
x86/amd-iommu: Use dev->archdata->iommu to store iommu related information
This patch changes the IOMMU code to use dev->archdata->iommu to store
information about the alias device and the domain the device is attached to.
This allows the driver to get rid of the amd_iommu_pd_table in the future.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
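For readers unfamiliar with the archdata hook, below is a minimal sketch (not
part of this commit) of the pattern the patch adopts: driver-private per-device
state is allocated once, parked in the opaque dev->archdata.iommu pointer, and
cast back by a small accessor. The example_* names are invented for
illustration; only dev->archdata.iommu and the field layout mirror the patch.

/*
 * Illustrative sketch only -- not part of this commit.  The example_* names
 * are invented; only the use of dev->archdata.iommu and the two fields
 * mirror the patch.  On x86 the archdata.iommu hook exists when CONFIG_DMAR
 * or (after this patch) CONFIG_AMD_IOMMU is enabled.
 */
#include <linux/device.h>	/* struct device, dev->archdata */
#include <linux/errno.h>	/* -ENOMEM */
#include <linux/slab.h>		/* kzalloc(), kfree(), GFP_KERNEL */

struct protection_domain;	/* defined by the AMD IOMMU driver */

struct example_dev_data {
	struct device *alias;			/* the alias device, if any */
	struct protection_domain *domain;	/* domain the device is bound to */
};

/* The void * hook is opaque to the core; the driver casts it back itself. */
static struct example_dev_data *example_get_dev_data(struct device *dev)
{
	return dev->archdata.iommu;
}

/* Allocate the private data once and hang it off the device. */
static int example_init_device(struct device *dev)
{
	struct example_dev_data *dev_data;

	if (dev->archdata.iommu)		/* already initialized */
		return 0;

	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;

	dev->archdata.iommu = dev_data;
	return 0;
}

/* Release the private data when the device goes away. */
static void example_uninit_device(struct device *dev)
{
	kfree(dev->archdata.iommu);
	dev->archdata.iommu = NULL;
}

In the patch itself these roles are played by iommu_init_device(),
get_dev_data() and iommu_uninit_device(), with iommu_init_device()
additionally resolving the alias device via amd_iommu_alias_table[] and
pci_get_bus_and_slot().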
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/amd_iommu_types.h	8
-rw-r--r--	arch/x86/include/asm/device.h	2
-rw-r--r--	arch/x86/kernel/amd_iommu.c	109
3 files changed, 95 insertions(+), 24 deletions(-)
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 02b6a0fd863c..9eaa27b46860 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -248,6 +248,14 @@ struct protection_domain {
 };
 
 /*
+ * This struct contains device specific data for the IOMMU
+ */
+struct iommu_dev_data {
+	struct device *alias;		  /* The Alias Device */
+	struct protection_domain *domain; /* Domain the device is bound to */
+};
+
+/*
  * For dynamic growth the aperture size is split into ranges of 128MB of
  * DMA address space each. This struct represents one such range.
  */
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index cee34e9ca45b..029f230ab637 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -8,7 +8,7 @@ struct dev_archdata {
 #ifdef CONFIG_X86_64
 struct dma_map_ops *dma_ops;
 #endif
-#ifdef CONFIG_DMAR
+#if defined(CONFIG_DMAR) || defined(CONFIG_AMD_IOMMU)
 	void *iommu; /* hook for IOMMU specific extension */
 #endif
 };
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index ed58a1688391..3214e8806f95 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -73,6 +73,11 @@ static inline u16 get_device_id(struct device *dev)
 	return calc_devid(pdev->bus->number, pdev->devfn);
 }
 
+static struct iommu_dev_data *get_dev_data(struct device *dev)
+{
+	return dev->archdata.iommu;
+}
+
 /*
  * In this function the list of preallocated protection domains is traversed to
  * find the domain for a specific device
@@ -128,6 +133,35 @@ static bool check_device(struct device *dev)
 	return true;
 }
 
+static int iommu_init_device(struct device *dev)
+{
+	struct iommu_dev_data *dev_data;
+	struct pci_dev *pdev;
+	u16 devid, alias;
+
+	if (dev->archdata.iommu)
+		return 0;
+
+	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+	if (!dev_data)
+		return -ENOMEM;
+
+	devid = get_device_id(dev);
+	alias = amd_iommu_alias_table[devid];
+	pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff);
+	if (pdev)
+		dev_data->alias = &pdev->dev;
+
+	dev->archdata.iommu = dev_data;
+
+
+	return 0;
+}
+
+static void iommu_uninit_device(struct device *dev)
+{
+	kfree(dev->archdata.iommu);
+}
 #ifdef CONFIG_AMD_IOMMU_STATS
 
 /*
@@ -1346,28 +1380,39 @@ static void clear_dte_entry(u16 devid)
 static int __attach_device(struct device *dev,
 			   struct protection_domain *domain)
 {
-	u16 devid = get_device_id(dev);
-	u16 alias = amd_iommu_alias_table[devid];
+	struct iommu_dev_data *dev_data, *alias_data;
+	u16 devid, alias;
+
+	devid = get_device_id(dev);
+	alias = amd_iommu_alias_table[devid];
+	dev_data = get_dev_data(dev);
+	alias_data = get_dev_data(dev_data->alias);
+	if (!alias_data)
+		return -EINVAL;
 
 	/* lock domain */
 	spin_lock(&domain->lock);
 
 	/* Some sanity checks */
-	if (amd_iommu_pd_table[alias] != NULL &&
-	    amd_iommu_pd_table[alias] != domain)
+	if (alias_data->domain != NULL &&
+	    alias_data->domain != domain)
 		return -EBUSY;
 
-	if (amd_iommu_pd_table[devid] != NULL &&
-	    amd_iommu_pd_table[devid] != domain)
+	if (dev_data->domain != NULL &&
+	    dev_data->domain != domain)
 		return -EBUSY;
 
 	/* Do real assignment */
 	if (alias != devid &&
-	    amd_iommu_pd_table[alias] == NULL)
+	    alias_data->domain == NULL) {
+		alias_data->domain = domain;
 		set_dte_entry(alias, domain);
+	}
 
-	if (amd_iommu_pd_table[devid] == NULL)
+	if (dev_data->domain == NULL) {
+		dev_data->domain = domain;
 		set_dte_entry(devid, domain);
+	}
 
 	/* ready */
 	spin_unlock(&domain->lock);
@@ -1406,10 +1451,12 @@ static void __detach_device(struct device *dev)
 {
 	u16 devid = get_device_id(dev);
 	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+	struct iommu_dev_data *dev_data = get_dev_data(dev);
 
 	BUG_ON(!iommu);
 
 	clear_dte_entry(devid);
+	dev_data->domain = NULL;
 
 	/*
 	 * If we run in passthrough mode the device must be assigned to the
@@ -1439,18 +1486,23 @@ static void detach_device(struct device *dev)
 static struct protection_domain *domain_for_device(struct device *dev)
 {
 	struct protection_domain *dom;
+	struct iommu_dev_data *dev_data, *alias_data;
 	unsigned long flags;
 	u16 devid, alias;
 
 	devid = get_device_id(dev);
 	alias = amd_iommu_alias_table[devid];
+	dev_data = get_dev_data(dev);
+	alias_data = get_dev_data(dev_data->alias);
+	if (!alias_data)
+		return NULL;
 
 	read_lock_irqsave(&amd_iommu_devtable_lock, flags);
-	dom = amd_iommu_pd_table[devid];
+	dom = dev_data->domain;
 	if (dom == NULL &&
-	    amd_iommu_pd_table[alias] != NULL) {
-		__attach_device(dev, amd_iommu_pd_table[alias]);
-		dom = amd_iommu_pd_table[devid];
+	    alias_data->domain != NULL) {
+		__attach_device(dev, alias_data->domain);
+		dom = alias_data->domain;
 	}
 
 	read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
@@ -1473,14 +1525,12 @@ static int device_change_notifier(struct notifier_block *nb,
 
 	devid = get_device_id(dev);
 	iommu = amd_iommu_rlookup_table[devid];
-	domain = domain_for_device(dev);
-
-	if (domain && !dma_ops_domain(domain))
-		WARN_ONCE(1, "AMD IOMMU WARNING: device %s already bound "
-			  "to a non-dma-ops domain\n", dev_name(dev));
 
 	switch (action) {
 	case BUS_NOTIFY_UNBOUND_DRIVER:
+
+		domain = domain_for_device(dev);
+
 		if (!domain)
 			goto out;
 		if (iommu_pass_through)
@@ -1488,6 +1538,11 @@ static int device_change_notifier(struct notifier_block *nb,
 		detach_device(dev);
 		break;
 	case BUS_NOTIFY_ADD_DEVICE:
+
+		iommu_init_device(dev);
+
+		domain = domain_for_device(dev);
+
 		/* allocate a protection domain if a device is added */
 		dma_domain = find_protection_domain(devid);
 		if (dma_domain)
@@ -1502,6 +1557,10 @@ static int device_change_notifier(struct notifier_block *nb,
 		spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
 
 		break;
+	case BUS_NOTIFY_DEL_DEVICE:
+
+		iommu_uninit_device(dev);
+
 	default:
 		goto out;
 	}
@@ -2079,6 +2138,8 @@ static void prealloc_protection_domains(void)
 		if (!check_device(&dev->dev))
 			continue;
 
+		iommu_init_device(&dev->dev);
+
 		/* Is there already any domain for it? */
 		if (domain_for_device(&dev->dev))
 			continue;
@@ -2270,6 +2331,7 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
 static void amd_iommu_detach_device(struct iommu_domain *dom,
 				    struct device *dev)
 {
+	struct iommu_dev_data *dev_data = dev->archdata.iommu;
 	struct amd_iommu *iommu;
 	u16 devid;
 
@@ -2278,7 +2340,7 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
 
 	devid = get_device_id(dev);
 
-	if (amd_iommu_pd_table[devid] != NULL)
+	if (dev_data->domain != NULL)
 		detach_device(dev);
 
 	iommu = amd_iommu_rlookup_table[devid];
@@ -2293,7 +2355,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 				  struct device *dev)
 {
 	struct protection_domain *domain = dom->priv;
-	struct protection_domain *old_domain;
+	struct iommu_dev_data *dev_data;
 	struct amd_iommu *iommu;
 	int ret;
 	u16 devid;
@@ -2301,14 +2363,15 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 	if (!check_device(dev))
 		return -EINVAL;
 
+	dev_data = dev->archdata.iommu;
+
 	devid = get_device_id(dev);
 
 	iommu = amd_iommu_rlookup_table[devid];
 	if (!iommu)
 		return -EINVAL;
 
-	old_domain = amd_iommu_pd_table[devid];
-	if (old_domain)
+	if (dev_data->domain)
 		detach_device(dev);
 
 	ret = attach_device(dev, domain);