author		Joerg Roedel <jroedel@suse.de>	2015-05-28 12:41:40 -0400
committer	Joerg Roedel <jroedel@suse.de>	2015-06-11 03:42:22 -0400
commit		0bb6e243d7fbb39fced5bd4a4c83eb49c6e820ce (patch)
tree		7c7307162686a94aa3f8e59b936ae628db973b14 /drivers/iommu/amd_iommu.c
parent		aafd8ba0ca74894b9397e412bbd7f8ea2662ead8 (diff)
iommu/amd: Support IOMMU_DOMAIN_DMA type allocation
This enables allocation of DMA-API default domains from the
IOMMU core and switches allocation of the per-device dma-api
domain over to the IOMMU core too.
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu/amd_iommu.c')
-rw-r--r--	drivers/iommu/amd_iommu.c	311
1 file changed, 73 insertions(+), 238 deletions(-)
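For orientation, the core of the change is that the driver's domain_alloc callback now handles IOMMU_DOMAIN_DMA in addition to IOMMU_DOMAIN_UNMANAGED, so the IOMMU core can allocate the per-group default DMA-API domain itself and the driver-private list of preallocated protection domains can go away. The following is an editor's sketch condensed from the amd_iommu_domain_alloc() hunk further down (comments added here; the geometry setup and error message are trimmed), not the verbatim patch text:

static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
	struct protection_domain *pdomain;
	struct dma_ops_domain *dma_domain;

	switch (type) {
	case IOMMU_DOMAIN_UNMANAGED:
		/* Caller-managed domain: set up an empty page table. */
		pdomain = protection_domain_alloc();
		if (!pdomain)
			return NULL;
		pdomain->mode = PAGE_MODE_3_LEVEL;
		pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pdomain->pt_root) {
			protection_domain_free(pdomain);
			return NULL;
		}
		break;
	case IOMMU_DOMAIN_DMA:
		/* Default DMA-API domain requested by the IOMMU core. */
		dma_domain = dma_ops_domain_alloc();
		if (!dma_domain)
			return NULL;
		pdomain = &dma_domain->domain;
		break;
	default:
		return NULL;
	}

	return &pdomain->domain;
}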
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 0f8776940bf5..27300aece203 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -64,10 +64,6 @@
 
 static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 
-/* A list of preallocated protection domains */
-static LIST_HEAD(iommu_pd_list);
-static DEFINE_SPINLOCK(iommu_pd_list_lock);
-
 /* List of all available dev_data structures */
 static LIST_HEAD(dev_data_list);
 static DEFINE_SPINLOCK(dev_data_list_lock);
@@ -234,31 +230,38 @@ static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
 }
 
 /*
- * In this function the list of preallocated protection domains is traversed to
- * find the domain for a specific device
+ * This function actually applies the mapping to the page table of the
+ * dma_ops domain.
  */
-static struct dma_ops_domain *find_protection_domain(u16 devid)
+static void alloc_unity_mapping(struct dma_ops_domain *dma_dom,
+				struct unity_map_entry *e)
 {
-	struct dma_ops_domain *entry, *ret = NULL;
-	unsigned long flags;
-	u16 alias = amd_iommu_alias_table[devid];
-
-	if (list_empty(&iommu_pd_list))
-		return NULL;
-
-	spin_lock_irqsave(&iommu_pd_list_lock, flags);
+	u64 addr;
 
-	list_for_each_entry(entry, &iommu_pd_list, list) {
-		if (entry->target_dev == devid ||
-		    entry->target_dev == alias) {
-			ret = entry;
-			break;
-		}
+	for (addr = e->address_start; addr < e->address_end;
+	     addr += PAGE_SIZE) {
+		if (addr < dma_dom->aperture_size)
+			__set_bit(addr >> PAGE_SHIFT,
+				  dma_dom->aperture[0]->bitmap);
 	}
+}
+
+/*
+ * Inits the unity mappings required for a specific device
+ */
+static void init_unity_mappings_for_device(struct device *dev,
+					   struct dma_ops_domain *dma_dom)
+{
+	struct unity_map_entry *e;
+	u16 devid;
 
-	spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+	devid = get_device_id(dev);
 
-	return ret;
+	list_for_each_entry(e, &amd_iommu_unity_map, list) {
+		if (!(devid >= e->devid_start && devid <= e->devid_end))
+			continue;
+		alloc_unity_mapping(dma_dom, e);
+	}
 }
 
 /*
@@ -290,11 +293,23 @@ static bool check_device(struct device *dev)
 
 static void init_iommu_group(struct device *dev)
 {
+	struct dma_ops_domain *dma_domain;
+	struct iommu_domain *domain;
 	struct iommu_group *group;
 
 	group = iommu_group_get_for_dev(dev);
-	if (!IS_ERR(group))
-		iommu_group_put(group);
+	if (IS_ERR(group))
+		return;
+
+	domain = iommu_group_default_domain(group);
+	if (!domain)
+		goto out;
+
+	dma_domain = to_pdomain(domain)->priv;
+
+	init_unity_mappings_for_device(dev, dma_domain);
+out:
+	iommu_group_put(group);
 }
 
 static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
@@ -1414,94 +1429,6 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
 	return unmapped;
 }
 
-/*
- * This function checks if a specific unity mapping entry is needed for
- * this specific IOMMU.
- */
-static int iommu_for_unity_map(struct amd_iommu *iommu,
-			       struct unity_map_entry *entry)
-{
-	u16 bdf, i;
-
-	for (i = entry->devid_start; i <= entry->devid_end; ++i) {
-		bdf = amd_iommu_alias_table[i];
-		if (amd_iommu_rlookup_table[bdf] == iommu)
-			return 1;
-	}
-
-	return 0;
-}
-
-/*
- * This function actually applies the mapping to the page table of the
- * dma_ops domain.
- */
-static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
-			     struct unity_map_entry *e)
-{
-	u64 addr;
-	int ret;
-
-	for (addr = e->address_start; addr < e->address_end;
-	     addr += PAGE_SIZE) {
-		ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
-				     PAGE_SIZE);
-		if (ret)
-			return ret;
-		/*
-		 * if unity mapping is in aperture range mark the page
-		 * as allocated in the aperture
-		 */
-		if (addr < dma_dom->aperture_size)
-			__set_bit(addr >> PAGE_SHIFT,
-				  dma_dom->aperture[0]->bitmap);
-	}
-
-	return 0;
-}
-
-/*
- * Init the unity mappings for a specific IOMMU in the system
- *
- * Basically iterates over all unity mapping entries and applies them to
- * the default domain DMA of that IOMMU if necessary.
- */
-static int iommu_init_unity_mappings(struct amd_iommu *iommu)
-{
-	struct unity_map_entry *entry;
-	int ret;
-
-	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
-		if (!iommu_for_unity_map(iommu, entry))
-			continue;
-		ret = dma_ops_unity_map(iommu->default_dom, entry);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-/*
- * Inits the unity mappings required for a specific device
- */
-static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
-					  u16 devid)
-{
-	struct unity_map_entry *e;
-	int ret;
-
-	list_for_each_entry(e, &amd_iommu_unity_map, list) {
-		if (!(devid >= e->devid_start && devid <= e->devid_end))
-			continue;
-		ret = dma_ops_unity_map(dma_dom, e);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
 /****************************************************************************
  *
  * The next functions belong to the address allocator for the dma_ops
@@ -2324,42 +2251,9 @@ static void detach_device(struct device *dev)
 	dev_data->ats.enabled = false;
 }
 
-/*
- * Find out the protection domain structure for a given PCI device. This
- * will give us the pointer to the page table root for example.
- */
-static struct protection_domain *domain_for_device(struct device *dev)
-{
-	struct iommu_dev_data *dev_data;
-	struct protection_domain *dom = NULL;
-	unsigned long flags;
-
-	dev_data = get_dev_data(dev);
-
-	if (dev_data->domain)
-		return dev_data->domain;
-
-	if (dev_data->alias_data != NULL) {
-		struct iommu_dev_data *alias_data = dev_data->alias_data;
-
-		read_lock_irqsave(&amd_iommu_devtable_lock, flags);
-		if (alias_data->domain != NULL) {
-			__attach_device(dev_data, alias_data->domain);
-			dom = alias_data->domain;
-		}
-		read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
-	}
-
-	return dom;
-}
-
 static int amd_iommu_add_device(struct device *dev)
 {
-	struct dma_ops_domain *dma_domain;
-	struct protection_domain *domain;
-	struct iommu_dev_data *dev_data;
 	struct amd_iommu *iommu;
-	unsigned long flags;
 	u16 devid;
 	int ret;
 
@@ -2376,35 +2270,6 @@ static int amd_iommu_add_device(struct device *dev)
 	}
 	init_iommu_group(dev);
 
-	dev_data = get_dev_data(dev);
-
-	if (iommu_pass_through || dev_data->iommu_v2) {
-		/* Make sure passthrough domain is allocated */
-		alloc_passthrough_domain();
-		dev_data->passthrough = true;
-		attach_device(dev, pt_domain);
-		goto out;
-	}
-
-	domain = domain_for_device(dev);
-
-	/* allocate a protection domain if a device is added */
-	dma_domain = find_protection_domain(devid);
-	if (!dma_domain) {
-		dma_domain = dma_ops_domain_alloc();
-		if (!dma_domain)
-			goto out;
-		dma_domain->target_dev = devid;
-
-		init_unity_mappings_for_device(dma_domain, devid);
-
-		spin_lock_irqsave(&iommu_pd_list_lock, flags);
-		list_add_tail(&dma_domain->list, &iommu_pd_list);
-		spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
-	}
-
-	attach_device(dev, &dma_domain->domain);
-
 	dev->archdata.dma_ops = &amd_iommu_dma_ops;
 
 out:
@@ -2445,34 +2310,19 @@ static struct protection_domain *get_domain(struct device *dev)
 {
 	struct protection_domain *domain;
 	struct iommu_domain *io_domain;
-	struct dma_ops_domain *dma_dom;
-	u16 devid = get_device_id(dev);
 
 	if (!check_device(dev))
 		return ERR_PTR(-EINVAL);
 
 	io_domain = iommu_get_domain_for_dev(dev);
-	if (io_domain) {
-		domain = to_pdomain(io_domain);
-		return domain;
-	}
+	if (!io_domain)
+		return NULL;
 
-	domain = domain_for_device(dev);
-	if (domain != NULL && !dma_ops_domain(domain))
+	domain = to_pdomain(io_domain);
+	if (!dma_ops_domain(domain))
 		return ERR_PTR(-EBUSY);
 
-	if (domain != NULL)
-		return domain;
-
-	/* Device not bound yet - bind it */
-	dma_dom = find_protection_domain(devid);
-	if (!dma_dom)
-		dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
-	attach_device(dev, &dma_dom->domain);
-	DUMP_printk("Using protection domain %d for device %s\n",
-		    dma_dom->domain.id, dev_name(dev));
-
-	return &dma_dom->domain;
+	return domain;
 }
 
 static void update_device_table(struct protection_domain *domain)
@@ -3014,23 +2864,7 @@ void __init amd_iommu_init_api(void)
 
 int __init amd_iommu_init_dma_ops(void)
 {
-	struct amd_iommu *iommu;
-	int ret, unhandled;
-
-	/*
-	 * first allocate a default protection domain for every IOMMU we
-	 * found in the system. Devices not assigned to any other
-	 * protection domain will be assigned to the default one.
-	 */
-	for_each_iommu(iommu) {
-		iommu->default_dom = dma_ops_domain_alloc();
-		if (iommu->default_dom == NULL)
-			return -ENOMEM;
-		iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
-		ret = iommu_init_unity_mappings(iommu);
-		if (ret)
-			goto free_domains;
-	}
+	int unhandled;
 
 	iommu_detected = 1;
 	swiotlb = 0;
@@ -3050,14 +2884,6 @@ int __init amd_iommu_init_dma_ops(void)
 	pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n");
 
 	return 0;
-
-free_domains:
-
-	for_each_iommu(iommu) {
-		dma_ops_domain_free(iommu->default_dom);
-	}
-
-	return ret;
 }
 
 /*****************************************************************************
@@ -3142,30 +2968,39 @@ static int alloc_passthrough_domain(void)
 static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
 {
 	struct protection_domain *pdomain;
+	struct dma_ops_domain *dma_domain;
 
-	/* We only support unmanaged domains for now */
-	if (type != IOMMU_DOMAIN_UNMANAGED)
-		return NULL;
+	switch (type) {
+	case IOMMU_DOMAIN_UNMANAGED:
+		pdomain = protection_domain_alloc();
+		if (!pdomain)
+			return NULL;
 
-	pdomain = protection_domain_alloc();
-	if (!pdomain)
-		goto out_free;
+		pdomain->mode = PAGE_MODE_3_LEVEL;
+		pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+		if (!pdomain->pt_root) {
+			protection_domain_free(pdomain);
+			return NULL;
+		}
 
-	pdomain->mode = PAGE_MODE_3_LEVEL;
-	pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
-	if (!pdomain->pt_root)
-		goto out_free;
+		pdomain->domain.geometry.aperture_start = 0;
+		pdomain->domain.geometry.aperture_end = ~0ULL;
+		pdomain->domain.geometry.force_aperture = true;
 
-	pdomain->domain.geometry.aperture_start = 0;
-	pdomain->domain.geometry.aperture_end = ~0ULL;
-	pdomain->domain.geometry.force_aperture = true;
+		break;
+	case IOMMU_DOMAIN_DMA:
+		dma_domain = dma_ops_domain_alloc();
+		if (!dma_domain) {
+			pr_err("AMD-Vi: Failed to allocate\n");
+			return NULL;
+		}
+		pdomain = &dma_domain->domain;
+		break;
+	default:
+		return NULL;
+	}
 
 	return &pdomain->domain;
-
-out_free:
-	protection_domain_free(pdomain);
-
-	return NULL;
 }
 
 static void amd_iommu_domain_free(struct iommu_domain *dom)