author     Joerg Roedel <jroedel@suse.de>   2015-05-28 12:41:39 -0400
committer  Joerg Roedel <jroedel@suse.de>   2015-06-11 03:42:21 -0400
commit     aafd8ba0ca74894b9397e412bbd7f8ea2662ead8 (patch)
tree       db6fdf9068f347510a7ac8aa561bc0498c8ba874
parent     063071dff53858027e95d3cfcedb1780952302ad (diff)
iommu/amd: Implement add_device and remove_device
Implement these two iommu-ops call-backs to make use of the
initialization and notifier features of the iommu core.
Signed-off-by: Joerg Roedel <jroedel@suse.de>
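
With these call-backs in place, device-lifetime handling moves into the iommu core: once the driver registers its iommu_ops on the PCI bus, the core calls ->add_device() for every device already present and, through its own bus notifier, for devices added or removed later. Below is a minimal sketch of that wiring with placeholder callback bodies; the names mirror the driver's, but only bus_set_iommu() and the two ops fields are meant literally, the real implementations are in the diff that follows.

#include <linux/iommu.h>
#include <linux/pci.h>

/* Placeholder bodies - the real work is done in the patch below. */
static int amd_iommu_add_device(struct device *dev)
{
	/* set up per-device data, the IOMMU group and a DMA-ops domain */
	return 0;
}

static void amd_iommu_remove_device(struct device *dev)
{
	/* tear down what add_device set up */
}

static const struct iommu_ops amd_iommu_ops = {
	/* map/unmap/attach callbacks omitted from this sketch */
	.add_device	= amd_iommu_add_device,
	.remove_device	= amd_iommu_remove_device,
};

/*
 * Registering the ops is what makes the core invoke the callbacks for
 * existing and hot-plugged devices, so the driver no longer needs its
 * own bus notifier or the init/uninit loops over all PCI devices.
 */
static int __init amd_iommu_init_api(void)
{
	return bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
}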
-rw-r--r--  drivers/iommu/amd_iommu.c       | 210
-rw-r--r--  drivers/iommu/amd_iommu_init.c  |  31
2 files changed, 63 insertions, 178 deletions
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index ae7d636d3099..0f8776940bf5 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -119,7 +119,7 @@ struct iommu_cmd {
 struct kmem_cache *amd_iommu_irq_cache;
 
 static void update_domain(struct protection_domain *domain);
-static int __init alloc_passthrough_domain(void);
+static int alloc_passthrough_domain(void);
 
 /****************************************************************************
  *
@@ -434,64 +434,15 @@ static void iommu_uninit_device(struct device *dev)
 	/* Unlink from alias, it may change if another device is re-plugged */
 	dev_data->alias_data = NULL;
 
+	/* Remove dma-ops */
+	dev->archdata.dma_ops = NULL;
+
 	/*
 	 * We keep dev_data around for unplugged devices and reuse it when the
 	 * device is re-plugged - not doing so would introduce a ton of races.
 	 */
 }
 
-void __init amd_iommu_uninit_devices(void)
-{
-	struct iommu_dev_data *dev_data, *n;
-	struct pci_dev *pdev = NULL;
-
-	for_each_pci_dev(pdev) {
-
-		if (!check_device(&pdev->dev))
-			continue;
-
-		iommu_uninit_device(&pdev->dev);
-	}
-
-	/* Free all of our dev_data structures */
-	list_for_each_entry_safe(dev_data, n, &dev_data_list, dev_data_list)
-		free_dev_data(dev_data);
-}
-
-int __init amd_iommu_init_devices(void)
-{
-	struct pci_dev *pdev = NULL;
-	int ret = 0;
-
-	for_each_pci_dev(pdev) {
-
-		if (!check_device(&pdev->dev))
-			continue;
-
-		ret = iommu_init_device(&pdev->dev);
-		if (ret == -ENOTSUPP)
-			iommu_ignore_device(&pdev->dev);
-		else if (ret)
-			goto out_free;
-	}
-
-	/*
-	 * Initialize IOMMU groups only after iommu_init_device() has
-	 * had a chance to populate any IVRS defined aliases.
-	 */
-	for_each_pci_dev(pdev) {
-		if (check_device(&pdev->dev))
-			init_iommu_group(&pdev->dev);
-	}
-
-	return 0;
-
-out_free:
-
-	amd_iommu_uninit_devices();
-
-	return ret;
-}
 #ifdef CONFIG_AMD_IOMMU_STATS
 
 /*
@@ -2402,81 +2353,79 @@ static struct protection_domain *domain_for_device(struct device *dev)
 	return dom;
 }
 
-static int device_change_notifier(struct notifier_block *nb,
-				  unsigned long action, void *data)
+static int amd_iommu_add_device(struct device *dev)
 {
 	struct dma_ops_domain *dma_domain;
 	struct protection_domain *domain;
 	struct iommu_dev_data *dev_data;
-	struct device *dev = data;
 	struct amd_iommu *iommu;
 	unsigned long flags;
 	u16 devid;
+	int ret;
 
-	if (!check_device(dev))
+	if (!check_device(dev) || get_dev_data(dev))
 		return 0;
 
 	devid = get_device_id(dev);
 	iommu = amd_iommu_rlookup_table[devid];
-	dev_data = get_dev_data(dev);
-
-	switch (action) {
-	case BUS_NOTIFY_ADD_DEVICE:
-
-		iommu_init_device(dev);
-		init_iommu_group(dev);
 
-		/*
-		 * dev_data is still NULL and
-		 * got initialized in iommu_init_device
-		 */
-		dev_data = get_dev_data(dev);
+	ret = iommu_init_device(dev);
+	if (ret == -ENOTSUPP) {
+		iommu_ignore_device(dev);
+		goto out;
+	}
+	init_iommu_group(dev);
 
-		if (iommu_pass_through || dev_data->iommu_v2) {
-			dev_data->passthrough = true;
-			attach_device(dev, pt_domain);
-			break;
-		}
+	dev_data = get_dev_data(dev);
 
-		domain = domain_for_device(dev);
+	if (iommu_pass_through || dev_data->iommu_v2) {
+		/* Make sure passthrough domain is allocated */
+		alloc_passthrough_domain();
+		dev_data->passthrough = true;
+		attach_device(dev, pt_domain);
+		goto out;
+	}
 
-		/* allocate a protection domain if a device is added */
-		dma_domain = find_protection_domain(devid);
-		if (!dma_domain) {
-			dma_domain = dma_ops_domain_alloc();
-			if (!dma_domain)
-				goto out;
-			dma_domain->target_dev = devid;
+	domain = domain_for_device(dev);
 
-			spin_lock_irqsave(&iommu_pd_list_lock, flags);
-			list_add_tail(&dma_domain->list, &iommu_pd_list);
-			spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
-		}
+	/* allocate a protection domain if a device is added */
+	dma_domain = find_protection_domain(devid);
+	if (!dma_domain) {
+		dma_domain = dma_ops_domain_alloc();
+		if (!dma_domain)
+			goto out;
+		dma_domain->target_dev = devid;
 
-		dev->archdata.dma_ops = &amd_iommu_dma_ops;
+		init_unity_mappings_for_device(dma_domain, devid);
 
-		break;
-	case BUS_NOTIFY_REMOVED_DEVICE:
+		spin_lock_irqsave(&iommu_pd_list_lock, flags);
+		list_add_tail(&dma_domain->list, &iommu_pd_list);
+		spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+	}
 
-		iommu_uninit_device(dev);
+	attach_device(dev, &dma_domain->domain);
 
-	default:
-		goto out;
-	}
+	dev->archdata.dma_ops = &amd_iommu_dma_ops;
 
+out:
 	iommu_completion_wait(iommu);
 
-out:
 	return 0;
 }
 
-static struct notifier_block device_nb = {
-	.notifier_call = device_change_notifier,
-};
-
-void amd_iommu_init_notifier(void)
+static void amd_iommu_remove_device(struct device *dev)
 {
-	bus_register_notifier(&pci_bus_type, &device_nb);
+	struct amd_iommu *iommu;
+	u16 devid;
+
+	if (!check_device(dev))
+		return;
+
+	devid = get_device_id(dev);
+	iommu = amd_iommu_rlookup_table[devid];
+
+	iommu_uninit_device(dev);
+	iommu_completion_wait(iommu);
 }
 
 /*****************************************************************************
@@ -3018,54 +2967,6 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask)
 	return check_device(dev);
 }
 
-/*
- * The function for pre-allocating protection domains.
- *
- * If the driver core informs the DMA layer if a driver grabs a device
- * we don't need to preallocate the protection domains anymore.
- * For now we have to.
- */
-static void __init prealloc_protection_domains(void)
-{
-	struct iommu_dev_data *dev_data;
-	struct dma_ops_domain *dma_dom;
-	struct pci_dev *dev = NULL;
-	u16 devid;
-
-	for_each_pci_dev(dev) {
-
-		/* Do we handle this device? */
-		if (!check_device(&dev->dev))
-			continue;
-
-		dev_data = get_dev_data(&dev->dev);
-		if (!amd_iommu_force_isolation && dev_data->iommu_v2) {
-			/* Make sure passthrough domain is allocated */
-			alloc_passthrough_domain();
-			dev_data->passthrough = true;
-			attach_device(&dev->dev, pt_domain);
-			pr_info("AMD-Vi: Using passthrough domain for device %s\n",
-				dev_name(&dev->dev));
-		}
-
-		/* Is there already any domain for it? */
-		if (domain_for_device(&dev->dev))
-			continue;
-
-		devid = get_device_id(&dev->dev);
-
-		dma_dom = dma_ops_domain_alloc();
-		if (!dma_dom)
-			continue;
-		init_unity_mappings_for_device(dma_dom, devid);
-		dma_dom->target_dev = devid;
-
-		attach_device(&dev->dev, &dma_dom->domain);
-
-		list_add_tail(&dma_dom->list, &iommu_pd_list);
-	}
-}
-
 static struct dma_map_ops amd_iommu_dma_ops = {
 	.alloc = alloc_coherent,
 	.free = free_coherent,
@@ -3131,11 +3032,6 @@ int __init amd_iommu_init_dma_ops(void)
 			goto free_domains;
 	}
 
-	/*
-	 * Pre-allocate the protection domains for each device.
-	 */
-	prealloc_protection_domains();
-
 	iommu_detected = 1;
 	swiotlb = 0;
 
@@ -3228,7 +3124,7 @@ out_err:
 	return NULL;
 }
 
-static int __init alloc_passthrough_domain(void)
+static int alloc_passthrough_domain(void)
 {
 	if (pt_domain != NULL)
 		return 0;
@@ -3470,6 +3366,8 @@ static const struct iommu_ops amd_iommu_ops = {
 	.unmap = amd_iommu_unmap,
 	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = amd_iommu_iova_to_phys,
+	.add_device = amd_iommu_add_device,
+	.remove_device = amd_iommu_remove_device,
 	.get_dm_regions = amd_iommu_get_dm_regions,
 	.put_dm_regions = amd_iommu_put_dm_regions,
 	.pgsize_bitmap = AMD_IOMMU_PGSIZES,
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 450ef5001a65..e4a6e405e35d 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -226,6 +226,7 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
 
 static int amd_iommu_enable_interrupts(void);
 static int __init iommu_go_to_state(enum iommu_init_state state);
+static void init_device_table_dma(void);
 
 static inline void update_last_devid(u16 devid)
 {
@@ -1385,7 +1386,12 @@ static int __init amd_iommu_init_pci(void)
 			break;
 	}
 
-	ret = amd_iommu_init_devices();
+	init_device_table_dma();
+
+	for_each_iommu(iommu)
+		iommu_flush_all_caches(iommu);
+
+	amd_iommu_init_api();
 
 	print_iommu_info();
 
@@ -1825,8 +1831,6 @@ static bool __init check_ioapic_information(void)
 
 static void __init free_dma_resources(void)
 {
-	amd_iommu_uninit_devices();
-
 	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
 		   get_order(MAX_DOMAIN_ID/8));
 
@@ -2019,27 +2023,10 @@ static bool detect_ivrs(void)
 
 static int amd_iommu_init_dma(void)
 {
-	struct amd_iommu *iommu;
-	int ret;
-
 	if (iommu_pass_through)
-		ret = amd_iommu_init_passthrough();
+		return amd_iommu_init_passthrough();
 	else
-		ret = amd_iommu_init_dma_ops();
-
-	if (ret)
-		return ret;
-
-	init_device_table_dma();
-
-	for_each_iommu(iommu)
-		iommu_flush_all_caches(iommu);
-
-	amd_iommu_init_api();
-
-	amd_iommu_init_notifier();
-
-	return 0;
+		return amd_iommu_init_dma_ops();
 }
 
 /****************************************************************************