about summary refs log tree commit diff stats
path: root/drivers/iommu/amd_iommu.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/iommu/amd_iommu.c')
-rw-r--r--  drivers/iommu/amd_iommu.c | 99
1 file changed, 71 insertions(+), 28 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 625626391f2d..6d1cbdfc9b2a 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -256,11 +256,21 @@ static bool check_device(struct device *dev)
256 return true; 256 return true;
257} 257}
258 258
259static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
260{
261 pci_dev_put(*from);
262 *from = to;
263}
264
265#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
266
259static int iommu_init_device(struct device *dev) 267static int iommu_init_device(struct device *dev)
260{ 268{
261 struct pci_dev *pdev = to_pci_dev(dev); 269 struct pci_dev *dma_pdev, *pdev = to_pci_dev(dev);
262 struct iommu_dev_data *dev_data; 270 struct iommu_dev_data *dev_data;
271 struct iommu_group *group;
263 u16 alias; 272 u16 alias;
273 int ret;
264 274
265 if (dev->archdata.iommu) 275 if (dev->archdata.iommu)
266 return 0; 276 return 0;
@@ -281,8 +291,43 @@ static int iommu_init_device(struct device *dev)
281 return -ENOTSUPP; 291 return -ENOTSUPP;
282 } 292 }
283 dev_data->alias_data = alias_data; 293 dev_data->alias_data = alias_data;
294
295 dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
296 } else
297 dma_pdev = pci_dev_get(pdev);
298
299 swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
300
301 if (dma_pdev->multifunction &&
302 !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
303 swap_pci_ref(&dma_pdev,
304 pci_get_slot(dma_pdev->bus,
305 PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
306 0)));
307
308 while (!pci_is_root_bus(dma_pdev->bus)) {
309 if (pci_acs_path_enabled(dma_pdev->bus->self,
310 NULL, REQ_ACS_FLAGS))
311 break;
312
313 swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self));
314 }
315
316 group = iommu_group_get(&dma_pdev->dev);
317 pci_dev_put(dma_pdev);
318 if (!group) {
319 group = iommu_group_alloc();
320 if (IS_ERR(group))
321 return PTR_ERR(group);
284 } 322 }
285 323
324 ret = iommu_group_add_device(group, dev);
325
326 iommu_group_put(group);
327
328 if (ret)
329 return ret;
330
286 if (pci_iommuv2_capable(pdev)) { 331 if (pci_iommuv2_capable(pdev)) {
287 struct amd_iommu *iommu; 332 struct amd_iommu *iommu;
288 333
@@ -311,6 +356,8 @@ static void iommu_ignore_device(struct device *dev)
311 356
312static void iommu_uninit_device(struct device *dev) 357static void iommu_uninit_device(struct device *dev)
313{ 358{
359 iommu_group_remove_device(dev);
360
314 /* 361 /*
315 * Nothing to do here - we keep dev_data around for unplugged devices 362 * Nothing to do here - we keep dev_data around for unplugged devices
316 * and reuse it when the device is re-plugged - not doing so would 363 * and reuse it when the device is re-plugged - not doing so would
@@ -384,7 +431,6 @@ DECLARE_STATS_COUNTER(invalidate_iotlb);
384DECLARE_STATS_COUNTER(invalidate_iotlb_all); 431DECLARE_STATS_COUNTER(invalidate_iotlb_all);
385DECLARE_STATS_COUNTER(pri_requests); 432DECLARE_STATS_COUNTER(pri_requests);
386 433
387
388static struct dentry *stats_dir; 434static struct dentry *stats_dir;
389static struct dentry *de_fflush; 435static struct dentry *de_fflush;
390 436
@@ -2073,7 +2119,7 @@ out_err:
2073/* FIXME: Move this to PCI code */ 2119/* FIXME: Move this to PCI code */
2074#define PCI_PRI_TLP_OFF (1 << 15) 2120#define PCI_PRI_TLP_OFF (1 << 15)
2075 2121
2076bool pci_pri_tlp_required(struct pci_dev *pdev) 2122static bool pci_pri_tlp_required(struct pci_dev *pdev)
2077{ 2123{
2078 u16 status; 2124 u16 status;
2079 int pos; 2125 int pos;
@@ -2254,6 +2300,18 @@ static int device_change_notifier(struct notifier_block *nb,
2254 2300
2255 iommu_init_device(dev); 2301 iommu_init_device(dev);
2256 2302
2303 /*
2304 * dev_data is still NULL and
2305 * got initialized in iommu_init_device
2306 */
2307 dev_data = get_dev_data(dev);
2308
2309 if (iommu_pass_through || dev_data->iommu_v2) {
2310 dev_data->passthrough = true;
2311 attach_device(dev, pt_domain);
2312 break;
2313 }
2314
2257 domain = domain_for_device(dev); 2315 domain = domain_for_device(dev);
2258 2316
2259 /* allocate a protection domain if a device is added */ 2317 /* allocate a protection domain if a device is added */
@@ -2271,10 +2329,7 @@ static int device_change_notifier(struct notifier_block *nb,
2271 2329
2272 dev_data = get_dev_data(dev); 2330 dev_data = get_dev_data(dev);
2273 2331
2274 if (!dev_data->passthrough) 2332 dev->archdata.dma_ops = &amd_iommu_dma_ops;
2275 dev->archdata.dma_ops = &amd_iommu_dma_ops;
2276 else
2277 dev->archdata.dma_ops = &nommu_dma_ops;
2278 2333
2279 break; 2334 break;
2280 case BUS_NOTIFY_DEL_DEVICE: 2335 case BUS_NOTIFY_DEL_DEVICE:
@@ -2972,6 +3027,11 @@ int __init amd_iommu_init_dma_ops(void)
2972 3027
2973 amd_iommu_stats_init(); 3028 amd_iommu_stats_init();
2974 3029
3030 if (amd_iommu_unmap_flush)
3031 pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
3032 else
3033 pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n");
3034
2975 return 0; 3035 return 0;
2976 3036
2977free_domains: 3037free_domains:
@@ -3078,6 +3138,10 @@ static int amd_iommu_domain_init(struct iommu_domain *dom)
3078 3138
3079 dom->priv = domain; 3139 dom->priv = domain;
3080 3140
3141 dom->geometry.aperture_start = 0;
3142 dom->geometry.aperture_end = ~0ULL;
3143 dom->geometry.force_aperture = true;
3144
3081 return 0; 3145 return 0;
3082 3146
3083out_free: 3147out_free:
@@ -3236,26 +3300,6 @@ static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
3236 return 0; 3300 return 0;
3237} 3301}
3238 3302
3239static int amd_iommu_device_group(struct device *dev, unsigned int *groupid)
3240{
3241 struct iommu_dev_data *dev_data = dev->archdata.iommu;
3242 struct pci_dev *pdev = to_pci_dev(dev);
3243 u16 devid;
3244
3245 if (!dev_data)
3246 return -ENODEV;
3247
3248 if (pdev->is_virtfn || !iommu_group_mf)
3249 devid = dev_data->devid;
3250 else
3251 devid = calc_devid(pdev->bus->number,
3252 PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
3253
3254 *groupid = amd_iommu_alias_table[devid];
3255
3256 return 0;
3257}
3258
3259static struct iommu_ops amd_iommu_ops = { 3303static struct iommu_ops amd_iommu_ops = {
3260 .domain_init = amd_iommu_domain_init, 3304 .domain_init = amd_iommu_domain_init,
3261 .domain_destroy = amd_iommu_domain_destroy, 3305 .domain_destroy = amd_iommu_domain_destroy,
@@ -3265,7 +3309,6 @@ static struct iommu_ops amd_iommu_ops = {
3265 .unmap = amd_iommu_unmap, 3309 .unmap = amd_iommu_unmap,
3266 .iova_to_phys = amd_iommu_iova_to_phys, 3310 .iova_to_phys = amd_iommu_iova_to_phys,
3267 .domain_has_cap = amd_iommu_domain_has_cap, 3311 .domain_has_cap = amd_iommu_domain_has_cap,
3268 .device_group = amd_iommu_device_group,
3269 .pgsize_bitmap = AMD_IOMMU_PGSIZES, 3312 .pgsize_bitmap = AMD_IOMMU_PGSIZES,
3270}; 3313};
3271 3314