diff options
Diffstat (limited to 'drivers/pci/controller/vmd.c')
-rw-r--r-- | drivers/pci/controller/vmd.c | 48 |
1 file changed, 17 insertions, 31 deletions
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index e50b0b5815ff..3890812cdf87 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c | |||
@@ -307,39 +307,32 @@ static struct device *to_vmd_dev(struct device *dev) | |||
307 | return &vmd->dev->dev; | 307 | return &vmd->dev->dev; |
308 | } | 308 | } |
309 | 309 | ||
310 | static const struct dma_map_ops *vmd_dma_ops(struct device *dev) | ||
311 | { | ||
312 | return get_dma_ops(to_vmd_dev(dev)); | ||
313 | } | ||
314 | |||
315 | static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr, | 310 | static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr, |
316 | gfp_t flag, unsigned long attrs) | 311 | gfp_t flag, unsigned long attrs) |
317 | { | 312 | { |
318 | return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag, | 313 | return dma_alloc_attrs(to_vmd_dev(dev), size, addr, flag, attrs); |
319 | attrs); | ||
320 | } | 314 | } |
321 | 315 | ||
322 | static void vmd_free(struct device *dev, size_t size, void *vaddr, | 316 | static void vmd_free(struct device *dev, size_t size, void *vaddr, |
323 | dma_addr_t addr, unsigned long attrs) | 317 | dma_addr_t addr, unsigned long attrs) |
324 | { | 318 | { |
325 | return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr, | 319 | return dma_free_attrs(to_vmd_dev(dev), size, vaddr, addr, attrs); |
326 | attrs); | ||
327 | } | 320 | } |
328 | 321 | ||
329 | static int vmd_mmap(struct device *dev, struct vm_area_struct *vma, | 322 | static int vmd_mmap(struct device *dev, struct vm_area_struct *vma, |
330 | void *cpu_addr, dma_addr_t addr, size_t size, | 323 | void *cpu_addr, dma_addr_t addr, size_t size, |
331 | unsigned long attrs) | 324 | unsigned long attrs) |
332 | { | 325 | { |
333 | return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr, | 326 | return dma_mmap_attrs(to_vmd_dev(dev), vma, cpu_addr, addr, size, |
334 | size, attrs); | 327 | attrs); |
335 | } | 328 | } |
336 | 329 | ||
337 | static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt, | 330 | static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt, |
338 | void *cpu_addr, dma_addr_t addr, size_t size, | 331 | void *cpu_addr, dma_addr_t addr, size_t size, |
339 | unsigned long attrs) | 332 | unsigned long attrs) |
340 | { | 333 | { |
341 | return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr, | 334 | return dma_get_sgtable_attrs(to_vmd_dev(dev), sgt, cpu_addr, addr, size, |
342 | addr, size, attrs); | 335 | attrs); |
343 | } | 336 | } |
344 | 337 | ||
345 | static dma_addr_t vmd_map_page(struct device *dev, struct page *page, | 338 | static dma_addr_t vmd_map_page(struct device *dev, struct page *page, |
@@ -347,66 +340,60 @@ static dma_addr_t vmd_map_page(struct device *dev, struct page *page, | |||
347 | enum dma_data_direction dir, | 340 | enum dma_data_direction dir, |
348 | unsigned long attrs) | 341 | unsigned long attrs) |
349 | { | 342 | { |
350 | return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size, | 343 | return dma_map_page_attrs(to_vmd_dev(dev), page, offset, size, dir, |
351 | dir, attrs); | 344 | attrs); |
352 | } | 345 | } |
353 | 346 | ||
354 | static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size, | 347 | static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size, |
355 | enum dma_data_direction dir, unsigned long attrs) | 348 | enum dma_data_direction dir, unsigned long attrs) |
356 | { | 349 | { |
357 | vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs); | 350 | dma_unmap_page_attrs(to_vmd_dev(dev), addr, size, dir, attrs); |
358 | } | 351 | } |
359 | 352 | ||
360 | static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents, | 353 | static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents, |
361 | enum dma_data_direction dir, unsigned long attrs) | 354 | enum dma_data_direction dir, unsigned long attrs) |
362 | { | 355 | { |
363 | return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs); | 356 | return dma_map_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs); |
364 | } | 357 | } |
365 | 358 | ||
366 | static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | 359 | static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, |
367 | enum dma_data_direction dir, unsigned long attrs) | 360 | enum dma_data_direction dir, unsigned long attrs) |
368 | { | 361 | { |
369 | vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs); | 362 | dma_unmap_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs); |
370 | } | 363 | } |
371 | 364 | ||
372 | static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr, | 365 | static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr, |
373 | size_t size, enum dma_data_direction dir) | 366 | size_t size, enum dma_data_direction dir) |
374 | { | 367 | { |
375 | vmd_dma_ops(dev)->sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir); | 368 | dma_sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir); |
376 | } | 369 | } |
377 | 370 | ||
378 | static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr, | 371 | static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr, |
379 | size_t size, enum dma_data_direction dir) | 372 | size_t size, enum dma_data_direction dir) |
380 | { | 373 | { |
381 | vmd_dma_ops(dev)->sync_single_for_device(to_vmd_dev(dev), addr, size, | 374 | dma_sync_single_for_device(to_vmd_dev(dev), addr, size, dir); |
382 | dir); | ||
383 | } | 375 | } |
384 | 376 | ||
385 | static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | 377 | static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, |
386 | int nents, enum dma_data_direction dir) | 378 | int nents, enum dma_data_direction dir) |
387 | { | 379 | { |
388 | vmd_dma_ops(dev)->sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir); | 380 | dma_sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir); |
389 | } | 381 | } |
390 | 382 | ||
391 | static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | 383 | static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg, |
392 | int nents, enum dma_data_direction dir) | 384 | int nents, enum dma_data_direction dir) |
393 | { | 385 | { |
394 | vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir); | 386 | dma_sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir); |
395 | } | ||
396 | |||
397 | static int vmd_mapping_error(struct device *dev, dma_addr_t addr) | ||
398 | { | ||
399 | return vmd_dma_ops(dev)->mapping_error(to_vmd_dev(dev), addr); | ||
400 | } | 387 | } |
401 | 388 | ||
402 | static int vmd_dma_supported(struct device *dev, u64 mask) | 389 | static int vmd_dma_supported(struct device *dev, u64 mask) |
403 | { | 390 | { |
404 | return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask); | 391 | return dma_supported(to_vmd_dev(dev), mask); |
405 | } | 392 | } |
406 | 393 | ||
407 | static u64 vmd_get_required_mask(struct device *dev) | 394 | static u64 vmd_get_required_mask(struct device *dev) |
408 | { | 395 | { |
409 | return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev)); | 396 | return dma_get_required_mask(to_vmd_dev(dev)); |
410 | } | 397 | } |
411 | 398 | ||
412 | static void vmd_teardown_dma_ops(struct vmd_dev *vmd) | 399 | static void vmd_teardown_dma_ops(struct vmd_dev *vmd) |
@@ -446,7 +433,6 @@ static void vmd_setup_dma_ops(struct vmd_dev *vmd) | |||
446 | ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device); | 433 | ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device); |
447 | ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu); | 434 | ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu); |
448 | ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device); | 435 | ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device); |
449 | ASSIGN_VMD_DMA_OPS(source, dest, mapping_error); | ||
450 | ASSIGN_VMD_DMA_OPS(source, dest, dma_supported); | 436 | ASSIGN_VMD_DMA_OPS(source, dest, dma_supported); |
451 | ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask); | 437 | ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask); |
452 | add_dma_domain(domain); | 438 | add_dma_domain(domain); |