author    Jerome Glisse <jglisse@redhat.com>  2010-04-09 08:39:23 -0400
committer Dave Airlie <airlied@redhat.com>    2010-04-20 00:12:05 -0400
commit    82c5da6bf8b55a931b042fb531083863d26c8020 (patch)
tree      88168d32d7060598ac730c30967b2e62dc5da28d /drivers/gpu/drm/ttm/ttm_bo_util.c
parent    9d87fa2138d06ff400551800d67d522625033e35 (diff)
drm/ttm: ttm_fault callback to allow driver to handle bo placement V6
On fault the driver is given the opportunity to perform any operation it sees fit in order to place the buffer into a CPU-visible area of memory. This patch doesn't break TTM users; nouveau, vmwgfx and radeon should keep working properly. A future patch will take advantage of this infrastructure and remove the old path from TTM once drivers are converted.

V2 return VM_FAULT_NOPAGE if the callback returns -EBUSY or -ERESTARTSYS
V3 balance io_mem_reserve and io_mem_free calls; fault_reserve_notify is responsible for performing any task necessary for the mapping to succeed
V4 minor cleanup, atomic_t -> bool, as the member is protected from concurrent access by the reserve mechanism
V5 the callback is now responsible for iomapping the bo and providing a virtual address; this simplifies TTM and will allow getting rid of TTM_MEMTYPE_FLAG_NEEDS_IOREMAP
V6 use the bus addr data to decide whether an ioremap is needed; the callback no longer has to ioremap, but drivers are still allowed to use a static mapping

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
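Editor's note: the new path relies on the io_mem_reserve/io_mem_free hooks this patch adds to ttm_bo_driver, which fill in and tear down the mem->bus placement data consumed by ttm_mem_io_reserve()/ttm_mem_io_free() in the diff below. A minimal sketch of what a converted driver might provide follows; the mydev_* names, the mydev_from_bdev() helper, the vram_aper_base field and the VRAM offset lookup are illustrative assumptions, not part of this patch.

/*
 * Illustrative sketch only: a possible driver-side io_mem_reserve()/
 * io_mem_free() pair for the new fault path.  All mydev_* identifiers
 * are hypothetical.
 */
static int mydev_io_mem_reserve(struct ttm_bo_device *bdev,
                                struct ttm_mem_reg *mem)
{
        struct mydev_device *mdev = mydev_from_bdev(bdev);  /* hypothetical helper */

        mem->bus.addr = NULL;           /* no static mapping: TTM will ioremap on demand */
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;

        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
        case TTM_PL_TT:
                return 0;               /* system pages, nothing to map over the bus */
        case TTM_PL_VRAM:
                /* expose the object through the PCI aperture */
                mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
                mem->bus.base = mdev->vram_aper_base;   /* hypothetical aperture base */
                mem->bus.is_iomem = true;
                return 0;
        default:
                return -EINVAL;
        }
}

static void mydev_io_mem_free(struct ttm_bo_device *bdev,
                              struct ttm_mem_reg *mem)
{
        /* nothing reserved beyond the static aperture in this sketch */
}

With such callbacks in place, ttm_mem_io_reserve() below delegates to the driver instead of calling ttm_bo_pci_offset(), and the bus.io_reserved flag, protected by the bo reservation, keeps the reserve/free calls balanced.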
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo_util.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c  124
1 file changed, 62 insertions(+), 62 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 865b2a826e1..d58eeb5ed22 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -81,30 +81,62 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+        int ret;
+
+        if (bdev->driver->io_mem_reserve) {
+                if (!mem->bus.io_reserved) {
+                        mem->bus.io_reserved = true;
+                        ret = bdev->driver->io_mem_reserve(bdev, mem);
+                        if (unlikely(ret != 0))
+                                return ret;
+                }
+        } else {
+                ret = ttm_bo_pci_offset(bdev, mem, &mem->bus.base, &mem->bus.offset, &mem->bus.size);
+                if (unlikely(ret != 0))
+                        return ret;
+                mem->bus.addr = NULL;
+                if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+                        mem->bus.addr = (void *)(((u8 *)man->io_addr) + mem->bus.offset);
+                mem->bus.is_iomem = (mem->bus.size > 0) ? 1 : 0;
+        }
+        return 0;
+}
+
+void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+        if (bdev->driver->io_mem_reserve) {
+                if (mem->bus.io_reserved) {
+                        mem->bus.io_reserved = false;
+                        bdev->driver->io_mem_free(bdev, mem);
+                }
+        }
+}
+
 int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void **virtual)
 {
-        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-        unsigned long bus_offset;
-        unsigned long bus_size;
-        unsigned long bus_base;
         int ret;
         void *addr;
 
         *virtual = NULL;
-        ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
-        if (ret || bus_size == 0)
+        ret = ttm_mem_io_reserve(bdev, mem);
+        if (ret)
                 return ret;
 
-        if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
-                addr = (void *)(((u8 *) man->io_addr) + bus_offset);
-        else {
+        if (mem->bus.addr) {
+                addr = mem->bus.addr;
+        } else {
                 if (mem->placement & TTM_PL_FLAG_WC)
-                        addr = ioremap_wc(bus_base + bus_offset, bus_size);
+                        addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
                 else
-                        addr = ioremap_nocache(bus_base + bus_offset, bus_size);
-                if (!addr)
+                        addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+                if (!addr) {
+                        ttm_mem_io_free(bdev, mem);
                         return -ENOMEM;
+                }
         }
         *virtual = addr;
         return 0;
@@ -117,8 +149,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 
         man = &bdev->man[mem->mem_type];
 
-        if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+        if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP || mem->bus.addr == NULL))
                 iounmap(virtual);
+        ttm_mem_io_free(bdev, mem);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -370,26 +403,23 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
 EXPORT_SYMBOL(ttm_io_prot);
 
 static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
-                          unsigned long bus_base,
-                          unsigned long bus_offset,
-                          unsigned long bus_size,
+                          unsigned long offset,
+                          unsigned long size,
                           struct ttm_bo_kmap_obj *map)
 {
-        struct ttm_bo_device *bdev = bo->bdev;
         struct ttm_mem_reg *mem = &bo->mem;
-        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 
-        if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
+        if (bo->mem.bus.addr) {
                 map->bo_kmap_type = ttm_bo_map_premapped;
-                map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
         } else {
                 map->bo_kmap_type = ttm_bo_map_iomap;
                 if (mem->placement & TTM_PL_FLAG_WC)
-                        map->virtual = ioremap_wc(bus_base + bus_offset,
-                                                  bus_size);
+                        map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
+                                                  size);
                 else
-                        map->virtual = ioremap_nocache(bus_base + bus_offset,
-                                                       bus_size);
+                        map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+                                                       size);
         }
         return (!map->virtual) ? -ENOMEM : 0;
 }
@@ -442,13 +472,12 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
                 unsigned long start_page, unsigned long num_pages,
                 struct ttm_bo_kmap_obj *map)
 {
+        unsigned long offset, size;
         int ret;
-        unsigned long bus_base;
-        unsigned long bus_offset;
-        unsigned long bus_size;
 
         BUG_ON(!list_empty(&bo->swap));
         map->virtual = NULL;
+        map->bo = bo;
         if (num_pages > bo->num_pages)
                 return -EINVAL;
         if (start_page > bo->num_pages)
@@ -457,16 +486,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
         if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
                 return -EPERM;
 #endif
-        ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
-                                &bus_offset, &bus_size);
+        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
         if (ret)
                 return ret;
-        if (bus_size == 0) {
+        if (!bo->mem.bus.is_iomem) {
                 return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
         } else {
-                bus_offset += start_page << PAGE_SHIFT;
-                bus_size = num_pages << PAGE_SHIFT;
-                return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+                offset = start_page << PAGE_SHIFT;
+                size = num_pages << PAGE_SHIFT;
+                return ttm_bo_ioremap(bo, offset, size, map);
         }
 }
 EXPORT_SYMBOL(ttm_bo_kmap);
@@ -478,6 +506,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
         switch (map->bo_kmap_type) {
         case ttm_bo_map_iomap:
                 iounmap(map->virtual);
+                ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
                 break;
         case ttm_bo_map_vmap:
                 vunmap(map->virtual);
@@ -495,35 +524,6 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 }
 EXPORT_SYMBOL(ttm_bo_kunmap);
 
-int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
-                    unsigned long dst_offset,
-                    unsigned long *pfn, pgprot_t *prot)
-{
-        struct ttm_mem_reg *mem = &bo->mem;
-        struct ttm_bo_device *bdev = bo->bdev;
-        unsigned long bus_offset;
-        unsigned long bus_size;
-        unsigned long bus_base;
-        int ret;
-        ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
-                                &bus_size);
-        if (ret)
-                return -EINVAL;
-        if (bus_size != 0)
-                *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
-        else
-                if (!bo->ttm)
-                        return -EINVAL;
-                else
-                        *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
-                                                           dst_offset >>
-                                                           PAGE_SHIFT));
-        *prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
-                PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
-
-        return 0;
-}
-
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                               void *sync_obj,
                               void *sync_obj_arg,