 drivers/gpu/drm/ttm/ttm_bo.c      |   7
 drivers/gpu/drm/ttm/ttm_bo_util.c | 124
 drivers/gpu/drm/ttm/ttm_bo_vm.c   |  41
 include/drm/ttm/ttm_bo_api.h      |  23
 include/drm/ttm/ttm_bo_driver.h   |  16
 5 files changed, 126 insertions(+), 85 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 40631e2866f8..b42e3fae1bd5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -632,6 +632,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 
         evict_mem = bo->mem;
         evict_mem.mm_node = NULL;
+        evict_mem.bus.io_reserved = false;
 
         placement.fpfn = 0;
         placement.lpfn = 0;
@@ -1005,6 +1006,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
         mem.num_pages = bo->num_pages;
         mem.size = mem.num_pages << PAGE_SHIFT;
         mem.page_alignment = bo->mem.page_alignment;
+        mem.bus.io_reserved = false;
         /*
          * Determine where to move the buffer.
          */
@@ -1160,6 +1162,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
         bo->mem.num_pages = bo->num_pages;
         bo->mem.mm_node = NULL;
         bo->mem.page_alignment = page_alignment;
+        bo->mem.bus.io_reserved = false;
         bo->buffer_start = buffer_start & PAGE_MASK;
         bo->priv_flags = 0;
         bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1574,7 +1577,7 @@ int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
         if (ttm_mem_reg_is_pci(bdev, mem)) {
                 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
                 *bus_size = mem->num_pages << PAGE_SHIFT;
-                *bus_base = man->io_offset;
+                *bus_base = man->io_offset + (uintptr_t)man->io_addr;
         }
 
         return 0;
@@ -1588,8 +1591,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 
         if (!bdev->dev_mapping)
                 return;
-
         unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+        ttm_mem_io_free(bdev, &bo->mem);
 }
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 865b2a826e13..d58eeb5ed22b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -81,30 +81,62 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+        int ret;
+
+        if (bdev->driver->io_mem_reserve) {
+                if (!mem->bus.io_reserved) {
+                        mem->bus.io_reserved = true;
+                        ret = bdev->driver->io_mem_reserve(bdev, mem);
+                        if (unlikely(ret != 0))
+                                return ret;
+                }
+        } else {
+                ret = ttm_bo_pci_offset(bdev, mem, &mem->bus.base, &mem->bus.offset, &mem->bus.size);
+                if (unlikely(ret != 0))
+                        return ret;
+                mem->bus.addr = NULL;
+                if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+                        mem->bus.addr = (void *)(((u8 *)man->io_addr) + mem->bus.offset);
+                mem->bus.is_iomem = (mem->bus.size > 0) ? 1 : 0;
+        }
+        return 0;
+}
+
+void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+        if (bdev->driver->io_mem_reserve) {
+                if (mem->bus.io_reserved) {
+                        mem->bus.io_reserved = false;
+                        bdev->driver->io_mem_free(bdev, mem);
+                }
+        }
+}
+
 int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void **virtual)
 {
-        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-        unsigned long bus_offset;
-        unsigned long bus_size;
-        unsigned long bus_base;
         int ret;
         void *addr;
 
         *virtual = NULL;
-        ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
-        if (ret || bus_size == 0)
+        ret = ttm_mem_io_reserve(bdev, mem);
+        if (ret)
                 return ret;
 
-        if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
-                addr = (void *)(((u8 *) man->io_addr) + bus_offset);
-        else {
+        if (mem->bus.addr) {
+                addr = mem->bus.addr;
+        } else {
                 if (mem->placement & TTM_PL_FLAG_WC)
-                        addr = ioremap_wc(bus_base + bus_offset, bus_size);
+                        addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
                 else
-                        addr = ioremap_nocache(bus_base + bus_offset, bus_size);
-                if (!addr)
+                        addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+                if (!addr) {
+                        ttm_mem_io_free(bdev, mem);
                         return -ENOMEM;
+                }
         }
         *virtual = addr;
         return 0;
@@ -117,8 +149,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 
         man = &bdev->man[mem->mem_type];
 
-        if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+        if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP || mem->bus.addr == NULL))
                 iounmap(virtual);
+        ttm_mem_io_free(bdev, mem);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -370,26 +403,23 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
 EXPORT_SYMBOL(ttm_io_prot);
 
 static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
-                          unsigned long bus_base,
-                          unsigned long bus_offset,
-                          unsigned long bus_size,
+                          unsigned long offset,
+                          unsigned long size,
                           struct ttm_bo_kmap_obj *map)
 {
-        struct ttm_bo_device *bdev = bo->bdev;
         struct ttm_mem_reg *mem = &bo->mem;
-        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 
-        if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
+        if (bo->mem.bus.addr) {
                 map->bo_kmap_type = ttm_bo_map_premapped;
-                map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
         } else {
                 map->bo_kmap_type = ttm_bo_map_iomap;
                 if (mem->placement & TTM_PL_FLAG_WC)
-                        map->virtual = ioremap_wc(bus_base + bus_offset,
-                                                  bus_size);
+                        map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
+                                                  size);
                 else
-                        map->virtual = ioremap_nocache(bus_base + bus_offset,
-                                                       bus_size);
+                        map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+                                                       size);
         }
         return (!map->virtual) ? -ENOMEM : 0;
 }
@@ -442,13 +472,12 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
                 unsigned long start_page, unsigned long num_pages,
                 struct ttm_bo_kmap_obj *map)
 {
+        unsigned long offset, size;
         int ret;
-        unsigned long bus_base;
-        unsigned long bus_offset;
-        unsigned long bus_size;
 
         BUG_ON(!list_empty(&bo->swap));
         map->virtual = NULL;
+        map->bo = bo;
         if (num_pages > bo->num_pages)
                 return -EINVAL;
         if (start_page > bo->num_pages)
@@ -457,16 +486,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
         if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
                 return -EPERM;
 #endif
-        ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
-                        &bus_offset, &bus_size);
+        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
         if (ret)
                 return ret;
-        if (bus_size == 0) {
+        if (!bo->mem.bus.is_iomem) {
                 return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
         } else {
-                bus_offset += start_page << PAGE_SHIFT;
-                bus_size = num_pages << PAGE_SHIFT;
-                return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+                offset = start_page << PAGE_SHIFT;
+                size = num_pages << PAGE_SHIFT;
+                return ttm_bo_ioremap(bo, offset, size, map);
         }
 }
 EXPORT_SYMBOL(ttm_bo_kmap);
@@ -478,6 +506,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
         switch (map->bo_kmap_type) {
         case ttm_bo_map_iomap:
                 iounmap(map->virtual);
+                ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
                 break;
         case ttm_bo_map_vmap:
                 vunmap(map->virtual);
@@ -495,35 +524,6 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 }
 EXPORT_SYMBOL(ttm_bo_kunmap);
 
-int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
-                    unsigned long dst_offset,
-                    unsigned long *pfn, pgprot_t *prot)
-{
-        struct ttm_mem_reg *mem = &bo->mem;
-        struct ttm_bo_device *bdev = bo->bdev;
-        unsigned long bus_offset;
-        unsigned long bus_size;
-        unsigned long bus_base;
-        int ret;
-        ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
-                        &bus_size);
-        if (ret)
-                return -EINVAL;
-        if (bus_size != 0)
-                *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
-        else
-                if (!bo->ttm)
-                        return -EINVAL;
-                else
-                        *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
-                                                           dst_offset >>
-                                                           PAGE_SHIFT));
-        *prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
-                PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
-
-        return 0;
-}
-
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                               void *sync_obj,
                               void *sync_obj_arg,
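Note: with the ttm_bo_util.c hunks above, ttm_bo_kmap() now reserves the io region itself through ttm_mem_io_reserve(), and ttm_bo_kunmap() releases it again via the new map->bo back pointer, so callers no longer deal with ttm_bo_pci_offset(). A minimal caller-side sketch of that pairing, given an already reserved struct ttm_buffer_object *bo (hypothetical driver code, not part of this patch):

        struct ttm_bo_kmap_obj map;
        bool is_iomem;
        void *ptr;
        int r;

        r = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
        if (r)
                return r;
        ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
        /* ... access the buffer contents through ptr ... */
        ttm_bo_kunmap(&map);    /* also drops the io reservation taken by kmap */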
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 668dbe8b8dd3..fe6cb77899f4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -74,9 +74,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
             vma->vm_private_data;
         struct ttm_bo_device *bdev = bo->bdev;
-        unsigned long bus_base;
-        unsigned long bus_offset;
-        unsigned long bus_size;
         unsigned long page_offset;
         unsigned long page_last;
         unsigned long pfn;
@@ -84,7 +81,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         struct page *page;
         int ret;
         int i;
-        bool is_iomem;
         unsigned long address = (unsigned long)vmf->virtual_address;
         int retval = VM_FAULT_NOPAGE;
 
@@ -101,8 +97,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                 return VM_FAULT_NOPAGE;
         }
 
-        if (bdev->driver->fault_reserve_notify)
-                bdev->driver->fault_reserve_notify(bo);
+        if (bdev->driver->fault_reserve_notify) {
+                ret = bdev->driver->fault_reserve_notify(bo);
+                switch (ret) {
+                case 0:
+                        break;
+                case -EBUSY:
+                        set_need_resched();
+                case -ERESTARTSYS:
+                        retval = VM_FAULT_NOPAGE;
+                        goto out_unlock;
+                default:
+                        retval = VM_FAULT_SIGBUS;
+                        goto out_unlock;
+                }
+        }
 
         /*
          * Wait for buffer data in transit, due to a pipelined
@@ -122,15 +131,12 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         spin_unlock(&bo->lock);
 
 
-        ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
-                        &bus_size);
-        if (unlikely(ret != 0)) {
+        ret = ttm_mem_io_reserve(bdev, &bo->mem);
+        if (ret) {
                 retval = VM_FAULT_SIGBUS;
                 goto out_unlock;
         }
 
-        is_iomem = (bus_size != 0);
-
         page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
             bo->vm_node->start - vma->vm_pgoff;
         page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
@@ -154,8 +160,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
          * vma->vm_page_prot when the object changes caching policy, with
          * the correct locks held.
          */
-
-        if (is_iomem) {
+        if (bo->mem.bus.is_iomem) {
                 vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
                                                 vma->vm_page_prot);
         } else {
@@ -171,10 +176,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
          */
 
         for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
-
-                if (is_iomem)
-                        pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
-                            page_offset;
+                if (bo->mem.bus.is_iomem)
+                        pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
                 else {
                         page = ttm_tt_get_page(ttm, page_offset);
                         if (unlikely(!page && i == 0)) {
@@ -198,7 +201,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                                 retval =
                                     (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
                                 goto out_unlock;
-
                         }
 
                         address += PAGE_SIZE;
@@ -221,8 +223,7 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
 
 static void ttm_bo_vm_close(struct vm_area_struct *vma)
 {
-        struct ttm_buffer_object *bo =
-            (struct ttm_buffer_object *)vma->vm_private_data;
+        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
 
         ttm_bo_unref(&bo);
         vma->vm_private_data = NULL;
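With fault_reserve_notify() now returning an int, the fault handler above maps 0 to success, -EBUSY and -ERESTARTSYS to VM_FAULT_NOPAGE (retry the fault), and anything else to VM_FAULT_SIGBUS. A rough sketch of what a driver callback might look like under this contract; every example_* name is hypothetical and not taken from this patch:

static int example_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        /* If the BO is not currently CPU-accessible, try to move it into a
         * mappable placement; returning -EBUSY makes TTM retry the fault
         * instead of raising SIGBUS. */
        if (!example_bo_is_cpu_accessible(bo))
                return example_bo_move_to_mappable(bo); /* 0, -EBUSY or error */
        return 0;
}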
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 8c8005ec4eaf..3e273e0b9417 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -66,6 +66,26 @@ struct ttm_placement {
         const uint32_t *busy_placement;
 };
 
+/**
+ * struct ttm_bus_placement
+ *
+ * @addr: mapped virtual address
+ * @base: bus base address
+ * @is_iomem: is this io memory ?
+ * @size: size in byte
+ * @offset: offset from the base address
+ *
+ * Structure indicating the bus placement of an object.
+ */
+struct ttm_bus_placement {
+        void *addr;
+        unsigned long base;
+        unsigned long size;
+        unsigned long offset;
+        bool is_iomem;
+        bool io_reserved;
+};
+
 
 /**
  * struct ttm_mem_reg
@@ -75,6 +95,7 @@ struct ttm_placement {
  * @num_pages: Actual size of memory region in pages.
  * @page_alignment: Page alignment.
  * @placement: Placement flags.
+ * @bus: Placement on io bus accessible to the CPU
  *
  * Structure indicating the placement and space resources used by a
  * buffer object.
@@ -87,6 +108,7 @@ struct ttm_mem_reg {
         uint32_t page_alignment;
         uint32_t mem_type;
         uint32_t placement;
+        struct ttm_bus_placement bus;
 };
 
 /**
@@ -274,6 +296,7 @@ struct ttm_bo_kmap_obj {
                 ttm_bo_map_kmap = 3,
                 ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK,
         } bo_kmap_type;
+        struct ttm_buffer_object *bo;
 };
 
 /**
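For orientation, the new ttm_bus_placement fields combine exactly as in the ttm_bo_util.c hunks above: bus.base + bus.offset is the physical address to ioremap, bus.size its length, and bus.addr, when non-NULL, an already usable mapping. Illustrative fragment only, given a struct ttm_mem_reg *mem:

        unsigned long phys = mem->bus.base + mem->bus.offset;   /* start of the region */
        void *ptr;

        if (mem->bus.addr)
                ptr = mem->bus.addr;                    /* premapped by the driver */
        else
                ptr = ioremap_wc(phys, mem->bus.size);  /* map here, iounmap when done */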
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 69f70e418c2c..da39865d67d8 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -352,12 +352,21 @@ struct ttm_bo_driver {
                         struct ttm_mem_reg *new_mem);
         /* notify the driver we are taking a fault on this BO
          * and have reserved it */
-        void (*fault_reserve_notify)(struct ttm_buffer_object *bo);
+        int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
 
         /**
          * notify the driver that we're about to swap out this bo
          */
         void (*swap_notify) (struct ttm_buffer_object *bo);
+
+        /**
+         * Driver callback on when mapping io memory (for bo_move_memcpy
+         * for instance). TTM will take care to call io_mem_free whenever
+         * the mapping is not use anymore. io_mem_reserve & io_mem_free
+         * are balanced.
+         */
+        int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+        void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
 };
 
 /**
@@ -685,6 +694,11 @@ extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
                              unsigned long *bus_offset,
                              unsigned long *bus_size);
 
+extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+                              struct ttm_mem_reg *mem);
+extern void ttm_mem_io_free(struct ttm_bo_device *bdev,
+                            struct ttm_mem_reg *mem);
+
 extern void ttm_bo_global_release(struct ttm_global_reference *ref);
 extern int ttm_bo_global_init(struct ttm_global_reference *ref);
 
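The two new hooks let a driver fill in mem->bus itself instead of relying on ttm_bo_pci_offset(). A rough sketch of a driver-side implementation, assuming a simple linear VRAM aperture; every example_* name is hypothetical and not part of this patch:

static int example_io_mem_reserve(struct ttm_bo_device *bdev,
                                  struct ttm_mem_reg *mem)
{
        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;

        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory, nothing to map over the bus */
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
                mem->bus.base = example_vram_aperture_base(bdev); /* hypothetical helper */
                mem->bus.is_iomem = true;
                return 0;
        default:
                return -EINVAL;
        }
}

static void example_io_mem_free(struct ttm_bo_device *bdev,
                                struct ttm_mem_reg *mem)
{
        /* nothing was pinned in example_io_mem_reserve, so nothing to undo;
         * a driver that set up an aperture window above would tear it down here */
}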