author		Dave Airlie <airlied@redhat.com>	2010-12-02 22:59:36 -0500
committer	Dave Airlie <airlied@redhat.com>	2010-12-02 22:59:36 -0500
commit		a9979d6077e4482dbe64cedc4bb181d5576d13f7 (patch)
tree		24e383df4e984b907651f79d3fb0642aa0691426
parent		27641c3f003e7f3b6585c01d8a788883603eb262 (diff)
parent		147666fb3b93b8c484f562da33a37f886ddff768 (diff)
Merge branch 'drm-ttm-next' into drm-core-next
* drm-ttm-next:
  drm/radeon: Use the ttm execbuf utilities
  drm/ttm: Fix up io_mem_reserve / io_mem_free calling
  drm/ttm/vmwgfx: Have TTM manage the validation sequence.
  drm/ttm: Improved fencing of buffer object lists
  drm/ttm/radeon/nouveau: Kill the bo lock in favour of a bo device fence_lock
  drm/ttm: Don't deadlock on recursive multi-bo reservations
  drm/ttm: Optimize ttm_eu_backoff_reservation
  drm/ttm: Use kref_sub instead of repeatedly calling kref_put
  kref: Add a kref_sub function
  drm/ttm: Add a bo list reserve fastpath (v2)
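For orientation, here is a minimal sketch (not part of the commit) of the driver-side submission flow the new ttm_eu utilities support, modelled on the radeon_cs.c changes below; everything other than the ttm_eu_* calls is illustrative.

#include <ttm/ttm_execbuf_util.h>

/*
 * Illustrative only: "validate_list" is a driver-built list of
 * struct ttm_validate_buffer entries; "fence" is the driver's fence
 * object for the submitted command stream.
 */
static int submit_sketch(struct list_head *validate_list, void *fence)
{
	int ret;

	/* Reserve all buffers; the device-global val_seq taken inside
	 * ttm_eu_reserve_buffers() provides deadlock avoidance when two
	 * submitters reserve the same buffers in different orders. */
	ret = ttm_eu_reserve_buffers(validate_list);
	if (unlikely(ret != 0))
		return ret;

	/* ... validate placements and emit the command stream here;
	 * on failure, drop all reservations without fencing:
	 * ttm_eu_backoff_reservation(validate_list); ... */

	/* Attach the fence to every buffer and unreserve them all. */
	ttm_eu_fence_buffer_objects(validate_list, fence);
	return 0;
}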
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c      12
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h             4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c         17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c     55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h      7
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c              156
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c         136
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c            29
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c    169
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h         1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c     3
-rw-r--r--  include/drm/ttm/ttm_bo_api.h               50
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h           152
-rw-r--r--  include/drm/ttm/ttm_execbuf_util.h         11
-rw-r--r--  include/linux/kref.h                        2
-rw-r--r--  lib/kref.c                                 30
16 files changed, 593 insertions, 241 deletions
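The include/linux/kref.h and lib/kref.c hunks are not reproduced in this listing. The semantics the new kref_sub() is expected to have, inferred from the ttm_bo_list_ref_sub() caller added in ttm_bo.c below, are sketched here; this is a sketch, not the committed implementation.

#include <linux/kref.h>

/* Sketch only: drop @count references in one atomic step instead of
 * looping kref_put(); call @release if and only if the refcount hits
 * zero, and report that to the caller. TTM passes ttm_bo_ref_bug()
 * (which BUG()s) as @release when the count must never reach zero. */
static inline int kref_sub_sketch(struct kref *kref, unsigned int count,
				  void (*release)(struct kref *kref))
{
	if (atomic_sub_and_test((int) count, &kref->refcount)) {
		release(kref);
		return 1;
	}
	return 0;
}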
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 9a1fdcf400c2..1f2301d26c0a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -234,10 +234,10 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
 	if (likely(fence)) {
 		struct nouveau_fence *prev_fence;
 
-		spin_lock(&nvbo->bo.lock);
+		spin_lock(&nvbo->bo.bdev->fence_lock);
 		prev_fence = nvbo->bo.sync_obj;
 		nvbo->bo.sync_obj = nouveau_fence_ref(fence);
-		spin_unlock(&nvbo->bo.lock);
+		spin_unlock(&nvbo->bo.bdev->fence_lock);
 		nouveau_fence_unref((void *)&prev_fence);
 	}
 
@@ -557,9 +557,9 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
 			data |= r->vor;
 		}
 
-		spin_lock(&nvbo->bo.lock);
+		spin_lock(&nvbo->bo.bdev->fence_lock);
 		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
-		spin_unlock(&nvbo->bo.lock);
+		spin_unlock(&nvbo->bo.bdev->fence_lock);
 		if (ret) {
 			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
 			break;
@@ -791,9 +791,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 	}
 
 	if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
-		spin_lock(&nvbo->bo.lock);
+		spin_lock(&nvbo->bo.bdev->fence_lock);
 		ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
-		spin_unlock(&nvbo->bo.lock);
+		spin_unlock(&nvbo->bo.bdev->fence_lock);
 	} else {
 		ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
 		if (ret == 0)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3a7095743d44..b1e073b7381f 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -69,6 +69,7 @@
 #include <ttm/ttm_bo_driver.h>
 #include <ttm/ttm_placement.h>
 #include <ttm/ttm_module.h>
+#include <ttm/ttm_execbuf_util.h>
 
 #include "radeon_family.h"
 #include "radeon_mode.h"
@@ -259,13 +260,12 @@ struct radeon_bo {
 };
 
 struct radeon_bo_list {
-	struct list_head	list;
+	struct ttm_validate_buffer tv;
 	struct radeon_bo	*bo;
 	uint64_t		gpu_offset;
 	unsigned		rdomain;
 	unsigned		wdomain;
 	u32			tiling_flags;
-	bool			reserved;
 };
 
 /*
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 6d64a2705f12..35b5eb8fbe2a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -77,13 +77,13 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 			p->relocs_ptr[i] = &p->relocs[i];
 			p->relocs[i].robj = p->relocs[i].gobj->driver_private;
 			p->relocs[i].lobj.bo = p->relocs[i].robj;
-			p->relocs[i].lobj.rdomain = r->read_domains;
 			p->relocs[i].lobj.wdomain = r->write_domain;
+			p->relocs[i].lobj.rdomain = r->read_domains;
+			p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
 			p->relocs[i].handle = r->handle;
 			p->relocs[i].flags = r->flags;
-			INIT_LIST_HEAD(&p->relocs[i].lobj.list);
 			radeon_bo_list_add_object(&p->relocs[i].lobj,
 						  &p->validated);
 		}
 	}
 	return radeon_bo_list_validate(&p->validated);
@@ -189,10 +189,13 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 {
 	unsigned i;
 
-	if (!error && parser->ib) {
-		radeon_bo_list_fence(&parser->validated, parser->ib->fence);
-	}
-	radeon_bo_list_unreserve(&parser->validated);
+
+	if (!error && parser->ib)
+		ttm_eu_fence_buffer_objects(&parser->validated,
+					    parser->ib->fence);
+	else
+		ttm_eu_backoff_reservation(&parser->validated);
+
 	if (parser->relocs != NULL) {
 		for (i = 0; i < parser->nrelocs; i++) {
 			if (parser->relocs[i].gobj)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1d067743fee0..a8594d289bcf 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -293,34 +293,9 @@ void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
 			       struct list_head *head)
 {
 	if (lobj->wdomain) {
-		list_add(&lobj->list, head);
+		list_add(&lobj->tv.head, head);
 	} else {
-		list_add_tail(&lobj->list, head);
-	}
-}
-
-int radeon_bo_list_reserve(struct list_head *head)
-{
-	struct radeon_bo_list *lobj;
-	int r;
-
-	list_for_each_entry(lobj, head, list){
-		r = radeon_bo_reserve(lobj->bo, false);
-		if (unlikely(r != 0))
-			return r;
-		lobj->reserved = true;
-	}
-	return 0;
-}
-
-void radeon_bo_list_unreserve(struct list_head *head)
-{
-	struct radeon_bo_list *lobj;
-
-	list_for_each_entry(lobj, head, list) {
-		/* only unreserve object we successfully reserved */
-		if (lobj->reserved && radeon_bo_is_reserved(lobj->bo))
-			radeon_bo_unreserve(lobj->bo);
+		list_add_tail(&lobj->tv.head, head);
 	}
 }
 
@@ -331,14 +306,11 @@ int radeon_bo_list_validate(struct list_head *head)
 	u32 domain;
 	int r;
 
-	list_for_each_entry(lobj, head, list) {
-		lobj->reserved = false;
-	}
-	r = radeon_bo_list_reserve(head);
+	r = ttm_eu_reserve_buffers(head);
 	if (unlikely(r != 0)) {
 		return r;
 	}
-	list_for_each_entry(lobj, head, list) {
+	list_for_each_entry(lobj, head, tv.head) {
 		bo = lobj->bo;
 		if (!bo->pin_count) {
 			domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
@@ -361,25 +333,6 @@ int radeon_bo_list_validate(struct list_head *head)
 	return 0;
 }
 
-void radeon_bo_list_fence(struct list_head *head, void *fence)
-{
-	struct radeon_bo_list *lobj;
-	struct radeon_bo *bo;
-	struct radeon_fence *old_fence = NULL;
-
-	list_for_each_entry(lobj, head, list) {
-		bo = lobj->bo;
-		spin_lock(&bo->tbo.lock);
-		old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
-		bo->tbo.sync_obj = radeon_fence_ref(fence);
-		bo->tbo.sync_obj_arg = NULL;
-		spin_unlock(&bo->tbo.lock);
-		if (old_fence) {
-			radeon_fence_unref(&old_fence);
-		}
-	}
-}
-
 int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
 			 struct vm_area_struct *vma)
 {
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index d143702b244a..22d4c237dea5 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -126,12 +126,12 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
 	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
 	if (unlikely(r != 0))
 		return r;
-	spin_lock(&bo->tbo.lock);
+	spin_lock(&bo->tbo.bdev->fence_lock);
 	if (mem_type)
 		*mem_type = bo->tbo.mem.mem_type;
 	if (bo->tbo.sync_obj)
 		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
-	spin_unlock(&bo->tbo.lock);
+	spin_unlock(&bo->tbo.bdev->fence_lock);
 	ttm_bo_unreserve(&bo->tbo);
 	return r;
 }
@@ -152,10 +152,7 @@ extern int radeon_bo_init(struct radeon_device *rdev);
 extern void radeon_bo_fini(struct radeon_device *rdev);
 extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
 				struct list_head *head);
-extern int radeon_bo_list_reserve(struct list_head *head);
-extern void radeon_bo_list_unreserve(struct list_head *head);
 extern int radeon_bo_list_validate(struct list_head *head);
-extern void radeon_bo_list_fence(struct list_head *head, void *fence);
 extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
 				struct vm_area_struct *vma);
 extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 148a322d8f5d..cf2ec562550e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -169,7 +169,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
 }
 EXPORT_SYMBOL(ttm_bo_wait_unreserved);
 
-static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man;
@@ -191,11 +191,7 @@ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 	}
 }
 
-/**
- * Call with the lru_lock held.
- */
-
-static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 {
 	int put_count = 0;
 
@@ -227,9 +223,18 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 	/**
 	 * Deadlock avoidance for multi-bo reserving.
 	 */
-	if (use_sequence && bo->seq_valid &&
-	    (sequence - bo->val_seq < (1 << 31))) {
-		return -EAGAIN;
+	if (use_sequence && bo->seq_valid) {
+		/**
+		 * We've already reserved this one.
+		 */
+		if (unlikely(sequence == bo->val_seq))
+			return -EDEADLK;
+		/**
+		 * Already reserved by a thread that will not back
+		 * off for us. We need to back off.
+		 */
+		if (unlikely(sequence - bo->val_seq < (1 << 31)))
+			return -EAGAIN;
 	}
 
 	if (no_wait)
@@ -267,6 +272,13 @@ static void ttm_bo_ref_bug(struct kref *list_kref)
 	BUG();
 }
 
+void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+			 bool never_free)
+{
+	kref_sub(&bo->list_kref, count,
+		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
+}
+
 int ttm_bo_reserve(struct ttm_buffer_object *bo,
 		   bool interruptible,
 		   bool no_wait, bool use_sequence, uint32_t sequence)
@@ -282,20 +294,24 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 	put_count = ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);
 
-	while (put_count--)
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+	ttm_bo_list_ref_sub(bo, put_count, true);
 
 	return ret;
 }
 
+void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
+{
+	ttm_bo_add_to_lru(bo);
+	atomic_set(&bo->reserved, 0);
+	wake_up_all(&bo->event_queue);
+}
+
 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_global *glob = bo->glob;
 
 	spin_lock(&glob->lru_lock);
-	ttm_bo_add_to_lru(bo);
-	atomic_set(&bo->reserved, 0);
-	wake_up_all(&bo->event_queue);
+	ttm_bo_unreserve_locked(bo);
 	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_unreserve);
@@ -362,8 +378,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	int ret = 0;
 
 	if (old_is_pci || new_is_pci ||
-	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
-		ttm_bo_unmap_virtual(bo);
+	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
+		ret = ttm_mem_io_lock(old_man, true);
+		if (unlikely(ret != 0))
+			goto out_err;
+		ttm_bo_unmap_virtual_locked(bo);
+		ttm_mem_io_unlock(old_man);
+	}
 
 	/*
 	 * Create and bind a ttm if required.
@@ -416,11 +437,9 @@ moved:
 	}
 
 	if (bo->mem.mm_node) {
-		spin_lock(&bo->lock);
 		bo->offset = (bo->mem.start << PAGE_SHIFT) +
 		    bdev->man[bo->mem.mem_type].gpu_offset;
 		bo->cur_placement = bo->mem.placement;
-		spin_unlock(&bo->lock);
 	} else
 		bo->offset = 0;
 
@@ -452,7 +471,6 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 		ttm_tt_destroy(bo->ttm);
 		bo->ttm = NULL;
 	}
-
 	ttm_bo_mem_put(bo, &bo->mem);
 
 	atomic_set(&bo->reserved, 0);
@@ -474,14 +492,14 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	int put_count;
 	int ret;
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	(void) ttm_bo_wait(bo, false, false, true);
 	if (!bo->sync_obj) {
 
 		spin_lock(&glob->lru_lock);
 
 		/**
-		 * Lock inversion between bo::reserve and bo::lock here,
+		 * Lock inversion between bo:reserve and bdev::fence_lock here,
 		 * but that's OK, since we're only trylocking.
 		 */
 
@@ -490,14 +508,13 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		if (unlikely(ret == -EBUSY))
 			goto queue;
 
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		put_count = ttm_bo_del_from_lru(bo);
 
 		spin_unlock(&glob->lru_lock);
 		ttm_bo_cleanup_memtype_use(bo);
 
-		while (put_count--)
-			kref_put(&bo->list_kref, ttm_bo_ref_bug);
+		ttm_bo_list_ref_sub(bo, put_count, true);
 
 		return;
 	} else {
@@ -512,7 +529,7 @@ queue:
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 	spin_unlock(&glob->lru_lock);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 
 	if (sync_obj) {
 		driver->sync_obj_flush(sync_obj, sync_obj_arg);
@@ -537,14 +554,15 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 			       bool no_wait_reserve,
 			       bool no_wait_gpu)
 {
+	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
 	int put_count;
 	int ret = 0;
 
retry:
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 
 	if (unlikely(ret != 0))
 		return ret;
@@ -580,8 +598,7 @@ retry:
 	spin_unlock(&glob->lru_lock);
 	ttm_bo_cleanup_memtype_use(bo);
 
-	while (put_count--)
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+	ttm_bo_list_ref_sub(bo, put_count, true);
 
 	return 0;
 }
@@ -652,6 +669,7 @@ static void ttm_bo_release(struct kref *kref)
 	struct ttm_buffer_object *bo =
 	    container_of(kref, struct ttm_buffer_object, kref);
 	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
 	if (likely(bo->vm_node != NULL)) {
 		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
@@ -659,6 +677,9 @@ static void ttm_bo_release(struct kref *kref)
 		bo->vm_node = NULL;
 	}
 	write_unlock(&bdev->vm_lock);
+	ttm_mem_io_lock(man, false);
+	ttm_mem_io_free_vm(bo);
+	ttm_mem_io_unlock(man);
 	ttm_bo_cleanup_refs_or_queue(bo);
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	write_lock(&bdev->vm_lock);
@@ -698,9 +719,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	struct ttm_placement placement;
 	int ret = 0;
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 
 	if (unlikely(ret != 0)) {
 		if (ret != -ERESTARTSYS) {
@@ -715,7 +736,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
-	evict_mem.bus.io_reserved = false;
+	evict_mem.bus.io_reserved_vm = false;
+	evict_mem.bus.io_reserved_count = 0;
 
 	placement.fpfn = 0;
 	placement.lpfn = 0;
@@ -802,8 +824,7 @@ retry:
 
 	BUG_ON(ret != 0);
 
-	while (put_count--)
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+	ttm_bo_list_ref_sub(bo, put_count, true);
 
 	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
 	ttm_bo_unreserve(bo);
@@ -1036,6 +1057,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 {
 	int ret = 0;
 	struct ttm_mem_reg mem;
+	struct ttm_bo_device *bdev = bo->bdev;
 
 	BUG_ON(!atomic_read(&bo->reserved));
 
@@ -1044,15 +1066,16 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	 * Have the driver move function wait for idle when necessary,
 	 * instead of doing it here.
 	 */
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 	if (ret)
 		return ret;
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
-	mem.bus.io_reserved = false;
+	mem.bus.io_reserved_vm = false;
+	mem.bus.io_reserved_count = 0;
 	/*
 	 * Determine where to move the buffer.
 	 */
@@ -1163,7 +1186,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	}
 	bo->destroy = destroy;
 
-	spin_lock_init(&bo->lock);
 	kref_init(&bo->kref);
 	kref_init(&bo->list_kref);
 	atomic_set(&bo->cpu_writers, 0);
@@ -1172,6 +1194,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bo->lru);
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
+	INIT_LIST_HEAD(&bo->io_reserve_lru);
 	bo->bdev = bdev;
 	bo->glob = bdev->glob;
 	bo->type = type;
@@ -1181,7 +1204,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	bo->mem.num_pages = bo->num_pages;
 	bo->mem.mm_node = NULL;
 	bo->mem.page_alignment = page_alignment;
-	bo->mem.bus.io_reserved = false;
+	bo->mem.bus.io_reserved_vm = false;
+	bo->mem.bus.io_reserved_count = 0;
 	bo->buffer_start = buffer_start & PAGE_MASK;
 	bo->priv_flags = 0;
 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1355,6 +1379,10 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 	BUG_ON(type >= TTM_NUM_MEM_TYPES);
 	man = &bdev->man[type];
 	BUG_ON(man->has_type);
+	man->io_reserve_fastpath = true;
+	man->use_io_reserve_lru = false;
+	mutex_init(&man->io_reserve_mutex);
+	INIT_LIST_HEAD(&man->io_reserve_lru);
 
 	ret = bdev->driver->init_mem_type(bdev, type, man);
 	if (ret)
@@ -1527,7 +1555,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	bdev->dev_mapping = NULL;
 	bdev->glob = glob;
 	bdev->need_dma32 = need_dma32;
-
+	bdev->val_seq = 0;
+	spin_lock_init(&bdev->fence_lock);
 	mutex_lock(&glob->device_list_mutex);
 	list_add_tail(&bdev->device_list, &glob->device_list);
 	mutex_unlock(&glob->device_list_mutex);
@@ -1561,7 +1590,7 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 		return true;
 }
 
-void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	loff_t offset = (loff_t) bo->addr_space_offset;
@@ -1570,8 +1599,20 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 	if (!bdev->dev_mapping)
 		return;
 	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
-	ttm_mem_io_free(bdev, &bo->mem);
+	ttm_mem_io_free_vm(bo);
+}
+
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+
+	ttm_mem_io_lock(man, false);
+	ttm_bo_unmap_virtual_locked(bo);
+	ttm_mem_io_unlock(man);
 }
+
+
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
@@ -1651,6 +1692,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 		bool lazy, bool interruptible, bool no_wait)
 {
 	struct ttm_bo_driver *driver = bo->bdev->driver;
+	struct ttm_bo_device *bdev = bo->bdev;
 	void *sync_obj;
 	void *sync_obj_arg;
 	int ret = 0;
@@ -1664,9 +1706,9 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 		void *tmp_obj = bo->sync_obj;
 		bo->sync_obj = NULL;
 		clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		driver->sync_obj_unref(&tmp_obj);
-		spin_lock(&bo->lock);
+		spin_lock(&bdev->fence_lock);
 		continue;
 	}
 
@@ -1675,29 +1717,29 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 		sync_obj_arg = bo->sync_obj_arg;
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
 					    lazy, interruptible);
 		if (unlikely(ret != 0)) {
 			driver->sync_obj_unref(&sync_obj);
-			spin_lock(&bo->lock);
+			spin_lock(&bdev->fence_lock);
 			return ret;
 		}
-		spin_lock(&bo->lock);
+		spin_lock(&bdev->fence_lock);
 		if (likely(bo->sync_obj == sync_obj &&
 			   bo->sync_obj_arg == sync_obj_arg)) {
 			void *tmp_obj = bo->sync_obj;
 			bo->sync_obj = NULL;
 			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
 				  &bo->priv_flags);
-			spin_unlock(&bo->lock);
+			spin_unlock(&bdev->fence_lock);
 			driver->sync_obj_unref(&sync_obj);
 			driver->sync_obj_unref(&tmp_obj);
-			spin_lock(&bo->lock);
+			spin_lock(&bdev->fence_lock);
 		} else {
-			spin_unlock(&bo->lock);
+			spin_unlock(&bdev->fence_lock);
 			driver->sync_obj_unref(&sync_obj);
-			spin_lock(&bo->lock);
+			spin_lock(&bdev->fence_lock);
 		}
 	}
 	return 0;
@@ -1706,6 +1748,7 @@ EXPORT_SYMBOL(ttm_bo_wait);
 
 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 {
+	struct ttm_bo_device *bdev = bo->bdev;
 	int ret = 0;
 
 	/*
@@ -1715,9 +1758,9 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
 	if (unlikely(ret != 0))
 		return ret;
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, true, no_wait);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 	if (likely(ret == 0))
 		atomic_inc(&bo->cpu_writers);
 	ttm_bo_unreserve(bo);
@@ -1783,16 +1826,15 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 	put_count = ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);
 
-	while (put_count--)
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+	ttm_bo_list_ref_sub(bo, put_count, true);
 
 	/**
 	 * Wait for GPU, then move to system cached.
 	 */
 
-	spin_lock(&bo->lock);
+	spin_lock(&bo->bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, false, false);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bo->bdev->fence_lock);
 
 	if (unlikely(ret != 0))
 		goto out;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 3106d5bcce32..a89839f83f6c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -75,37 +75,123 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
 {
-	int ret;
+	if (likely(man->io_reserve_fastpath))
+		return 0;
+
+	if (interruptible)
+		return mutex_lock_interruptible(&man->io_reserve_mutex);
+
+	mutex_lock(&man->io_reserve_mutex);
+	return 0;
+}
 
-	if (!mem->bus.io_reserved) {
-		mem->bus.io_reserved = true;
+void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
+{
+	if (likely(man->io_reserve_fastpath))
+		return;
+
+	mutex_unlock(&man->io_reserve_mutex);
+}
+
+static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
+{
+	struct ttm_buffer_object *bo;
+
+	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
+		return -EAGAIN;
+
+	bo = list_first_entry(&man->io_reserve_lru,
+			      struct ttm_buffer_object,
+			      io_reserve_lru);
+	list_del_init(&bo->io_reserve_lru);
+	ttm_bo_unmap_virtual_locked(bo);
+
+	return 0;
+}
+
+static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+			      struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	int ret = 0;
+
+	if (!bdev->driver->io_mem_reserve)
+		return 0;
+	if (likely(man->io_reserve_fastpath))
+		return bdev->driver->io_mem_reserve(bdev, mem);
+
+	if (bdev->driver->io_mem_reserve &&
+	    mem->bus.io_reserved_count++ == 0) {
+retry:
 		ret = bdev->driver->io_mem_reserve(bdev, mem);
+		if (ret == -EAGAIN) {
+			ret = ttm_mem_io_evict(man);
+			if (ret == 0)
+				goto retry;
+		}
+	}
+	return ret;
+}
+
+static void ttm_mem_io_free(struct ttm_bo_device *bdev,
+			    struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+	if (likely(man->io_reserve_fastpath))
+		return;
+
+	if (bdev->driver->io_mem_reserve &&
+	    --mem->bus.io_reserved_count == 0 &&
+	    bdev->driver->io_mem_free)
+		bdev->driver->io_mem_free(bdev, mem);
+
+}
+
+int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
+{
+	struct ttm_mem_reg *mem = &bo->mem;
+	int ret;
+
+	if (!mem->bus.io_reserved_vm) {
+		struct ttm_mem_type_manager *man =
+			&bo->bdev->man[mem->mem_type];
+
+		ret = ttm_mem_io_reserve(bo->bdev, mem);
 		if (unlikely(ret != 0))
 			return ret;
+		mem->bus.io_reserved_vm = true;
+		if (man->use_io_reserve_lru)
+			list_add_tail(&bo->io_reserve_lru,
+				      &man->io_reserve_lru);
 	}
 	return 0;
 }
 
-void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
 {
-	if (bdev->driver->io_mem_reserve) {
-		if (mem->bus.io_reserved) {
-			mem->bus.io_reserved = false;
-			bdev->driver->io_mem_free(bdev, mem);
-		}
+	struct ttm_mem_reg *mem = &bo->mem;
+
+	if (mem->bus.io_reserved_vm) {
+		mem->bus.io_reserved_vm = false;
+		list_del_init(&bo->io_reserve_lru);
+		ttm_mem_io_free(bo->bdev, mem);
 	}
 }
 
 int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 			void **virtual)
 {
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	int ret;
 	void *addr;
 
 	*virtual = NULL;
+	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bdev, mem);
+	ttm_mem_io_unlock(man);
 	if (ret || !mem->bus.is_iomem)
 		return ret;
 
@@ -117,7 +203,9 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 		else
 			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
 		if (!addr) {
+			(void) ttm_mem_io_lock(man, false);
 			ttm_mem_io_free(bdev, mem);
+			ttm_mem_io_unlock(man);
 			return -ENOMEM;
 		}
 	}
@@ -134,7 +222,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 
 	if (virtual && mem->bus.addr == NULL)
 		iounmap(virtual);
+	(void) ttm_mem_io_lock(man, false);
 	ttm_mem_io_free(bdev, mem);
+	ttm_mem_io_unlock(man);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -231,7 +321,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
 	struct ttm_tt *ttm = bo->ttm;
 	struct ttm_mem_reg *old_mem = &bo->mem;
-	struct ttm_mem_reg old_copy = *old_mem;
+	struct ttm_mem_reg old_copy;
 	void *old_iomap;
 	void *new_iomap;
 	int ret;
@@ -281,7 +371,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	mb();
 out2:
 	ttm_bo_free_old_node(bo);
-
+	old_copy = *old_mem;
 	*old_mem = *new_mem;
 	new_mem->mm_node = NULL;
 
@@ -292,7 +382,7 @@ out2:
 	}
 
 out1:
-	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
+	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
 out:
 	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
 	return ret;
@@ -337,11 +427,11 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	 * TODO: Explicit member copy would probably be better here.
	 */
 
-	spin_lock_init(&fbo->lock);
 	init_waitqueue_head(&fbo->event_queue);
 	INIT_LIST_HEAD(&fbo->ddestroy);
 	INIT_LIST_HEAD(&fbo->lru);
 	INIT_LIST_HEAD(&fbo->swap);
+	INIT_LIST_HEAD(&fbo->io_reserve_lru);
 	fbo->vm_node = NULL;
 	atomic_set(&fbo->cpu_writers, 0);
 
@@ -453,6 +543,8 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 		unsigned long start_page, unsigned long num_pages,
 		struct ttm_bo_kmap_obj *map)
 {
+	struct ttm_mem_type_manager *man =
+		&bo->bdev->man[bo->mem.mem_type];
 	unsigned long offset, size;
 	int ret;
 
@@ -467,7 +559,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
 		return -EPERM;
 #endif
+	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
+	ttm_mem_io_unlock(man);
 	if (ret)
 		return ret;
 	if (!bo->mem.bus.is_iomem) {
@@ -482,12 +576,15 @@ EXPORT_SYMBOL(ttm_bo_kmap);
 
 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 {
+	struct ttm_buffer_object *bo = map->bo;
+	struct ttm_mem_type_manager *man =
+		&bo->bdev->man[bo->mem.mem_type];
+
 	if (!map->virtual)
 		return;
 	switch (map->bo_kmap_type) {
 	case ttm_bo_map_iomap:
 		iounmap(map->virtual);
-		ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
 		break;
 	case ttm_bo_map_vmap:
 		vunmap(map->virtual);
@@ -500,6 +597,9 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 	default:
 		BUG();
 	}
+	(void) ttm_mem_io_lock(man, false);
+	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
+	ttm_mem_io_unlock(man);
 	map->virtual = NULL;
 	map->page = NULL;
 }
@@ -520,7 +620,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	struct ttm_buffer_object *ghost_obj;
 	void *tmp_obj = NULL;
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	if (bo->sync_obj) {
 		tmp_obj = bo->sync_obj;
 		bo->sync_obj = NULL;
@@ -529,7 +629,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	bo->sync_obj_arg = sync_obj_arg;
 	if (evict) {
 		ret = ttm_bo_wait(bo, false, false, false);
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		if (tmp_obj)
 			driver->sync_obj_unref(&tmp_obj);
 		if (ret)
@@ -552,7 +652,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	 */
 
 	set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 	if (tmp_obj)
 		driver->sync_obj_unref(&tmp_obj);
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index fe6cb77899f4..221b924acebe 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -83,6 +83,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	int i;
 	unsigned long address = (unsigned long)vmf->virtual_address;
 	int retval = VM_FAULT_NOPAGE;
+	struct ttm_mem_type_manager *man =
+		&bdev->man[bo->mem.mem_type];
 
 	/*
 	 * Work around locking order reversal in fault / nopfn
@@ -118,24 +120,28 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * move.
 	 */
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
 		ret = ttm_bo_wait(bo, false, true, false);
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		if (unlikely(ret != 0)) {
 			retval = (ret != -ERESTARTSYS) ?
 			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
 			goto out_unlock;
 		}
 	} else
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 
-
-	ret = ttm_mem_io_reserve(bdev, &bo->mem);
-	if (ret) {
-		retval = VM_FAULT_SIGBUS;
+	ret = ttm_mem_io_lock(man, true);
+	if (unlikely(ret != 0)) {
+		retval = VM_FAULT_NOPAGE;
 		goto out_unlock;
 	}
+	ret = ttm_mem_io_reserve_vm(bo);
+	if (unlikely(ret != 0)) {
+		retval = VM_FAULT_SIGBUS;
+		goto out_io_unlock;
+	}
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
 	    bo->vm_node->start - vma->vm_pgoff;
@@ -144,7 +150,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	if (unlikely(page_offset >= bo->num_pages)) {
 		retval = VM_FAULT_SIGBUS;
-		goto out_unlock;
+		goto out_io_unlock;
 	}
 
 	/*
@@ -182,7 +188,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 			page = ttm_tt_get_page(ttm, page_offset);
 			if (unlikely(!page && i == 0)) {
 				retval = VM_FAULT_OOM;
-				goto out_unlock;
+				goto out_io_unlock;
 			} else if (unlikely(!page)) {
 				break;
 			}
@@ -200,14 +206,15 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		else if (unlikely(ret != 0)) {
 			retval =
 			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
-			goto out_unlock;
+			goto out_io_unlock;
 		}
 
 		address += PAGE_SIZE;
 		if (unlikely(++page_offset >= page_last))
 			break;
 	}
-
+out_io_unlock:
+	ttm_mem_io_unlock(man);
 out_unlock:
 	ttm_bo_unreserve(bo);
 	return retval;
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index c285c2902d15..3832fe10b4df 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,7 +32,7 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 
-void ttm_eu_backoff_reservation(struct list_head *list)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
 
@@ -41,10 +41,77 @@ void ttm_eu_backoff_reservation(struct list_head *list)
 		if (!entry->reserved)
 			continue;
 
+		if (entry->removed) {
+			ttm_bo_add_to_lru(bo);
+			entry->removed = false;
+
+		}
 		entry->reserved = false;
-		ttm_bo_unreserve(bo);
+		atomic_set(&bo->reserved, 0);
+		wake_up_all(&bo->event_queue);
+	}
+}
+
+static void ttm_eu_del_from_lru_locked(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+		if (!entry->reserved)
+			continue;
+
+		if (!entry->removed) {
+			entry->put_count = ttm_bo_del_from_lru(bo);
+			entry->removed = true;
+		}
 	}
 }
+
+static void ttm_eu_list_ref_sub(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+
+		if (entry->put_count) {
+			ttm_bo_list_ref_sub(bo, entry->put_count, true);
+			entry->put_count = 0;
+		}
+	}
+}
+
+static int ttm_eu_wait_unreserved_locked(struct list_head *list,
+					 struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_global *glob = bo->glob;
+	int ret;
+
+	ttm_eu_del_from_lru_locked(list);
+	spin_unlock(&glob->lru_lock);
+	ret = ttm_bo_wait_unreserved(bo, true);
+	spin_lock(&glob->lru_lock);
+	if (unlikely(ret != 0))
+		ttm_eu_backoff_reservation_locked(list);
+	return ret;
+}
+
+
+void ttm_eu_backoff_reservation(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+	struct ttm_bo_global *glob;
+
+	if (list_empty(list))
+		return;
+
+	entry = list_first_entry(list, struct ttm_validate_buffer, head);
+	glob = entry->bo->glob;
+	spin_lock(&glob->lru_lock);
+	ttm_eu_backoff_reservation_locked(list);
+	spin_unlock(&glob->lru_lock);
+}
 EXPORT_SYMBOL(ttm_eu_backoff_reservation);
 
 /*
@@ -59,37 +126,76 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
  * buffers in different orders.
  */
 
-int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
+int ttm_eu_reserve_buffers(struct list_head *list)
 {
+	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;
 	int ret;
+	uint32_t val_seq;
+
+	if (list_empty(list))
+		return 0;
+
+	list_for_each_entry(entry, list, head) {
+		entry->reserved = false;
+		entry->put_count = 0;
+		entry->removed = false;
+	}
+
+	entry = list_first_entry(list, struct ttm_validate_buffer, head);
+	glob = entry->bo->glob;
 
 retry:
+	spin_lock(&glob->lru_lock);
+	val_seq = entry->bo->bdev->val_seq++;
+
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		entry->reserved = false;
-		ret = ttm_bo_reserve(bo, true, false, true, val_seq);
-		if (ret != 0) {
-			ttm_eu_backoff_reservation(list);
-			if (ret == -EAGAIN) {
-				ret = ttm_bo_wait_unreserved(bo, true);
-				if (unlikely(ret != 0))
-					return ret;
-				goto retry;
-			} else
-				return ret;
+retry_this_bo:
+		ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+		switch (ret) {
+		case 0:
+			break;
+		case -EBUSY:
+			ret = ttm_eu_wait_unreserved_locked(list, bo);
+			if (unlikely(ret != 0)) {
+				spin_unlock(&glob->lru_lock);
+				ttm_eu_list_ref_sub(list);
+				return ret;
+			}
+			goto retry_this_bo;
+		case -EAGAIN:
+			ttm_eu_backoff_reservation_locked(list);
+			spin_unlock(&glob->lru_lock);
+			ttm_eu_list_ref_sub(list);
+			ret = ttm_bo_wait_unreserved(bo, true);
+			if (unlikely(ret != 0))
+				return ret;
+			goto retry;
+		default:
+			ttm_eu_backoff_reservation_locked(list);
+			spin_unlock(&glob->lru_lock);
+			ttm_eu_list_ref_sub(list);
+			return ret;
 		}
 
 		entry->reserved = true;
 		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-			ttm_eu_backoff_reservation(list);
+			ttm_eu_backoff_reservation_locked(list);
+			spin_unlock(&glob->lru_lock);
+			ttm_eu_list_ref_sub(list);
			ret = ttm_bo_wait_cpu(bo, false);
 			if (ret)
 				return ret;
 			goto retry;
 		}
 	}
+
+	ttm_eu_del_from_lru_locked(list);
+	spin_unlock(&glob->lru_lock);
+	ttm_eu_list_ref_sub(list);
+
 	return 0;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -97,21 +203,36 @@ EXPORT_SYMBOL(ttm_eu_reserve_buffers);
 void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
 {
 	struct ttm_validate_buffer *entry;
+	struct ttm_buffer_object *bo;
+	struct ttm_bo_global *glob;
+	struct ttm_bo_device *bdev;
+	struct ttm_bo_driver *driver;
 
-	list_for_each_entry(entry, list, head) {
-		struct ttm_buffer_object *bo = entry->bo;
-		struct ttm_bo_driver *driver = bo->bdev->driver;
-		void *old_sync_obj;
+	if (list_empty(list))
+		return;
+
+	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
+	bdev = bo->bdev;
+	driver = bdev->driver;
+	glob = bo->glob;
 
-		spin_lock(&bo->lock);
-		old_sync_obj = bo->sync_obj;
+	spin_lock(&bdev->fence_lock);
+	spin_lock(&glob->lru_lock);
+
+	list_for_each_entry(entry, list, head) {
+		bo = entry->bo;
+		entry->old_sync_obj = bo->sync_obj;
 		bo->sync_obj = driver->sync_obj_ref(sync_obj);
 		bo->sync_obj_arg = entry->new_sync_obj_arg;
-		spin_unlock(&bo->lock);
-		ttm_bo_unreserve(bo);
+		ttm_bo_unreserve_locked(bo);
 		entry->reserved = false;
-		if (old_sync_obj)
-			driver->sync_obj_unref(&old_sync_obj);
+	}
+	spin_unlock(&glob->lru_lock);
+	spin_unlock(&bdev->fence_lock);
+
+	list_for_each_entry(entry, list, head) {
+		if (entry->old_sync_obj)
+			driver->sync_obj_unref(&entry->old_sync_obj);
 	}
 }
 EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index e7a58d055041..10fc01f69c40 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -264,7 +264,6 @@ struct vmw_private {
 	 */
 
 	struct vmw_sw_context ctx;
-	uint32_t val_seq;
 	struct mutex cmdbuf_mutex;
 
 	/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 76954e3528c1..41b95ed6dbcd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -653,8 +653,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 	ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
 	if (unlikely(ret != 0))
 		goto out_err;
-	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
-				     dev_priv->val_seq++);
+	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
 	if (unlikely(ret != 0))
 		goto out_err;
 
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index beafc156a535..50852aad260a 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -74,6 +74,8 @@ struct ttm_placement {
74 * @is_iomem: is this io memory ? 74 * @is_iomem: is this io memory ?
75 * @size: size in byte 75 * @size: size in byte
76 * @offset: offset from the base address 76 * @offset: offset from the base address
77 * @io_reserved_vm: The VM system has a refcount in @io_reserved_count
78 * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve
77 * 79 *
78 * Structure indicating the bus placement of an object. 80 * Structure indicating the bus placement of an object.
79 */ 81 */
@@ -83,7 +85,8 @@ struct ttm_bus_placement {
83 unsigned long size; 85 unsigned long size;
84 unsigned long offset; 86 unsigned long offset;
85 bool is_iomem; 87 bool is_iomem;
86 bool io_reserved; 88 bool io_reserved_vm;
89 uint64_t io_reserved_count;
87}; 90};
88 91
89 92
@@ -154,7 +157,6 @@ struct ttm_tt;
154 * keeps one refcount. When this refcount reaches zero, 157 * keeps one refcount. When this refcount reaches zero,
155 * the object is destroyed. 158 * the object is destroyed.
156 * @event_queue: Queue for processes waiting on buffer object status change. 159 * @event_queue: Queue for processes waiting on buffer object status change.
157 * @lock: spinlock protecting mostly synchronization members.
158 * @mem: structure describing current placement. 160 * @mem: structure describing current placement.
159 * @persistant_swap_storage: Usually the swap storage is deleted for buffers 161 * @persistant_swap_storage: Usually the swap storage is deleted for buffers
160 * pinned in physical memory. If this behaviour is not desired, this member 162 * pinned in physical memory. If this behaviour is not desired, this member
@@ -213,7 +215,6 @@ struct ttm_buffer_object {
213 struct kref kref; 215 struct kref kref;
214 struct kref list_kref; 216 struct kref list_kref;
215 wait_queue_head_t event_queue; 217 wait_queue_head_t event_queue;
216 spinlock_t lock;
217 218
218 /** 219 /**
219 * Members protected by the bo::reserved lock. 220 * Members protected by the bo::reserved lock.
@@ -237,6 +238,7 @@ struct ttm_buffer_object {
237 struct list_head lru; 238 struct list_head lru;
238 struct list_head ddestroy; 239 struct list_head ddestroy;
239 struct list_head swap; 240 struct list_head swap;
241 struct list_head io_reserve_lru;
240 uint32_t val_seq; 242 uint32_t val_seq;
241 bool seq_valid; 243 bool seq_valid;
242 244
@@ -248,10 +250,10 @@ struct ttm_buffer_object {
248 atomic_t reserved; 250 atomic_t reserved;
249 251
250 /** 252 /**
251 * Members protected by the bo::lock 253 * Members protected by struct ttm_bo_device::fence_lock
252 * In addition, setting sync_obj to anything else 254 * In addition, setting sync_obj to anything else
253 * than NULL requires bo::reserved to be held. This allows for 255 * than NULL requires bo::reserved to be held. This allows for
254 * checking NULL while reserved but not holding bo::lock. 256 * checking NULL while reserved but not holding the mentioned lock.
255 */ 257 */
256 258
257 void *sync_obj_arg; 259 void *sync_obj_arg;
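
The removed per-bo spinlock is superseded by a single per-device fence_lock (added to struct ttm_bo_device below), so all sync_obj accesses for a device now serialize on bo->bdev->fence_lock. A hedged sketch of the resulting driver-side pattern; the fence type and ref/unref helpers are hypothetical stand-ins for a driver's own fence API:

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

struct my_fence;	/* hypothetical driver fence object */
extern void *my_fence_ref(struct my_fence *fence);
extern void my_fence_unref(void *fence);

/* Sketch: swap the bo's sync object under the device-wide fence_lock.
 * Setting a non-NULL sync_obj additionally requires the bo reserved. */
static void example_attach_fence(struct ttm_buffer_object *bo,
				 struct my_fence *fence)
{
	void *old_sync_obj;

	spin_lock(&bo->bdev->fence_lock);
	old_sync_obj = bo->sync_obj;
	bo->sync_obj = my_fence_ref(fence);
	spin_unlock(&bo->bdev->fence_lock);

	my_fence_unref(old_sync_obj);
}
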
@@ -364,6 +366,44 @@ extern int ttm_bo_validate(struct ttm_buffer_object *bo,
364 */ 366 */
365extern void ttm_bo_unref(struct ttm_buffer_object **bo); 367extern void ttm_bo_unref(struct ttm_buffer_object **bo);
366 368
369
370/**
371 * ttm_bo_list_ref_sub
372 *
373 * @bo: The buffer object.
 374 * @count: The number of references with which to decrease @bo::list_kref.
375 * @never_free: The refcount should not reach zero with this operation.
376 *
377 * Release @count lru list references to this buffer object.
378 */
379extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
380 bool never_free);
381
382/**
383 * ttm_bo_add_to_lru
384 *
385 * @bo: The buffer object.
386 *
387 * Add this bo to the relevant mem type lru and, if it's backed by
 388 * system pages (ttms), to the swap list.
389 * This function must be called with struct ttm_bo_global::lru_lock held, and
390 * is typically called immediately prior to unreserving a bo.
391 */
392extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
393
394/**
395 * ttm_bo_del_from_lru
396 *
397 * @bo: The buffer object.
398 *
 399 * Remove this bo from all lru lists used to look up and reserve an object.
400 * This function must be called with struct ttm_bo_global::lru_lock held,
 401 * and is usually called immediately after the bo has been reserved to
402 * avoid recursive reservation from lru lists.
403 */
404extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
405
406
367/** 407/**
368 * ttm_bo_lock_delayed_workqueue 408 * ttm_bo_lock_delayed_workqueue
369 * 409 *
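
These three helpers expose the lru bookkeeping that the reserve fastpath needs: pull a bo off the lru lists while holding ttm_bo_global::lru_lock, then drop the accumulated list_kref references in one batched operation after the spinlock is released. A rough sketch of that pattern, assuming a caller that already has pointers to the global and the bo:

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Sketch: try-reserve under the lru lock, remove the bo from the lru
 * lists, and defer the kref drops until after the lock is released. */
static int example_reserve_fastpath(struct ttm_bo_global *glob,
				    struct ttm_buffer_object *bo)
{
	int put_count = 0;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_locked(bo, true, true, false, 0);
	if (ret == 0)
		put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	if (ret == 0)
		ttm_bo_list_ref_sub(bo, put_count, true);
	return ret;
}
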
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 8e0c848326b6..1da8af6ac884 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -179,30 +179,6 @@ struct ttm_tt {
179#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */ 179#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
180#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */ 180#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
181 181
182/**
183 * struct ttm_mem_type_manager
184 *
185 * @has_type: The memory type has been initialized.
186 * @use_type: The memory type is enabled.
187 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
188 * managed by this memory type.
189 * @gpu_offset: If used, the GPU offset of the first managed page of
190 * fixed memory or the first managed location in an aperture.
191 * @size: Size of the managed region.
192 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
193 * as defined in ttm_placement_common.h
194 * @default_caching: The default caching policy used for a buffer object
195 * placed in this memory type if the user doesn't provide one.
196 * @manager: The range manager used for this memory type. FIXME: If the aperture
197 * has a page size different from the underlying system, the granularity
198 * of this manager should take care of this. But the range allocating code
199 * in ttm_bo.c needs to be modified for this.
200 * @lru: The lru list for this memory type.
201 *
202 * This structure is used to identify and manage memory types for a device.
203 * It's set up by the ttm_bo_driver::init_mem_type method.
204 */
205
206struct ttm_mem_type_manager; 182struct ttm_mem_type_manager;
207 183
208struct ttm_mem_type_manager_func { 184struct ttm_mem_type_manager_func {
@@ -287,6 +263,36 @@ struct ttm_mem_type_manager_func {
287 void (*debug)(struct ttm_mem_type_manager *man, const char *prefix); 263 void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
288}; 264};
289 265
266/**
267 * struct ttm_mem_type_manager
268 *
269 * @has_type: The memory type has been initialized.
270 * @use_type: The memory type is enabled.
271 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
272 * managed by this memory type.
273 * @gpu_offset: If used, the GPU offset of the first managed page of
274 * fixed memory or the first managed location in an aperture.
275 * @size: Size of the managed region.
276 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
277 * as defined in ttm_placement_common.h
278 * @default_caching: The default caching policy used for a buffer object
279 * placed in this memory type if the user doesn't provide one.
280 * @func: structure pointer implementing the range manager. See above
281 * @priv: Driver private closure for @func.
282 * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
283 * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
284 * reserved by the TTM vm system.
285 * @io_reserve_lru: Optional lru list for unreserving io mem regions.
286 * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
287 * static information. bdev::driver::io_mem_free is never used.
288 * @lru: The lru list for this memory type.
289 *
290 * This structure is used to identify and manage memory types for a device.
291 * It's set up by the ttm_bo_driver::init_mem_type method.
292 */
293
294
295
290struct ttm_mem_type_manager { 296struct ttm_mem_type_manager {
291 struct ttm_bo_device *bdev; 297 struct ttm_bo_device *bdev;
292 298
@@ -303,6 +309,15 @@ struct ttm_mem_type_manager {
303 uint32_t default_caching; 309 uint32_t default_caching;
304 const struct ttm_mem_type_manager_func *func; 310 const struct ttm_mem_type_manager_func *func;
305 void *priv; 311 void *priv;
312 struct mutex io_reserve_mutex;
313 bool use_io_reserve_lru;
314 bool io_reserve_fastpath;
315
316 /*
317 * Protected by @io_reserve_mutex:
318 */
319
320 struct list_head io_reserve_lru;
306 321
307 /* 322 /*
308 * Protected by the global->lru_lock. 323 * Protected by the global->lru_lock.
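
The new fields split io reservation into a lock-free and a locked path: when io_reserve_fastpath is set, bdev::driver::io_mem_reserve only reports static placement information, so no mutual exclusion is needed; otherwise io_reserve_mutex guards the shared structures, including io_reserve_lru. A plausible sketch of the lock helper this implies (the actual ttm_bo_util.c implementation may differ in detail):

#include <linux/mutex.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Sketch: skip locking entirely on the fastpath; otherwise take
 * io_reserve_mutex, interruptibly if the caller requested it. */
static int example_mem_io_lock(struct ttm_mem_type_manager *man,
			       bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
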
@@ -510,9 +525,12 @@ struct ttm_bo_global {
510 * 525 *
511 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. 526 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
512 * @man: An array of mem_type_managers. 527 * @man: An array of mem_type_managers.
528 * @fence_lock: Protects the synchronizing members on *all* bos belonging
529 * to this device.
513 * @addr_space_mm: Range manager for the device address space. 530 * @addr_space_mm: Range manager for the device address space.
514 * lru_lock: Spinlock that protects the buffer+device lru lists and 531 * lru_lock: Spinlock that protects the buffer+device lru lists and
515 * ddestroy lists. 532 * ddestroy lists.
533 * @val_seq: Current validation sequence.
516 * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager. 534 * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
517 * If a GPU lockup has been detected, this is forced to 0. 535 * If a GPU lockup has been detected, this is forced to 0.
518 * @dev_mapping: A pointer to the struct address_space representing the 536 * @dev_mapping: A pointer to the struct address_space representing the
@@ -531,6 +549,7 @@ struct ttm_bo_device {
531 struct ttm_bo_driver *driver; 549 struct ttm_bo_driver *driver;
532 rwlock_t vm_lock; 550 rwlock_t vm_lock;
533 struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; 551 struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
552 spinlock_t fence_lock;
534 /* 553 /*
535 * Protected by the vm lock. 554 * Protected by the vm lock.
536 */ 555 */
@@ -541,6 +560,7 @@ struct ttm_bo_device {
541 * Protected by the global:lru lock. 560 * Protected by the global:lru lock.
542 */ 561 */
543 struct list_head ddestroy; 562 struct list_head ddestroy;
563 uint32_t val_seq;
544 564
545 /* 565 /*
546 * Protected by load / firstopen / lastclose /unload sync. 566 * Protected by load / firstopen / lastclose /unload sync.
@@ -753,31 +773,6 @@ extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
753 773
754extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait); 774extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
755 775
756/**
757 * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
758 *
759 * @bo Pointer to a struct ttm_buffer_object.
760 * @bus_base On return the base of the PCI region
761 * @bus_offset On return the byte offset into the PCI region
762 * @bus_size On return the byte size of the buffer object or zero if
763 * the buffer object memory is not accessible through a PCI region.
764 *
765 * Returns:
766 * -EINVAL if the buffer object is currently not mappable.
767 * 0 otherwise.
768 */
769
770extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
771 struct ttm_mem_reg *mem,
772 unsigned long *bus_base,
773 unsigned long *bus_offset,
774 unsigned long *bus_size);
775
776extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
777 struct ttm_mem_reg *mem);
778extern void ttm_mem_io_free(struct ttm_bo_device *bdev,
779 struct ttm_mem_reg *mem);
780
781extern void ttm_bo_global_release(struct drm_global_reference *ref); 776extern void ttm_bo_global_release(struct drm_global_reference *ref);
782extern int ttm_bo_global_init(struct drm_global_reference *ref); 777extern int ttm_bo_global_init(struct drm_global_reference *ref);
783 778
@@ -810,6 +805,22 @@ extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
810extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); 805extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
811 806
812/** 807/**
808 * ttm_bo_unmap_virtual
809 *
810 * @bo: tear down the virtual mappings for this BO
811 *
812 * The caller must take ttm_mem_io_lock before calling this function.
813 */
814extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
815
816extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
817extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
818extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
819 bool interruptible);
820extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
821
822
823/**
813 * ttm_bo_reserve: 824 * ttm_bo_reserve:
814 * 825 *
815 * @bo: A pointer to a struct ttm_buffer_object. 826 * @bo: A pointer to a struct ttm_buffer_object.
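
ttm_bo_unmap_virtual_locked plus the ttm_mem_io_lock/ttm_mem_io_unlock pair make the io reservation explicit at mapping-teardown time. A hedged sketch of how the unlocked entry point is presumably composed from them:

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Sketch: take the memory type's io lock around the locked teardown.
 * With interruptible == false, ttm_mem_io_lock cannot fail. */
static void example_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}
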
@@ -859,11 +870,44 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
859 * try again. (only if use_sequence == 1). 870 * try again. (only if use_sequence == 1).
860 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by 871 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
861 * a signal. Release all buffer reservations and return to user-space. 872 * a signal. Release all buffer reservations and return to user-space.
873 * -EBUSY: The function needed to sleep, but @no_wait was true
874 * -EDEADLK: Bo already reserved using @sequence. This error code will only
875 * be returned if @use_sequence is set to true.
862 */ 876 */
863extern int ttm_bo_reserve(struct ttm_buffer_object *bo, 877extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
864 bool interruptible, 878 bool interruptible,
865 bool no_wait, bool use_sequence, uint32_t sequence); 879 bool no_wait, bool use_sequence, uint32_t sequence);
866 880
881
882/**
883 * ttm_bo_reserve_locked:
884 *
885 * @bo: A pointer to a struct ttm_buffer_object.
886 * @interruptible: Sleep interruptible if waiting.
887 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 888 * @use_sequence: If @bo is already reserved, only sleep waiting for
889 * it to become unreserved if @sequence < (@bo)->sequence.
890 *
891 * Must be called with struct ttm_bo_global::lru_lock held,
892 * and will not remove reserved buffers from the lru lists.
893 * The function may release the LRU spinlock if it needs to sleep.
894 * Otherwise identical to ttm_bo_reserve.
895 *
896 * Returns:
897 * -EAGAIN: The reservation may cause a deadlock.
898 * Release all buffer reservations, wait for @bo to become unreserved and
899 * try again. (only if use_sequence == 1).
900 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
901 * a signal. Release all buffer reservations and return to user-space.
902 * -EBUSY: The function needed to sleep, but @no_wait was true
903 * -EDEADLK: Bo already reserved using @sequence. This error code will only
904 * be returned if @use_sequence is set to true.
905 */
906extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
907 bool interruptible,
908 bool no_wait, bool use_sequence,
909 uint32_t sequence);
910
867/** 911/**
868 * ttm_bo_unreserve 912 * ttm_bo_unreserve
869 * 913 *
@@ -874,6 +918,16 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
874extern void ttm_bo_unreserve(struct ttm_buffer_object *bo); 918extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
875 919
876/** 920/**
921 * ttm_bo_unreserve_locked
922 *
923 * @bo: A pointer to a struct ttm_buffer_object.
924 *
925 * Unreserve a previous reservation of @bo.
926 * Needs to be called with struct ttm_bo_global::lru_lock held.
927 */
928extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
929
930/**
877 * ttm_bo_wait_unreserved 931 * ttm_bo_wait_unreserved
878 * 932 *
879 * @bo: A pointer to a struct ttm_buffer_object. 933 * @bo: A pointer to a struct ttm_buffer_object.
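
ttm_bo_unreserve_locked serves callers that are already inside a ttm_bo_global::lru_lock critical section, saving a drop-and-retake of the spinlock around the unreserve wakeup. A minimal sketch, assuming the same semantics as ttm_bo_unreserve apart from the locking rule:

#include <drm/ttm/ttm_bo_driver.h>

/* Sketch: finish lru-locked bookkeeping and unreserve without
 * releasing the lru lock in between. */
static void example_unreserve_held(struct ttm_bo_global *glob,
				   struct ttm_buffer_object *bo)
{
	spin_lock(&glob->lru_lock);
	/* ... other bookkeeping protected by lru_lock ... */
	ttm_bo_unreserve_locked(bo);
	spin_unlock(&glob->lru_lock);
}
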
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index cd2c475da9ea..26cc7f9ffa41 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -41,7 +41,10 @@
41 * @bo: refcounted buffer object pointer. 41 * @bo: refcounted buffer object pointer.
42 * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once 42 * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
43 * adding a new sync object. 43 * adding a new sync object.
44 * @reservied: Indicates whether @bo has been reserved for validation. 44 * @reserved: Indicates whether @bo has been reserved for validation.
45 * @removed: Indicates whether @bo has been removed from lru lists.
46 * @put_count: Number of outstanding references on bo::list_kref.
47 * @old_sync_obj: Pointer to a sync object about to be unreferenced
45 */ 48 */
46 49
47struct ttm_validate_buffer { 50struct ttm_validate_buffer {
@@ -49,6 +52,9 @@ struct ttm_validate_buffer {
49 struct ttm_buffer_object *bo; 52 struct ttm_buffer_object *bo;
50 void *new_sync_obj_arg; 53 void *new_sync_obj_arg;
51 bool reserved; 54 bool reserved;
55 bool removed;
56 int put_count;
57 void *old_sync_obj;
52}; 58};
53 59
54/** 60/**
@@ -66,7 +72,6 @@ extern void ttm_eu_backoff_reservation(struct list_head *list);
66 * function ttm_eu_reserve_buffers 72 * function ttm_eu_reserve_buffers
67 * 73 *
68 * @list: thread private list of ttm_validate_buffer structs. 74 * @list: thread private list of ttm_validate_buffer structs.
69 * @val_seq: A unique sequence number.
70 * 75 *
71 * Tries to reserve bos pointed to by the list entries for validation. 76 * Tries to reserve bos pointed to by the list entries for validation.
72 * If the function returns 0, all buffers are marked as "unfenced", 77 * If the function returns 0, all buffers are marked as "unfenced",
@@ -88,7 +93,7 @@ extern void ttm_eu_backoff_reservation(struct list_head *list);
88 * has failed. 93 * has failed.
89 */ 94 */
90 95
91extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq); 96extern int ttm_eu_reserve_buffers(struct list_head *list);
92 97
93/** 98/**
94 * function ttm_eu_fence_buffer_objects. 99 * function ttm_eu_fence_buffer_objects.
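
With the sequence number internalized, the execbuf utilities cover the full reserve/validate/fence cycle by themselves. A hedged sketch of the expected submission flow; the validation step and the fence object are hypothetical driver pieces:

#include <linux/list.h>
#include <drm/ttm/ttm_execbuf_util.h>

/* Hypothetical driver work done while the list is reserved. */
static int example_validate_and_emit(struct list_head *list)
{
	return 0;
}

/* Sketch: reserve every bo on the list, do the driver's validation
 * and command submission, then fence all buffers in one pass. */
static int example_submit(struct list_head *list, void *sync_obj)
{
	int ret = ttm_eu_reserve_buffers(list);

	if (unlikely(ret != 0))
		return ret;

	ret = example_validate_and_emit(list);
	if (unlikely(ret != 0)) {
		ttm_eu_backoff_reservation(list);
		return ret;
	}

	ttm_eu_fence_buffer_objects(list, sync_obj);
	return 0;
}
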
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 6cc38fc07ab7..d4a62ab2ee5e 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -24,5 +24,7 @@ struct kref {
24void kref_init(struct kref *kref); 24void kref_init(struct kref *kref);
25void kref_get(struct kref *kref); 25void kref_get(struct kref *kref);
26int kref_put(struct kref *kref, void (*release) (struct kref *kref)); 26int kref_put(struct kref *kref, void (*release) (struct kref *kref));
27int kref_sub(struct kref *kref, unsigned int count,
28 void (*release) (struct kref *kref));
27 29
28#endif /* _KREF_H_ */ 30#endif /* _KREF_H_ */
diff --git a/lib/kref.c b/lib/kref.c
index d3d227a08a4b..3efb882b11db 100644
--- a/lib/kref.c
+++ b/lib/kref.c
@@ -62,6 +62,36 @@ int kref_put(struct kref *kref, void (*release)(struct kref *kref))
62 return 0; 62 return 0;
63} 63}
64 64
65
66/**
67 * kref_sub - subtract a number of refcounts for object.
68 * @kref: object.
 69 * @count: Number of refcounts to subtract.
70 * @release: pointer to the function that will clean up the object when the
71 * last reference to the object is released.
72 * This pointer is required, and it is not acceptable to pass kfree
73 * in as this function.
74 *
75 * Subtract @count from the refcount, and if 0, call release().
 76 * Return 1 if the object was removed, otherwise return 0. Beware: even if
 77 * this function returns 0, you still can not count on the kref remaining
 78 * in memory. Only use the return value to check whether the kref is now
 79 * gone, never to conclude that it is still present.
80 */
81int kref_sub(struct kref *kref, unsigned int count,
82 void (*release)(struct kref *kref))
83{
84 WARN_ON(release == NULL);
85 WARN_ON(release == (void (*)(struct kref *))kfree);
86
87 if (atomic_sub_and_test((int) count, &kref->refcount)) {
88 release(kref);
89 return 1;
90 }
91 return 0;
92}
93
65EXPORT_SYMBOL(kref_init); 94EXPORT_SYMBOL(kref_init);
66EXPORT_SYMBOL(kref_get); 95EXPORT_SYMBOL(kref_get);
67EXPORT_SYMBOL(kref_put); 96EXPORT_SYMBOL(kref_put);
97EXPORT_SYMBOL(kref_sub);
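
kref_sub generalizes kref_put to batched releases: a caller holding @count references can drop them in a single atomic_sub_and_test instead of looping over kref_put. A small usage sketch with a hypothetical object type:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_obj {
	struct kref kref;
	/* ... payload ... */
};

/* Free the containing object once the last reference is gone. */
static void example_release(struct kref *kref)
{
	struct example_obj *obj =
		container_of(kref, struct example_obj, kref);

	kfree(obj);
}

/* Sketch: drop @count references in one shot; the return value only
 * says whether this call destroyed the object. */
static int example_put_many(struct example_obj *obj, unsigned int count)
{
	return kref_sub(&obj->kref, count, example_release);
}
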