author     Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/gpu/drm/radeon/radeon_object.c
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_object.c')
-rw-r--r--   drivers/gpu/drm/radeon/radeon_object.c | 115
1 file changed, 41 insertions(+), 74 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index b3b5306bb578..976c3b1b1b6e 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -34,6 +34,7 @@
 #include <drm/drmP.h>
 #include "radeon_drm.h"
 #include "radeon.h"
+#include "radeon_trace.h"
 
 
 int radeon_ttm_init(struct radeon_device *rdev);
@@ -54,6 +55,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 	list_del_init(&bo->list);
 	mutex_unlock(&bo->rdev->gem.mutex);
 	radeon_bo_clear_surface_reg(bo);
+	drm_gem_object_release(&bo->gem_base);
 	kfree(bo);
 }
 
@@ -69,7 +71,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 	u32 c = 0;
 
 	rbo->placement.fpfn = 0;
-	rbo->placement.lpfn = rbo->rdev->mc.active_vram_size >> PAGE_SHIFT;
+	rbo->placement.lpfn = 0;
 	rbo->placement.placement = rbo->placements;
 	rbo->placement.busy_placement = rbo->placements;
 	if (domain & RADEON_GEM_DOMAIN_VRAM)
@@ -85,14 +87,18 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 	rbo->placement.num_busy_placement = c;
 }
 
-int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
-		unsigned long size, bool kernel, u32 domain,
+int radeon_bo_create(struct radeon_device *rdev,
+		unsigned long size, int byte_align, bool kernel, u32 domain,
 		struct radeon_bo **bo_ptr)
 {
 	struct radeon_bo *bo;
 	enum ttm_bo_type type;
+	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+	unsigned long max_size = 0;
 	int r;
 
+	size = ALIGN(size, PAGE_SIZE);
+
 	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
 		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
 	}
@@ -102,20 +108,33 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 		type = ttm_bo_type_device;
 	}
 	*bo_ptr = NULL;
+
+	/* maximun bo size is the minimun btw visible vram and gtt size */
+	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+	if ((page_align << PAGE_SHIFT) >= max_size) {
+		printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldMb limit\n",
+		       __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20);
+		return -ENOMEM;
+	}
+
+retry:
 	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
 	if (bo == NULL)
 		return -ENOMEM;
+	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
+	if (unlikely(r)) {
+		kfree(bo);
+		return r;
+	}
 	bo->rdev = rdev;
-	bo->gobj = gobj;
+	bo->gem_base.driver_private = NULL;
 	bo->surface_reg = -1;
 	INIT_LIST_HEAD(&bo->list);
-
-retry:
 	radeon_ttm_placement_from_domain(bo, domain);
 	/* Kernel allocation are uninterruptible */
 	mutex_lock(&rdev->vram_mutex);
 	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
-			&bo->placement, 0, 0, !kernel, NULL, size,
+			&bo->placement, page_align, 0, !kernel, NULL, size,
 			&radeon_ttm_bo_destroy);
 	mutex_unlock(&rdev->vram_mutex);
 	if (unlikely(r != 0)) {
@@ -131,11 +150,9 @@ retry:
 		return r;
 	}
 	*bo_ptr = bo;
-	if (gobj) {
-		mutex_lock(&bo->rdev->gem.mutex);
-		list_add_tail(&bo->list, &rdev->gem.objects);
-		mutex_unlock(&bo->rdev->gem.mutex);
-	}
+
+	trace_radeon_bo_create(bo);
+
 	return 0;
 }
 
@@ -248,7 +265,6 @@ int radeon_bo_evict_vram(struct radeon_device *rdev)
 void radeon_bo_force_delete(struct radeon_device *rdev)
 {
 	struct radeon_bo *bo, *n;
-	struct drm_gem_object *gobj;
 
 	if (list_empty(&rdev->gem.objects)) {
 		return;
@@ -256,16 +272,14 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
 	dev_err(rdev->dev, "Userspace still has active objects !\n");
 	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
 		mutex_lock(&rdev->ddev->struct_mutex);
-		gobj = bo->gobj;
 		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
-			gobj, bo, (unsigned long)gobj->size,
-			*((unsigned long *)&gobj->refcount));
+			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
+			*((unsigned long *)&bo->gem_base.refcount));
 		mutex_lock(&bo->rdev->gem.mutex);
 		list_del_init(&bo->list);
 		mutex_unlock(&bo->rdev->gem.mutex);
-		radeon_bo_unref(&bo);
-		gobj->driver_private = NULL;
-		drm_gem_object_unreference(gobj);
+		/* this should unref the ttm bo */
+		drm_gem_object_unreference(&bo->gem_base);
 		mutex_unlock(&rdev->ddev->struct_mutex);
 	}
 }
@@ -292,34 +306,9 @@ void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
 			       struct list_head *head)
 {
 	if (lobj->wdomain) {
-		list_add(&lobj->list, head);
+		list_add(&lobj->tv.head, head);
 	} else {
-		list_add_tail(&lobj->list, head);
-	}
-}
-
-int radeon_bo_list_reserve(struct list_head *head)
-{
-	struct radeon_bo_list *lobj;
-	int r;
-
-	list_for_each_entry(lobj, head, list){
-		r = radeon_bo_reserve(lobj->bo, false);
-		if (unlikely(r != 0))
-			return r;
-		lobj->reserved = true;
-	}
-	return 0;
-}
-
-void radeon_bo_list_unreserve(struct list_head *head)
-{
-	struct radeon_bo_list *lobj;
-
-	list_for_each_entry(lobj, head, list) {
-		/* only unreserve object we successfully reserved */
-		if (lobj->reserved && radeon_bo_is_reserved(lobj->bo))
-			radeon_bo_unreserve(lobj->bo);
+		list_add_tail(&lobj->tv.head, head);
 	}
 }
 
@@ -330,14 +319,11 @@ int radeon_bo_list_validate(struct list_head *head)
 	u32 domain;
 	int r;
 
-	list_for_each_entry(lobj, head, list) {
-		lobj->reserved = false;
-	}
-	r = radeon_bo_list_reserve(head);
+	r = ttm_eu_reserve_buffers(head);
 	if (unlikely(r != 0)) {
 		return r;
 	}
-	list_for_each_entry(lobj, head, list) {
+	list_for_each_entry(lobj, head, tv.head) {
 		bo = lobj->bo;
 		if (!bo->pin_count) {
 			domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
@@ -360,25 +346,6 @@ int radeon_bo_list_validate(struct list_head *head)
 	return 0;
 }
 
-void radeon_bo_list_fence(struct list_head *head, void *fence)
-{
-	struct radeon_bo_list *lobj;
-	struct radeon_bo *bo;
-	struct radeon_fence *old_fence = NULL;
-
-	list_for_each_entry(lobj, head, list) {
-		bo = lobj->bo;
-		spin_lock(&bo->tbo.lock);
-		old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
-		bo->tbo.sync_obj = radeon_fence_ref(fence);
-		bo->tbo.sync_obj_arg = NULL;
-		spin_unlock(&bo->tbo.lock);
-		if (old_fence) {
-			radeon_fence_unref(&old_fence);
-		}
-	}
-}
-
 int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
 			 struct vm_area_struct *vma)
 {
@@ -435,7 +402,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 
 out:
 	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
-			       bo->tbo.mem.mm_node->start << PAGE_SHIFT,
+			       bo->tbo.mem.start << PAGE_SHIFT,
 			       bo->tbo.num_pages << PAGE_SHIFT);
 	return 0;
 }
@@ -532,7 +499,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	rdev = rbo->rdev;
 	if (bo->mem.mem_type == TTM_PL_VRAM) {
 		size = bo->mem.num_pages << PAGE_SHIFT;
-		offset = bo->mem.mm_node->start << PAGE_SHIFT;
+		offset = bo->mem.start << PAGE_SHIFT;
 		if ((offset + size) > rdev->mc.visible_vram_size) {
 			/* hurrah the memory is not visible ! */
 			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
@@ -540,7 +507,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 			r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
 			if (unlikely(r != 0))
 				return r;
-			offset = bo->mem.mm_node->start << PAGE_SHIFT;
+			offset = bo->mem.start << PAGE_SHIFT;
 			/* this should not happen */
 			if ((offset + size) > rdev->mc.visible_vram_size)
 				return -EINVAL;
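
For reference, the merged changes give radeon_bo_create() an explicit byte-alignment parameter, drop the struct drm_gem_object argument in favour of the GEM object embedded in struct radeon_bo (bo->gem_base), and reject requests larger than the visible-VRAM/GTT limit. A minimal, hypothetical caller under the new signature might look like the sketch below; the rdev pointer, the 64 KiB size, and the error handling are illustrative assumptions, not taken from this commit.

	/*
	 * Hypothetical caller sketch: radeon_bo_create() now takes a byte
	 * alignment and returns the buffer through bo_ptr; the GEM object
	 * lives inside struct radeon_bo rather than being passed in.
	 */
	struct radeon_bo *bo;
	int r;

	r = radeon_bo_create(rdev, 64 * 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, &bo);
	if (r) {
		/* failed, e.g. the request exceeded the visible-VRAM/GTT limit */
		return r;
	}

With the size check added in this diff, oversized requests now fail with -ENOMEM inside radeon_bo_create() itself, before ttm_bo_init() is ever reached.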