-rw-r--r--  drivers/gpu/drm/radeon/radeon.h             17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c       1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c        12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c       241
-rw-r--r--  drivers/gpu/drm/radeon/radeon_semaphore.c    2
5 files changed, 71 insertions(+), 202 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 45164e101257..617030727ca8 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -625,7 +625,6 @@ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
 
 struct radeon_ib {
 	struct radeon_sa_bo	*sa_bo;
-	unsigned		idx;
 	uint32_t		length_dw;
 	uint64_t		gpu_addr;
 	uint32_t		*ptr;
@@ -634,18 +633,6 @@ struct radeon_ib {
 	bool			is_const_ib;
 };
 
-/*
- * locking -
- * mutex protects scheduled_ibs, ready, alloc_bm
- */
-struct radeon_ib_pool {
-	struct radeon_mutex		mutex;
-	struct radeon_sa_manager	sa_manager;
-	struct radeon_ib		ibs[RADEON_IB_POOL_SIZE];
-	bool				ready;
-	unsigned			head_id;
-};
-
 struct radeon_ring {
 	struct radeon_bo	*ring_obj;
 	volatile uint32_t	*ring;
@@ -787,7 +774,6 @@ struct si_rlc {
 int radeon_ib_get(struct radeon_device *rdev, int ring,
 		  struct radeon_ib **ib, unsigned size);
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
-bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib);
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
 int radeon_ib_pool_init(struct radeon_device *rdev);
 void radeon_ib_pool_fini(struct radeon_device *rdev);
@@ -1522,7 +1508,8 @@ struct radeon_device {
 	wait_queue_head_t		fence_queue;
 	struct mutex			ring_lock;
 	struct radeon_ring		ring[RADEON_NUM_RINGS];
-	struct radeon_ib_pool		ib_pool;
+	bool				ib_pool_ready;
+	struct radeon_sa_manager	ring_tmp_bo;
 	struct radeon_irq		irq;
 	struct radeon_asic		*asic;
 	struct radeon_gem		gem;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 48876c11a4a9..e1bc7e96f29c 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -724,7 +724,6 @@ int radeon_device_init(struct radeon_device *rdev,
 	/* mutex initialization are all done here so we
 	 * can recall function without having locking issues */
 	radeon_mutex_init(&rdev->cs_mutex);
-	radeon_mutex_init(&rdev->ib_pool.mutex);
 	mutex_init(&rdev->ring_lock);
 	mutex_init(&rdev->dc_hw_i2c_mutex);
 	if (rdev->family >= CHIP_R600)
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 53dba8e5942f..8e9ef3403acd 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -432,8 +432,8 @@ retry_id:
 	rdev->vm_manager.use_bitmap |= 1 << id;
 	vm->id = id;
 	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
-	return radeon_vm_bo_update_pte(rdev, vm, rdev->ib_pool.sa_manager.bo,
-				       &rdev->ib_pool.sa_manager.bo->tbo.mem);
+	return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
+				       &rdev->ring_tmp_bo.bo->tbo.mem);
 }
 
 /* object have to be reserved */
@@ -631,7 +631,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 	/* map the ib pool buffer at 0 in virtual address space, set
 	 * read only
 	 */
-	r = radeon_vm_bo_add(rdev, vm, rdev->ib_pool.sa_manager.bo, 0,
+	r = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo, 0,
 			     RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
 	return r;
 }
@@ -648,12 +648,12 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 	radeon_mutex_unlock(&rdev->cs_mutex);
 
 	/* remove all bo */
-	r = radeon_bo_reserve(rdev->ib_pool.sa_manager.bo, false);
+	r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
 	if (!r) {
-		bo_va = radeon_bo_va(rdev->ib_pool.sa_manager.bo, vm);
+		bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
 		list_del_init(&bo_va->bo_list);
 		list_del_init(&bo_va->vm_list);
-		radeon_bo_unreserve(rdev->ib_pool.sa_manager.bo);
+		radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
 		kfree(bo_va);
 	}
 	if (!list_empty(&vm->va)) {
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index e074ff5c2ac2..b3d6942a2be9 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -24,6 +24,7 @@
  * Authors: Dave Airlie
  *          Alex Deucher
  *          Jerome Glisse
+ *          Christian König
  */
 #include <linux/seq_file.h>
 #include <linux/slab.h>
@@ -33,8 +34,10 @@
 #include "radeon.h"
 #include "atom.h"
 
-int radeon_debugfs_ib_init(struct radeon_device *rdev);
-int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
+/*
+ * IB.
+ */
+int radeon_debugfs_sa_init(struct radeon_device *rdev);
 
 u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
 {
@@ -61,106 +64,37 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
 	return idx_value;
 }
 
-void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
-{
-#if DRM_DEBUG_CODE
-	if (ring->count_dw <= 0) {
-		DRM_ERROR("radeon: writting more dword to ring than expected !\n");
-	}
-#endif
-	ring->ring[ring->wptr++] = v;
-	ring->wptr &= ring->ptr_mask;
-	ring->count_dw--;
-	ring->ring_free_dw--;
-}
-
-/*
- * IB.
- */
-bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
-{
-	bool done = false;
-
-	/* only free ib which have been emited */
-	if (ib->fence && ib->fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
-		if (radeon_fence_signaled(ib->fence)) {
-			radeon_fence_unref(&ib->fence);
-			radeon_sa_bo_free(rdev, &ib->sa_bo, NULL);
-			done = true;
-		}
-	}
-	return done;
-}
-
 int radeon_ib_get(struct radeon_device *rdev, int ring,
 		  struct radeon_ib **ib, unsigned size)
 {
-	struct radeon_fence *fence;
-	unsigned cretry = 0;
-	int r = 0, i, idx;
-
-	*ib = NULL;
-	/* align size on 256 bytes */
-	size = ALIGN(size, 256);
-
-	r = radeon_fence_create(rdev, &fence, ring);
-	if (r) {
-		dev_err(rdev->dev, "failed to create fence for new IB\n");
-		return r;
-	}
+	int r;
 
-	radeon_mutex_lock(&rdev->ib_pool.mutex);
-	idx = rdev->ib_pool.head_id;
-retry:
-	if (cretry > 5) {
-		dev_err(rdev->dev, "failed to get an ib after 5 retry\n");
-		radeon_mutex_unlock(&rdev->ib_pool.mutex);
-		radeon_fence_unref(&fence);
+	*ib = kmalloc(sizeof(struct radeon_ib), GFP_KERNEL);
+	if (*ib == NULL) {
 		return -ENOMEM;
 	}
-	cretry++;
-	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
-		radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
-		if (rdev->ib_pool.ibs[idx].fence == NULL) {
-			r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
-					     &rdev->ib_pool.ibs[idx].sa_bo,
-					     size, 256, false);
-			if (!r) {
-				*ib = &rdev->ib_pool.ibs[idx];
-				(*ib)->ptr = radeon_sa_bo_cpu_addr((*ib)->sa_bo);
-				(*ib)->gpu_addr = radeon_sa_bo_gpu_addr((*ib)->sa_bo);
-				(*ib)->fence = fence;
-				(*ib)->vm_id = 0;
-				(*ib)->is_const_ib = false;
-				/* ib are most likely to be allocated in a ring fashion
-				 * thus rdev->ib_pool.head_id should be the id of the
-				 * oldest ib
-				 */
-				rdev->ib_pool.head_id = (1 + idx);
-				rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
-				radeon_mutex_unlock(&rdev->ib_pool.mutex);
-				return 0;
-			}
-		}
-		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*ib)->sa_bo, size, 256, true);
+	if (r) {
+		dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
+		kfree(*ib);
+		*ib = NULL;
+		return r;
 	}
-	/* this should be rare event, ie all ib scheduled none signaled yet.
-	 */
-	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
-		struct radeon_fence *fence = rdev->ib_pool.ibs[idx].fence;
-		if (fence && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
-			r = radeon_fence_wait(fence, false);
-			if (!r) {
-				goto retry;
-			}
-			/* an error happened */
-			break;
-		}
-		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+	r = radeon_fence_create(rdev, &(*ib)->fence, ring);
+	if (r) {
+		dev_err(rdev->dev, "failed to create fence for new IB (%d)\n", r);
+		radeon_sa_bo_free(rdev, &(*ib)->sa_bo, NULL);
+		kfree(*ib);
+		*ib = NULL;
+		return r;
 	}
-	radeon_mutex_unlock(&rdev->ib_pool.mutex);
-	radeon_fence_unref(&fence);
-	return r;
+
+	(*ib)->ptr = radeon_sa_bo_cpu_addr((*ib)->sa_bo);
+	(*ib)->gpu_addr = radeon_sa_bo_gpu_addr((*ib)->sa_bo);
+	(*ib)->vm_id = 0;
+	(*ib)->is_const_ib = false;
+
+	return 0;
 }
 
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -171,12 +105,9 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 	if (tmp == NULL) {
 		return;
 	}
-	radeon_mutex_lock(&rdev->ib_pool.mutex);
-	if (tmp->fence && tmp->fence->seq == RADEON_FENCE_NOTEMITED_SEQ) {
-		radeon_sa_bo_free(rdev, &tmp->sa_bo, NULL);
-		radeon_fence_unref(&tmp->fence);
-	}
-	radeon_mutex_unlock(&rdev->ib_pool.mutex);
+	radeon_sa_bo_free(rdev, &tmp->sa_bo, tmp->fence);
+	radeon_fence_unref(&tmp->fence);
+	kfree(tmp);
 }
 
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
@@ -186,14 +117,14 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 
 	if (!ib->length_dw || !ring->ready) {
 		/* TODO: Nothings in the ib we should report. */
-		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
+		dev_err(rdev->dev, "couldn't schedule ib\n");
 		return -EINVAL;
 	}
 
 	/* 64 dwords should be enough for fence too */
 	r = radeon_ring_lock(rdev, ring, 64);
 	if (r) {
-		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
+		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
 		return r;
 	}
 	radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
@@ -204,63 +135,40 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 
 int radeon_ib_pool_init(struct radeon_device *rdev)
 {
-	int i, r;
+	int r;
 
-	radeon_mutex_lock(&rdev->ib_pool.mutex);
-	if (rdev->ib_pool.ready) {
-		radeon_mutex_unlock(&rdev->ib_pool.mutex);
+	if (rdev->ib_pool_ready) {
 		return 0;
 	}
-
-	r = radeon_sa_bo_manager_init(rdev, &rdev->ib_pool.sa_manager,
+	r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
 				      RADEON_IB_POOL_SIZE*64*1024,
 				      RADEON_GEM_DOMAIN_GTT);
 	if (r) {
-		radeon_mutex_unlock(&rdev->ib_pool.mutex);
 		return r;
 	}
-
-	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
-		rdev->ib_pool.ibs[i].fence = NULL;
-		rdev->ib_pool.ibs[i].idx = i;
-		rdev->ib_pool.ibs[i].length_dw = 0;
-		rdev->ib_pool.ibs[i].sa_bo = NULL;
-	}
-	rdev->ib_pool.head_id = 0;
-	rdev->ib_pool.ready = true;
-	DRM_INFO("radeon: ib pool ready.\n");
-
-	if (radeon_debugfs_ib_init(rdev)) {
-		DRM_ERROR("Failed to register debugfs file for IB !\n");
+	rdev->ib_pool_ready = true;
+	if (radeon_debugfs_sa_init(rdev)) {
+		dev_err(rdev->dev, "failed to register debugfs file for SA\n");
 	}
-	radeon_mutex_unlock(&rdev->ib_pool.mutex);
 	return 0;
 }
 
 void radeon_ib_pool_fini(struct radeon_device *rdev)
 {
-	unsigned i;
-
-	radeon_mutex_lock(&rdev->ib_pool.mutex);
-	if (rdev->ib_pool.ready) {
-		for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
-			radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo, NULL);
-			radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
-		}
-		radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
-		rdev->ib_pool.ready = false;
+	if (rdev->ib_pool_ready) {
+		radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
+		rdev->ib_pool_ready = false;
 	}
-	radeon_mutex_unlock(&rdev->ib_pool.mutex);
 }
 
 int radeon_ib_pool_start(struct radeon_device *rdev)
 {
-	return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
+	return radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
 }
 
 int radeon_ib_pool_suspend(struct radeon_device *rdev)
 {
-	return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
+	return radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
 }
 
 int radeon_ib_ring_tests(struct radeon_device *rdev)
@@ -296,6 +204,21 @@ int radeon_ib_ring_tests(struct radeon_device *rdev)
 /*
  * Ring.
  */
+int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
+
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
+{
+#if DRM_DEBUG_CODE
+	if (ring->count_dw <= 0) {
+		DRM_ERROR("radeon: writting more dword to ring than expected !\n");
+	}
+#endif
+	ring->ring[ring->wptr++] = v;
+	ring->wptr &= ring->ptr_mask;
+	ring->count_dw--;
+	ring->ring_free_dw--;
+}
+
 int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 	/* r1xx-r5xx only has CP ring */
@@ -575,37 +498,13 @@ static struct drm_info_list radeon_debugfs_ring_info_list[] = {
 	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
 };
 
-static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_ib *ib = &rdev->ib_pool.ibs[*((unsigned*)node->info_ent->data)];
-	unsigned i;
-
-	if (ib == NULL) {
-		return 0;
-	}
-	seq_printf(m, "IB %04u\n", ib->idx);
-	seq_printf(m, "IB fence %p\n", ib->fence);
-	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
-	for (i = 0; i < ib->length_dw; i++) {
-		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
-	}
-	return 0;
-}
-
-static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
-static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
-static unsigned radeon_debugfs_ib_idx[RADEON_IB_POOL_SIZE];
-
 static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
 
-	radeon_sa_bo_dump_debug_info(&rdev->ib_pool.sa_manager, m);
+	radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
 
 	return 0;
 
@@ -637,26 +536,10 @@ int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *rin
 	return 0;
 }
 
-int radeon_debugfs_ib_init(struct radeon_device *rdev)
+int radeon_debugfs_sa_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
-	unsigned i;
-	int r;
-
-	r = radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
-	if (r)
-		return r;
-
-	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
-		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
-		radeon_debugfs_ib_idx[i] = i;
-		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
-		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
-		radeon_debugfs_ib_list[i].driver_features = 0;
-		radeon_debugfs_ib_list[i].data = &radeon_debugfs_ib_idx[i];
-	}
-	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
-					RADEON_IB_POOL_SIZE);
+	return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
 #else
 	return 0;
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 1bc5513a5292..e2ace5dce117 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -41,7 +41,7 @@ int radeon_semaphore_create(struct radeon_device *rdev,
 	if (*semaphore == NULL) {
 		return -ENOMEM;
 	}
-	r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
+	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
 			     &(*semaphore)->sa_bo, 8, 8, true);
 	if (r) {
 		kfree(*semaphore);
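
For context, the caller-side convention after this change is: kmalloc a struct radeon_ib, back it with a sub-allocation from rdev->ring_tmp_bo, schedule it, and free it immediately, letting the fence defer the actual buffer release. A minimal, hypothetical sketch (example_submit is not part of the commit and only uses functions visible in this diff):

/* Hypothetical usage sketch, not part of the commit.  An IB is now a
 * kmalloc'ed struct backed by the shared ring_tmp_bo sub-allocator;
 * radeon_ib_free() hands the sa_bo back together with the IB's fence,
 * so the memory is only reused once the GPU is done with it.
 */
static int example_submit(struct radeon_device *rdev, int ring)
{
	struct radeon_ib *ib;
	int r;

	r = radeon_ib_get(rdev, ring, &ib, 64 * 4);	/* 64 dwords */
	if (r)
		return r;

	/* ... build packets into ib->ptr, then set ib->length_dw ... */

	r = radeon_ib_schedule(rdev, ib);	/* emits ib->fence on success */
	radeon_ib_free(rdev, &ib);		/* sa_bo released once ib->fence signals */
	return r;
}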