Diffstat (limited to 'drivers/gpu/drm/radeon')
-rw-r--r--   drivers/gpu/drm/radeon/radeon.h         4
-rw-r--r--   drivers/gpu/drm/radeon/radeon_fence.c  34
-rw-r--r--   drivers/gpu/drm/radeon/radeon_pm.c      4
-rw-r--r--   drivers/gpu/drm/radeon/radeon_ring.c    2
4 files changed, 22 insertions, 22 deletions
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 599361466a2..33f3be369a2 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -199,7 +199,7 @@ struct radeon_fence_driver {
 	wait_queue_head_t queue;
 	rwlock_t lock;
 	struct list_head created;
-	struct list_head emited;
+	struct list_head emitted;
 	struct list_head signaled;
 	bool initialized;
 };
@@ -210,7 +210,7 @@ struct radeon_fence {
 	struct list_head list;
 	/* protected by radeon_fence.lock */
 	uint32_t seq;
-	bool emited;
+	bool emitted;
 	bool signaled;
 };
 
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 76ec0e9ed8a..7027766ec2a 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -74,7 +74,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
 	unsigned long irq_flags;
 
 	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	if (fence->emited) {
+	if (fence->emitted) {
 		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
 		return 0;
 	}
@@ -88,8 +88,8 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
 	radeon_fence_ring_emit(rdev, fence);
 
 	trace_radeon_fence_emit(rdev->ddev, fence->seq);
-	fence->emited = true;
-	list_move_tail(&fence->list, &rdev->fence_drv.emited);
+	fence->emitted = true;
+	list_move_tail(&fence->list, &rdev->fence_drv.emitted);
 	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
 	return 0;
 }
@@ -129,7 +129,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
 		return false;
 	}
 	n = NULL;
-	list_for_each(i, &rdev->fence_drv.emited) {
+	list_for_each(i, &rdev->fence_drv.emitted) {
 		fence = list_entry(i, struct radeon_fence, list);
 		if (fence->seq == seq) {
 			n = i;
@@ -145,7 +145,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
 			fence = list_entry(i, struct radeon_fence, list);
 			fence->signaled = true;
 			i = n;
-		} while (i != &rdev->fence_drv.emited);
+		} while (i != &rdev->fence_drv.emitted);
 		wake = true;
 	}
 	return wake;
@@ -159,7 +159,7 @@ static void radeon_fence_destroy(struct kref *kref)
 	fence = container_of(kref, struct radeon_fence, kref);
 	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
 	list_del(&fence->list);
-	fence->emited = false;
+	fence->emitted = false;
 	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
 	kfree(fence);
 }
@@ -174,7 +174,7 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
 	}
 	kref_init(&((*fence)->kref));
 	(*fence)->rdev = rdev;
-	(*fence)->emited = false;
+	(*fence)->emitted = false;
 	(*fence)->signaled = false;
 	(*fence)->seq = 0;
 	INIT_LIST_HEAD(&(*fence)->list);
@@ -203,8 +203,8 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
 	if (fence->rdev->shutdown) {
 		signaled = true;
 	}
-	if (!fence->emited) {
-		WARN(1, "Querying an unemited fence : %p !\n", fence);
+	if (!fence->emitted) {
+		WARN(1, "Querying an unemitted fence : %p !\n", fence);
 		signaled = true;
 	}
 	if (!signaled) {
@@ -295,11 +295,11 @@ int radeon_fence_wait_next(struct radeon_device *rdev)
 		return 0;
 	}
 	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	if (list_empty(&rdev->fence_drv.emited)) {
+	if (list_empty(&rdev->fence_drv.emitted)) {
 		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
 		return 0;
 	}
-	fence = list_entry(rdev->fence_drv.emited.next,
+	fence = list_entry(rdev->fence_drv.emitted.next,
 			   struct radeon_fence, list);
 	radeon_fence_ref(fence);
 	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
@@ -318,11 +318,11 @@ int radeon_fence_wait_last(struct radeon_device *rdev)
 		return 0;
 	}
 	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	if (list_empty(&rdev->fence_drv.emited)) {
+	if (list_empty(&rdev->fence_drv.emitted)) {
 		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
 		return 0;
 	}
-	fence = list_entry(rdev->fence_drv.emited.prev,
+	fence = list_entry(rdev->fence_drv.emitted.prev,
 			   struct radeon_fence, list);
 	radeon_fence_ref(fence);
 	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
@@ -375,7 +375,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
 	radeon_fence_write(rdev, 0);
 	atomic_set(&rdev->fence_drv.seq, 0);
 	INIT_LIST_HEAD(&rdev->fence_drv.created);
-	INIT_LIST_HEAD(&rdev->fence_drv.emited);
+	INIT_LIST_HEAD(&rdev->fence_drv.emitted);
 	INIT_LIST_HEAD(&rdev->fence_drv.signaled);
 	init_waitqueue_head(&rdev->fence_drv.queue);
 	rdev->fence_drv.initialized = true;
@@ -413,10 +413,10 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
 
 	seq_printf(m, "Last signaled fence 0x%08X\n",
 		   radeon_fence_read(rdev));
-	if (!list_empty(&rdev->fence_drv.emited)) {
-		fence = list_entry(rdev->fence_drv.emited.prev,
+	if (!list_empty(&rdev->fence_drv.emitted)) {
+		fence = list_entry(rdev->fence_drv.emitted.prev,
 				   struct radeon_fence, list);
-		seq_printf(m, "Last emited fence %p with 0x%08X\n",
+		seq_printf(m, "Last emitted fence %p with 0x%08X\n",
 			   fence, fence->seq);
 	}
 	return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 78a665bd951..1fb84676afe 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -799,9 +799,9 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
 		int not_processed = 0;
 
 		read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-		if (!list_empty(&rdev->fence_drv.emited)) {
+		if (!list_empty(&rdev->fence_drv.emitted)) {
 			struct list_head *ptr;
-			list_for_each(ptr, &rdev->fence_drv.emited) {
+			list_for_each(ptr, &rdev->fence_drv.emitted) {
 				/* count up to 3, that's enought info */
 				if (++not_processed >= 3)
 					break;
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 49d58202202..f3d7d224eeb 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -169,7 +169,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 	if (tmp == NULL) {
 		return;
 	}
-	if (!tmp->fence->emited)
+	if (!tmp->fence->emitted)
 		radeon_fence_unref(&tmp->fence);
 	mutex_lock(&rdev->ib_pool.mutex);
 	tmp->free = true;