Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_fence.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c  34
1 file changed, 17 insertions, 17 deletions
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 76ec0e9ed8a..7027766ec2a 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -74,7 +74,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
         unsigned long irq_flags;
 
         write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-        if (fence->emited) {
+        if (fence->emitted) {
                 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
                 return 0;
         }
@@ -88,8 +88,8 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
         radeon_fence_ring_emit(rdev, fence);
 
         trace_radeon_fence_emit(rdev->ddev, fence->seq);
-        fence->emited = true;
-        list_move_tail(&fence->list, &rdev->fence_drv.emited);
+        fence->emitted = true;
+        list_move_tail(&fence->list, &rdev->fence_drv.emitted);
         write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
         return 0;
 }
@@ -129,7 +129,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
                 return false;
         }
         n = NULL;
-        list_for_each(i, &rdev->fence_drv.emited) {
+        list_for_each(i, &rdev->fence_drv.emitted) {
                 fence = list_entry(i, struct radeon_fence, list);
                 if (fence->seq == seq) {
                         n = i;
@@ -145,7 +145,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
                         fence = list_entry(i, struct radeon_fence, list);
                         fence->signaled = true;
                         i = n;
-                } while (i != &rdev->fence_drv.emited);
+                } while (i != &rdev->fence_drv.emitted);
                 wake = true;
         }
         return wake;
@@ -159,7 +159,7 @@ static void radeon_fence_destroy(struct kref *kref)
         fence = container_of(kref, struct radeon_fence, kref);
         write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
         list_del(&fence->list);
-        fence->emited = false;
+        fence->emitted = false;
         write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
         kfree(fence);
 }
@@ -174,7 +174,7 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
         }
         kref_init(&((*fence)->kref));
         (*fence)->rdev = rdev;
-        (*fence)->emited = false;
+        (*fence)->emitted = false;
         (*fence)->signaled = false;
         (*fence)->seq = 0;
         INIT_LIST_HEAD(&(*fence)->list);
@@ -203,8 +203,8 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
         if (fence->rdev->shutdown) {
                 signaled = true;
         }
-        if (!fence->emited) {
-                WARN(1, "Querying an unemited fence : %p !\n", fence);
+        if (!fence->emitted) {
+                WARN(1, "Querying an unemitted fence : %p !\n", fence);
                 signaled = true;
         }
         if (!signaled) {
@@ -295,11 +295,11 @@ int radeon_fence_wait_next(struct radeon_device *rdev)
                 return 0;
         }
         write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-        if (list_empty(&rdev->fence_drv.emited)) {
+        if (list_empty(&rdev->fence_drv.emitted)) {
                 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
                 return 0;
         }
-        fence = list_entry(rdev->fence_drv.emited.next,
+        fence = list_entry(rdev->fence_drv.emitted.next,
                            struct radeon_fence, list);
         radeon_fence_ref(fence);
         write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
@@ -318,11 +318,11 @@ int radeon_fence_wait_last(struct radeon_device *rdev)
                 return 0;
         }
         write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-        if (list_empty(&rdev->fence_drv.emited)) {
+        if (list_empty(&rdev->fence_drv.emitted)) {
                 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
                 return 0;
         }
-        fence = list_entry(rdev->fence_drv.emited.prev,
+        fence = list_entry(rdev->fence_drv.emitted.prev,
                            struct radeon_fence, list);
         radeon_fence_ref(fence);
         write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
@@ -375,7 +375,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
         radeon_fence_write(rdev, 0);
         atomic_set(&rdev->fence_drv.seq, 0);
         INIT_LIST_HEAD(&rdev->fence_drv.created);
-        INIT_LIST_HEAD(&rdev->fence_drv.emited);
+        INIT_LIST_HEAD(&rdev->fence_drv.emitted);
         INIT_LIST_HEAD(&rdev->fence_drv.signaled);
         init_waitqueue_head(&rdev->fence_drv.queue);
         rdev->fence_drv.initialized = true;
@@ -413,10 +413,10 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
 
         seq_printf(m, "Last signaled fence 0x%08X\n",
                    radeon_fence_read(rdev));
-        if (!list_empty(&rdev->fence_drv.emited)) {
-                fence = list_entry(rdev->fence_drv.emited.prev,
+        if (!list_empty(&rdev->fence_drv.emitted)) {
+                fence = list_entry(rdev->fence_drv.emitted.prev,
                                    struct radeon_fence, list);
-                seq_printf(m, "Last emited fence %p with 0x%08X\n",
+                seq_printf(m, "Last emitted fence %p with 0x%08X\n",
                            fence, fence->seq);
         }
         return 0;