author     Dave Airlie <airlied@redhat.com>    2013-09-19 19:06:48 -0400
committer  Dave Airlie <airlied@redhat.com>    2013-09-19 19:06:48 -0400
commit     6ddf2ed6e00396883b3123032ccb4416205aac7c (patch)
tree       aa544e6288bfc4c9aba53df49be798bb2226ffe8 /drivers
parent     9808cc946943e4c260ff270f8b2e7884dd6968f8 (diff)
parent     7e60353a1f5335ecd63b1c54897c2aa75874aaee (diff)
Merge branch 'msm-fixes-3.12' of git://people.freedesktop.org/~robclark/linux into drm-fixes
A couple of small msm fixes, plus dropping set_need_resched().
* 'msm-fixes-3.12' of git://people.freedesktop.org/~robclark/linux:
drm/msm: drop unnecessary set_need_resched()
drm/msm: fix potential NULL pointer dereference
drm/msm: workaround for missing irq
drm/msm: return -EBUSY if bo still active
drm/msm: fix return value check in ERR_PTR()
drm/msm: fix cmdstream size check
drm/msm: hangcheck harder
drm/msm: handle read vs write fences
Diffstat (limited to 'drivers')
 drivers/gpu/drm/msm/adreno/adreno_gpu.c | 10
 drivers/gpu/drm/msm/msm_drv.c           | 50
 drivers/gpu/drm/msm/msm_drv.h           |  8
 drivers/gpu/drm/msm/msm_gem.c           | 34
 drivers/gpu/drm/msm/msm_gem.h           |  2
 drivers/gpu/drm/msm/msm_gem_submit.c    | 24
 drivers/gpu/drm/msm/msm_gpu.c           | 24
 7 files changed, 107 insertions(+), 45 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index a60584763b61..a0b9d8a95b16 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -124,6 +124,8 @@ void adreno_recover(struct msm_gpu *gpu)
 
         /* reset completed fence seqno, just discard anything pending: */
         adreno_gpu->memptrs->fence = gpu->submitted_fence;
+        adreno_gpu->memptrs->rptr  = 0;
+        adreno_gpu->memptrs->wptr  = 0;
 
         gpu->funcs->pm_resume(gpu);
         ret = gpu->funcs->hw_init(gpu);
@@ -229,7 +231,7 @@ void adreno_idle(struct msm_gpu *gpu)
                         return;
         } while(time_before(jiffies, t));
 
-        DRM_ERROR("timeout waiting for %s to drain ringbuffer!\n", gpu->name);
+        DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
 
         /* TODO maybe we need to reset GPU here to recover from hang? */
 }
@@ -256,11 +258,17 @@ void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
 {
         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
         uint32_t freedwords;
+        unsigned long t = jiffies + ADRENO_IDLE_TIMEOUT;
         do {
                 uint32_t size = gpu->rb->size / 4;
                 uint32_t wptr = get_wptr(gpu->rb);
                 uint32_t rptr = adreno_gpu->memptrs->rptr;
                 freedwords = (rptr + (size - 1) - wptr) % size;
+
+                if (time_after(jiffies, t)) {
+                        DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
+                        break;
+                }
         } while(freedwords < ndwords);
 }
 
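The free-space computation in adreno_wait_ring() is the standard ring-buffer idiom: one slot is deliberately kept unused (the size - 1 term), so that rptr == wptr can only ever mean "empty", never "full". A minimal standalone sketch of the same arithmetic (plain C with made-up pointer values, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* Same formula as adreno_wait_ring(): free dwords in a ring of
     * `size` dwords, with one slot reserved so that rptr == wptr
     * unambiguously means "empty". */
    static uint32_t ring_free(uint32_t rptr, uint32_t wptr, uint32_t size)
    {
            return (rptr + (size - 1) - wptr) % size;
    }

    int main(void)
    {
            printf("%u\n", ring_free(0, 0, 1024));   /* empty ring: 1023 free */
            printf("%u\n", ring_free(10, 9, 1024));  /* full ring:  0 free    */
            printf("%u\n", ring_free(0, 512, 1024)); /* half used:  511 free  */
            return 0;
    }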
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 864c9773636b..008d772384c7 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -499,25 +499,41 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
                 struct timespec *timeout)
 {
         struct msm_drm_private *priv = dev->dev_private;
-        unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
-        unsigned long start_jiffies = jiffies;
-        unsigned long remaining_jiffies;
         int ret;
 
-        if (time_after(start_jiffies, timeout_jiffies))
-                remaining_jiffies = 0;
-        else
-                remaining_jiffies = timeout_jiffies - start_jiffies;
-
-        ret = wait_event_interruptible_timeout(priv->fence_event,
-                        priv->completed_fence >= fence,
-                        remaining_jiffies);
-        if (ret == 0) {
-                DBG("timeout waiting for fence: %u (completed: %u)",
-                                fence, priv->completed_fence);
-                ret = -ETIMEDOUT;
-        } else if (ret != -ERESTARTSYS) {
-                ret = 0;
+        if (!priv->gpu)
+                return 0;
+
+        if (fence > priv->gpu->submitted_fence) {
+                DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
+                                fence, priv->gpu->submitted_fence);
+                return -EINVAL;
+        }
+
+        if (!timeout) {
+                /* no-wait: */
+                ret = fence_completed(dev, fence) ? 0 : -EBUSY;
+        } else {
+                unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
+                unsigned long start_jiffies = jiffies;
+                unsigned long remaining_jiffies;
+
+                if (time_after(start_jiffies, timeout_jiffies))
+                        remaining_jiffies = 0;
+                else
+                        remaining_jiffies = timeout_jiffies - start_jiffies;
+
+                ret = wait_event_interruptible_timeout(priv->fence_event,
+                                fence_completed(dev, fence),
+                                remaining_jiffies);
+
+                if (ret == 0) {
+                        DBG("timeout waiting for fence: %u (completed: %u)",
+                                        fence, priv->completed_fence);
+                        ret = -ETIMEDOUT;
+                } else if (ret != -ERESTARTSYS) {
+                        ret = 0;
+                }
         }
 
         return ret;
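Two behavioural changes in this hunk are worth spelling out: waiting on a fence that was never submitted now fails fast with -EINVAL instead of sleeping until the timeout expires, and a NULL timeout now means "poll, don't block", returning -EBUSY while the fence is still outstanding (this is what msm_gem_cpu_prep() uses to implement MSM_PREP_NOSYNC further down). A hypothetical caller sketch, not from the patch, showing how the NULL-timeout convention composes with a bounded wait:

    /* Hypothetical helper (not in the patch): try a no-wait poll
     * first, then fall back to an interruptible, bounded wait. */
    static int poll_then_wait(struct drm_device *dev, uint32_t fence,
                    struct timespec *timeout)
    {
            /* NULL timeout == no-wait: -EBUSY if not yet completed */
            int ret = msm_wait_fence_interruptable(dev, fence, NULL);

            if (ret != -EBUSY)
                    return ret;     /* completed (0) or hard error */

            return msm_wait_fence_interruptable(dev, fence, timeout);
    }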
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 80d75094bf0a..df8f1d084bc1 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -153,7 +153,7 @@ void *msm_gem_vaddr(struct drm_gem_object *obj);
 int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
                 struct work_struct *work);
 void msm_gem_move_to_active(struct drm_gem_object *obj,
-                struct msm_gpu *gpu, uint32_t fence);
+                struct msm_gpu *gpu, bool write, uint32_t fence);
 void msm_gem_move_to_inactive(struct drm_gem_object *obj);
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
                 struct timespec *timeout);
@@ -191,6 +191,12 @@ u32 msm_readl(const void __iomem *addr);
 #define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 #define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 
+static inline bool fence_completed(struct drm_device *dev, uint32_t fence)
+{
+        struct msm_drm_private *priv = dev->dev_private;
+        return priv->completed_fence >= fence;
+}
+
 static inline int align_pitch(int width, int bpp)
 {
         int bytespp = (bpp + 7) / 8;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 6b5a6c8c7658..29eacfa29cfb 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -40,9 +40,9 @@ static struct page **get_pages(struct drm_gem_object *obj)
         }
 
         msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
-        if (!msm_obj->sgt) {
+        if (IS_ERR(msm_obj->sgt)) {
                 dev_err(dev->dev, "failed to allocate sgt\n");
-                return ERR_PTR(-ENOMEM);
+                return ERR_CAST(msm_obj->sgt);
         }
 
         msm_obj->pages = p;
@@ -159,7 +159,6 @@ out_unlock:
 out:
         switch (ret) {
         case -EAGAIN:
-                set_need_resched();
         case 0:
         case -ERESTARTSYS:
         case -EINTR:
@@ -393,11 +392,14 @@ int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
 }
 
 void msm_gem_move_to_active(struct drm_gem_object *obj,
-                struct msm_gpu *gpu, uint32_t fence)
+                struct msm_gpu *gpu, bool write, uint32_t fence)
 {
         struct msm_gem_object *msm_obj = to_msm_bo(obj);
         msm_obj->gpu = gpu;
-        msm_obj->fence = fence;
+        if (write)
+                msm_obj->write_fence = fence;
+        else
+                msm_obj->read_fence = fence;
         list_del_init(&msm_obj->mm_list);
         list_add_tail(&msm_obj->mm_list, &gpu->active_list);
 }
@@ -411,7 +413,8 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
         msm_obj->gpu = NULL;
-        msm_obj->fence = 0;
+        msm_obj->read_fence = 0;
+        msm_obj->write_fence = 0;
         list_del_init(&msm_obj->mm_list);
         list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 
@@ -433,8 +436,18 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
         struct msm_gem_object *msm_obj = to_msm_bo(obj);
         int ret = 0;
 
-        if (is_active(msm_obj) && !(op & MSM_PREP_NOSYNC))
-                ret = msm_wait_fence_interruptable(dev, msm_obj->fence, timeout);
+        if (is_active(msm_obj)) {
+                uint32_t fence = 0;
+
+                if (op & MSM_PREP_READ)
+                        fence = msm_obj->write_fence;
+                if (op & MSM_PREP_WRITE)
+                        fence = max(fence, msm_obj->read_fence);
+                if (op & MSM_PREP_NOSYNC)
+                        timeout = NULL;
+
+                ret = msm_wait_fence_interruptable(dev, fence, timeout);
+        }
 
         /* TODO cache maintenance */
 
@@ -455,9 +468,10 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
         uint64_t off = drm_vma_node_start(&obj->vma_node);
 
         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-        seq_printf(m, "%08x: %c(%d) %2d (%2d) %08llx %p %d\n",
+        seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
                         msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
-                        msm_obj->fence, obj->name, obj->refcount.refcount.counter,
+                        msm_obj->read_fence, msm_obj->write_fence,
+                        obj->name, obj->refcount.refcount.counter,
                         off, msm_obj->vaddr, obj->size);
 }
 
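The fence bookkeeping above encodes the usual reader/writer rule: a CPU read only has to wait for the GPU's last write to land (reads do not conflict with reads), while a CPU write must additionally wait for outstanding GPU reads. For example, with read_fence = 7 and write_fence = 5, MSM_PREP_READ waits for fence 5, while MSM_PREP_READ | MSM_PREP_WRITE waits for fence 7. A standalone sketch of the same selection logic (flag values stubbed here for illustration; the real ones live in the msm_drm uapi header):

    #include <stdint.h>
    #include <stdio.h>

    #define MSM_PREP_READ   0x01    /* stubbed for illustration */
    #define MSM_PREP_WRITE  0x02    /* stubbed for illustration */

    /* Same fence-selection rule as msm_gem_cpu_prep() above. */
    static uint32_t fence_to_wait_on(uint32_t op,
                    uint32_t read_fence, uint32_t write_fence)
    {
            uint32_t fence = 0;

            if (op & MSM_PREP_READ)     /* GPU writes must land first */
                    fence = write_fence;
            if (op & MSM_PREP_WRITE)    /* GPU reads must finish too */
                    fence = (fence > read_fence) ? fence : read_fence;
            return fence;
    }

    int main(void)
    {
            /* last GPU read at fence 7, last GPU write at fence 5: */
            printf("%u\n", fence_to_wait_on(MSM_PREP_READ, 7, 5));                  /* 5 */
            printf("%u\n", fence_to_wait_on(MSM_PREP_READ | MSM_PREP_WRITE, 7, 5)); /* 7 */
            return 0;
    }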
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index d746f13d283c..0676f32e2c6a 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -36,7 +36,7 @@ struct msm_gem_object {
          */
         struct list_head mm_list;
         struct msm_gpu *gpu;     /* non-null if active */
-        uint32_t fence;
+        uint32_t read_fence, write_fence;
 
         /* Transiently in the process of submit ioctl, objects associated
          * with the submit are on submit->bo_list.. this only lasts for
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 3e1ef3a00f60..5281d4bc37f7 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -78,7 +78,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
                 }
 
                 if (submit_bo.flags & BO_INVALID_FLAGS) {
-                        DBG("invalid flags: %x", submit_bo.flags);
+                        DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
                         ret = -EINVAL;
                         goto out_unlock;
                 }
@@ -92,7 +92,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
                  */
                 obj = idr_find(&file->object_idr, submit_bo.handle);
                 if (!obj) {
-                        DBG("invalid handle %u at index %u", submit_bo.handle, i);
+                        DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
                         ret = -EINVAL;
                         goto out_unlock;
                 }
@@ -100,7 +100,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
                 msm_obj = to_msm_bo(obj);
 
                 if (!list_empty(&msm_obj->submit_entry)) {
-                        DBG("handle %u at index %u already on submit list",
+                        DRM_ERROR("handle %u at index %u already on submit list\n",
                                         submit_bo.handle, i);
                         ret = -EINVAL;
                         goto out_unlock;
@@ -216,8 +216,9 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
                 struct msm_gem_object **obj, uint32_t *iova, bool *valid)
 {
         if (idx >= submit->nr_bos) {
-                DBG("invalid buffer index: %u (out of %u)", idx, submit->nr_bos);
-                return EINVAL;
+                DRM_ERROR("invalid buffer index: %u (out of %u)\n",
+                                idx, submit->nr_bos);
+                return -EINVAL;
         }
 
         if (obj)
@@ -239,7 +240,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
         int ret;
 
         if (offset % 4) {
-                DBG("non-aligned cmdstream buffer: %u", offset);
+                DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
                 return -EINVAL;
         }
 
@@ -266,7 +267,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
                         return -EFAULT;
 
                 if (submit_reloc.submit_offset % 4) {
-                        DBG("non-aligned reloc offset: %u",
+                        DRM_ERROR("non-aligned reloc offset: %u\n",
                                         submit_reloc.submit_offset);
                         return -EINVAL;
                 }
@@ -276,7 +277,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
 
                 if ((off >= (obj->base.size / 4)) ||
                                 (off < last_offset)) {
-                        DBG("invalid offset %u at reloc %u", off, i);
+                        DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
                         return -EINVAL;
                 }
 
@@ -374,14 +375,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                         goto out;
 
                 if (submit_cmd.size % 4) {
-                        DBG("non-aligned cmdstream buffer size: %u",
+                        DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
                                         submit_cmd.size);
                         ret = -EINVAL;
                         goto out;
                 }
 
-                if (submit_cmd.size >= msm_obj->base.size) {
-                        DBG("invalid cmdstream size: %u", submit_cmd.size);
+                if ((submit_cmd.size + submit_cmd.submit_offset) >=
+                                msm_obj->base.size) {
+                        DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
                         ret = -EINVAL;
                         goto out;
                 }
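The final hunk is the actual cmdstream size fix: the old check compared submit_cmd.size alone against the BO size and ignored submit_cmd.submit_offset, so a cmdstream starting at a non-zero offset could pass validation and still extend past the end of the buffer. A standalone before/after illustration with made-up sizes:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Old vs new bounds check from msm_ioctl_gem_submit(), lifted out
     * as predicates that return true when the submit is accepted. */
    static bool old_check_ok(uint32_t size, uint32_t offset, uint32_t bo_size)
    {
            (void)offset;                  /* the bug: offset ignored */
            return size < bo_size;
    }

    static bool new_check_ok(uint32_t size, uint32_t offset, uint32_t bo_size)
    {
            return (size + offset) < bo_size;
    }

    int main(void)
    {
            /* a 4096-byte cmdstream at offset 8000 in an 8192-byte BO
             * would end at byte 12096, well past the end: */
            printf("old: %d\n", old_check_ok(4096, 8000, 8192)); /* 1: wrongly accepted */
            printf("new: %d\n", new_check_ok(4096, 8000, 8192)); /* 0: rejected */
            return 0;
    }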
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index e1e1ec9321ff..3bab937965d1 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -29,13 +29,14 @@
 static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev)
 {
         struct drm_device *dev = gpu->dev;
-        struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
+        struct kgsl_device_platform_data *pdata;
 
         if (!pdev) {
                 dev_err(dev->dev, "could not find dtv pdata\n");
                 return;
         }
 
+        pdata = pdev->dev.platform_data;
         if (pdata->bus_scale_table) {
                 gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table);
                 DBG("bus scale client: %08x", gpu->bsc);
@@ -230,6 +231,8 @@ static void hangcheck_timer_reset(struct msm_gpu *gpu)
 static void hangcheck_handler(unsigned long data)
 {
         struct msm_gpu *gpu = (struct msm_gpu *)data;
+        struct drm_device *dev = gpu->dev;
+        struct msm_drm_private *priv = dev->dev_private;
         uint32_t fence = gpu->funcs->last_fence(gpu);
 
         if (fence != gpu->hangcheck_fence) {
@@ -237,14 +240,22 @@ static void hangcheck_handler(unsigned long data)
                 gpu->hangcheck_fence = fence;
         } else if (fence < gpu->submitted_fence) {
                 /* no progress and not done.. hung! */
-                struct msm_drm_private *priv = gpu->dev->dev_private;
                 gpu->hangcheck_fence = fence;
+                dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
+                                gpu->name);
+                dev_err(dev->dev, "%s: completed fence: %u\n",
+                                gpu->name, fence);
+                dev_err(dev->dev, "%s: submitted fence: %u\n",
+                                gpu->name, gpu->submitted_fence);
                 queue_work(priv->wq, &gpu->recover_work);
         }
 
         /* if still more pending work, reset the hangcheck timer: */
         if (gpu->submitted_fence > gpu->hangcheck_fence)
                 hangcheck_timer_reset(gpu);
+
+        /* workaround for missing irq: */
+        queue_work(priv->wq, &gpu->retire_work);
 }
 
 /*
@@ -265,7 +276,8 @@ static void retire_worker(struct work_struct *work)
                 obj = list_first_entry(&gpu->active_list,
                                 struct msm_gem_object, mm_list);
 
-                if (obj->fence <= fence) {
+                if ((obj->read_fence <= fence) &&
+                                (obj->write_fence <= fence)) {
                         /* move to inactive: */
                         msm_gem_move_to_inactive(&obj->base);
                         msm_gem_put_iova(&obj->base, gpu->id);
@@ -321,7 +333,11 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                                         submit->gpu->id, &iova);
                 }
 
-                msm_gem_move_to_active(&msm_obj->base, gpu, submit->fence);
+                if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
+                        msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
+
+                if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
+                        msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
         }
         hangcheck_timer_reset(gpu);
         mutex_unlock(&dev->struct_mutex);
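Taken together, the hangcheck changes reduce to a generic watchdog pattern: sample a progress counter on every timer tick; if it advanced, record it and rearm; if it stalled while work is still outstanding, declare a hang and schedule recovery (and, per the workaround above, kick the retire worker in case a completion IRQ was lost). A minimal driver-independent sketch of that pattern:

    #include <stdbool.h>
    #include <stdint.h>

    /* Editorial sketch of the hangcheck_handler() logic above, not
     * driver code: returns true when a hang should be reported. */
    struct watchdog {
            uint32_t last_seen;     /* progress counter at previous tick */
    };

    static bool watchdog_tick(struct watchdog *wd,
                    uint32_t completed, uint32_t submitted)
    {
            if (completed != wd->last_seen) {
                    wd->last_seen = completed;  /* progress was made */
                    return false;
            }
            /* no progress: only a hang if work is still outstanding */
            return completed < submitted;
    }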