Diffstat (limited to 'drivers/gpu/drm/msm')
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c   | 179
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c  |   4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c   |   7
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c              |   2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c       |   9
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c              |   3
6 files changed, 130 insertions(+), 74 deletions(-)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 1964f4f0d452..84c5b13b33c9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -39,6 +39,7 @@ struct mdp4_crtc {
 		spinlock_t lock;
 		bool stale;
 		uint32_t width, height;
+		uint32_t x, y;
 
 		/* next cursor to scan-out: */
 		uint32_t next_iova;
@@ -57,9 +58,16 @@ struct mdp4_crtc {
 #define PENDING_FLIP 0x2
 	atomic_t pending;
 
-	/* the fb that we currently hold a scanout ref to: */
+	/* the fb that we logically (from PoV of KMS API) hold a ref
+	 * to.  Which we may not yet be scanning out (we may still
+	 * be scanning out previous in case of page_flip while waiting
+	 * for gpu rendering to complete:
+	 */
 	struct drm_framebuffer *fb;
 
+	/* the fb that we currently hold a scanout ref to: */
+	struct drm_framebuffer *scanout_fb;
+
 	/* for unref'ing framebuffers after scanout completes: */
 	struct drm_flip_work unref_fb_work;
 
@@ -77,24 +85,73 @@ static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
 	return to_mdp4_kms(to_mdp_kms(priv->kms));
 }
 
-static void update_fb(struct drm_crtc *crtc, bool async,
-		struct drm_framebuffer *new_fb)
+static void request_pending(struct drm_crtc *crtc, uint32_t pending)
 {
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-	struct drm_framebuffer *old_fb = mdp4_crtc->fb;
 
-	if (old_fb)
-		drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
+	atomic_or(pending, &mdp4_crtc->pending);
+	mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
+}
+
+static void crtc_flush(struct drm_crtc *crtc)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+	struct mdp4_kms *mdp4_kms = get_kms(crtc);
+	uint32_t i, flush = 0;
+
+	for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
+		struct drm_plane *plane = mdp4_crtc->planes[i];
+		if (plane) {
+			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+			flush |= pipe2flush(pipe_id);
+		}
+	}
+	flush |= ovlp2flush(mdp4_crtc->ovlp);
+
+	DBG("%s: flush=%08x", mdp4_crtc->name, flush);
+
+	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
+}
+
+static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+	struct drm_framebuffer *old_fb = mdp4_crtc->fb;
 
 	/* grab reference to incoming scanout fb: */
 	drm_framebuffer_reference(new_fb);
 	mdp4_crtc->base.fb = new_fb;
 	mdp4_crtc->fb = new_fb;
 
-	if (!async) {
-		/* enable vblank to pick up the old_fb */
-		mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
-	}
+	if (old_fb)
+		drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
+}
+
+/* unlike update_fb(), take a ref to the new scanout fb *before* updating
+ * plane, then call this.  Needed to ensure we don't unref the buffer that
+ * is actually still being scanned out.
+ *
+ * Note that this whole thing goes away with atomic.. since we can defer
+ * calling into driver until rendering is done.
+ */
+static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+	/* flush updates, to make sure hw is updated to new scanout fb,
+	 * so that we can safely queue unref to current fb (ie. next
+	 * vblank we know hw is done w/ previous scanout_fb).
+	 */
+	crtc_flush(crtc);
+
+	if (mdp4_crtc->scanout_fb)
+		drm_flip_work_queue(&mdp4_crtc->unref_fb_work,
+				mdp4_crtc->scanout_fb);
+
+	mdp4_crtc->scanout_fb = fb;
+
+	/* enable vblank to complete flip: */
+	request_pending(crtc, PENDING_FLIP);
 }
 
 /* if file!=NULL, this is preclose potential cancel-flip path */
@@ -120,34 +177,6 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
-static void crtc_flush(struct drm_crtc *crtc)
-{
-	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-	struct mdp4_kms *mdp4_kms = get_kms(crtc);
-	uint32_t i, flush = 0;
-
-	for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
-		struct drm_plane *plane = mdp4_crtc->planes[i];
-		if (plane) {
-			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
-			flush |= pipe2flush(pipe_id);
-		}
-	}
-	flush |= ovlp2flush(mdp4_crtc->ovlp);
-
-	DBG("%s: flush=%08x", mdp4_crtc->name, flush);
-
-	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
-}
-
-static void request_pending(struct drm_crtc *crtc, uint32_t pending)
-{
-	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
-	atomic_or(pending, &mdp4_crtc->pending);
-	mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
-}
-
 static void pageflip_cb(struct msm_fence_cb *cb)
 {
 	struct mdp4_crtc *mdp4_crtc =
@@ -158,11 +187,9 @@ static void pageflip_cb(struct msm_fence_cb *cb)
 	if (!fb)
 		return;
 
+	drm_framebuffer_reference(fb);
 	mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
-	crtc_flush(crtc);
-
-	/* enable vblank to complete flip: */
-	request_pending(crtc, PENDING_FLIP);
+	update_scanout(crtc, fb);
 }
 
 static void unref_fb_worker(struct drm_flip_work *work, void *val)
@@ -320,6 +347,20 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
 			mode->vsync_end, mode->vtotal,
 			mode->type, mode->flags);
 
+	/* grab extra ref for update_scanout() */
+	drm_framebuffer_reference(crtc->fb);
+
+	ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
+			0, 0, mode->hdisplay, mode->vdisplay,
+			x << 16, y << 16,
+			mode->hdisplay << 16, mode->vdisplay << 16);
+	if (ret) {
+		drm_framebuffer_unreference(crtc->fb);
+		dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
+				mdp4_crtc->name, ret);
+		return ret;
+	}
+
 	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
 			MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
 			MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
@@ -341,24 +382,15 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
 
 	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
 
-	update_fb(crtc, false, crtc->fb);
-
-	ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
-			0, 0, mode->hdisplay, mode->vdisplay,
-			x << 16, y << 16,
-			mode->hdisplay << 16, mode->vdisplay << 16);
-	if (ret) {
-		dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
-				mdp4_crtc->name, ret);
-		return ret;
-	}
-
 	if (dma == DMA_E) {
 		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
 		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
 		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
 	}
 
+	update_fb(crtc, crtc->fb);
+	update_scanout(crtc, crtc->fb);
+
 	return 0;
 }
 
@@ -385,13 +417,24 @@ static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 	struct drm_plane *plane = mdp4_crtc->plane;
 	struct drm_display_mode *mode = &crtc->mode;
+	int ret;
 
-	update_fb(crtc, false, crtc->fb);
+	/* grab extra ref for update_scanout() */
+	drm_framebuffer_reference(crtc->fb);
 
-	return mdp4_plane_mode_set(plane, crtc, crtc->fb,
+	ret = mdp4_plane_mode_set(plane, crtc, crtc->fb,
 			0, 0, mode->hdisplay, mode->vdisplay,
 			x << 16, y << 16,
 			mode->hdisplay << 16, mode->vdisplay << 16);
+	if (ret) {
+		drm_framebuffer_unreference(crtc->fb);
+		return ret;
+	}
+
+	update_fb(crtc, crtc->fb);
+	update_scanout(crtc, crtc->fb);
+
+	return 0;
 }
 
 static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
@@ -419,7 +462,7 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
 	mdp4_crtc->event = event;
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	update_fb(crtc, true, new_fb);
+	update_fb(crtc, new_fb);
 
 	return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
 }
@@ -442,12 +485,12 @@ static int mdp4_crtc_set_property(struct drm_crtc *crtc,
 static void update_cursor(struct drm_crtc *crtc)
 {
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+	struct mdp4_kms *mdp4_kms = get_kms(crtc);
 	enum mdp4_dma dma = mdp4_crtc->dma;
 	unsigned long flags;
 
 	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
 	if (mdp4_crtc->cursor.stale) {
-		struct mdp4_kms *mdp4_kms = get_kms(crtc);
 		struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
 		struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
 		uint32_t iova = mdp4_crtc->cursor.next_iova;
@@ -479,6 +522,11 @@ static void update_cursor(struct drm_crtc *crtc)
 		mdp4_crtc->cursor.scanout_bo = next_bo;
 		mdp4_crtc->cursor.stale = false;
 	}
+
+	mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
+			MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
+			MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));
+
 	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
 }
 
@@ -530,6 +578,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
 		drm_gem_object_unreference_unlocked(old_bo);
 	}
 
+	crtc_flush(crtc);
 	request_pending(crtc, PENDING_CURSOR);
 
 	return 0;
@@ -542,12 +591,15 @@ fail:
 static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 {
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-	struct mdp4_kms *mdp4_kms = get_kms(crtc);
-	enum mdp4_dma dma = mdp4_crtc->dma;
+	unsigned long flags;
 
-	mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
-			MDP4_DMA_CURSOR_POS_X(x) |
-			MDP4_DMA_CURSOR_POS_Y(y));
+	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+	mdp4_crtc->cursor.x = x;
+	mdp4_crtc->cursor.y = y;
+	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+
+	crtc_flush(crtc);
+	request_pending(crtc, PENDING_CURSOR);
 
 	return 0;
 }
@@ -713,6 +765,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
 	crtc = &mdp4_crtc->base;
 
 	mdp4_crtc->plane = plane;
+	mdp4_crtc->id = id;
 
 	mdp4_crtc->ovlp = ovlp_id;
 	mdp4_crtc->dma = dma_id;
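
Taken together, the mdp4_crtc.c hunks split the old update_fb(crtc, async, fb) into update_fb() (the fb the KMS API logically points at) and update_scanout() (the fb the hardware is actually reading, unref'd only after crtc_flush()). The following is a minimal user-space sketch of that two-reference scheme, not driver code: fb_ref()/fb_unref(), cur_fb and the immediate unref are toy stand-ins for drm_framebuffer_reference()/unreference(), mdp4_crtc->fb and the deferred unref_fb_work queue.

/* Toy model of the two-reference scheme introduced by this patch. */
#include <assert.h>
#include <stdio.h>

struct fb { int refs; const char *name; };

static void fb_ref(struct fb *fb)   { fb->refs++; }
static void fb_unref(struct fb *fb) { assert(--fb->refs >= 0); }

static struct fb *cur_fb;      /* stands in for mdp4_crtc->fb         */
static struct fb *scanout_fb;  /* stands in for mdp4_crtc->scanout_fb */

/* page_flip: only the logical fb moves; hw keeps scanning the old one */
static void update_fb(struct fb *new_fb)
{
	fb_ref(new_fb);
	if (cur_fb)
		fb_unref(cur_fb);   /* driver defers this via unref_fb_work */
	cur_fb = new_fb;
}

/* called once the plane has been repointed; caller already took the
 * extra scanout ref, as pageflip_cb()/mode_set do in the patch */
static void update_scanout(struct fb *fb)
{
	/* the driver does crtc_flush() here, so by next vblank the hw is
	 * done with the previous scanout_fb and it is safe to drop it */
	if (scanout_fb)
		fb_unref(scanout_fb);
	scanout_fb = fb;
}

int main(void)
{
	struct fb a = { .name = "A" }, b = { .name = "B" };

	update_fb(&a);            /* initial mode set: A refs = 1 */
	fb_ref(&a);               /* extra ref for update_scanout() */
	update_scanout(&a);       /* A refs = 2 (KMS + scanout) */

	update_fb(&b);            /* page_flip queued: B = 1, A = 1 */

	/* gpu finished rendering: pageflip_cb() refs B, repoints the
	 * plane, then swaps the scanout ref off the old buffer */
	fb_ref(&b);
	update_scanout(&b);       /* B = 2, A = 0 */

	printf("A refs=%d, B refs=%d\n", a.refs, b.refs);
	return 0;
}

The point of the split is that a page_flip only moves the logical reference; the scanout reference on the old buffer survives until the plane has really been repointed and flushed, which is what keeps a buffer the hardware is still scanning out from being unref'd early.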
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 2406027200ec..1e893dd13859 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -170,8 +170,8 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
 			MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h));
 
 	mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe),
-			MDP4_PIPE_SRC_XY_X(crtc_x) |
-			MDP4_PIPE_SRC_XY_Y(crtc_y));
+			MDP4_PIPE_DST_XY_X(crtc_x) |
+			MDP4_PIPE_DST_XY_Y(crtc_y));
 
 	mdp4_plane_set_scanout(plane, fb);
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 71a3b2345eb3..f2794021f086 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -296,6 +296,7 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
 			x << 16, y << 16,
 			mode->hdisplay << 16, mode->vdisplay << 16);
 	if (ret) {
+		drm_framebuffer_unreference(crtc->fb);
 		dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
 				mdp5_crtc->name, ret);
 		return ret;
@@ -343,11 +344,15 @@ static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 			0, 0, mode->hdisplay, mode->vdisplay,
 			x << 16, y << 16,
 			mode->hdisplay << 16, mode->vdisplay << 16);
+	if (ret) {
+		drm_framebuffer_unreference(crtc->fb);
+		return ret;
+	}
 
 	update_fb(crtc, crtc->fb);
 	update_scanout(crtc, crtc->fb);
 
-	return ret;
+	return 0;
 }
 
 static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index d8d60c969ac7..3da8264d3039 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -644,7 +644,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 
 fail:
 	if (obj)
-		drm_gem_object_unreference_unlocked(obj);
+		drm_gem_object_unreference(obj);
 
 	return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 5281d4bc37f7..5423e914e491 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -163,7 +163,7 @@ retry:
 
 
 		/* if locking succeeded, pin bo: */
-		ret = msm_gem_get_iova(&msm_obj->base,
+		ret = msm_gem_get_iova_locked(&msm_obj->base,
 				submit->gpu->id, &iova);
 
 		/* this would break the logic in the fail path.. there is no
@@ -247,7 +247,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
 	/* For now, just map the entire thing.  Eventually we probably
 	 * to do it page-by-page, w/ kmap() if not vmap()d..
 	 */
-	ptr = msm_gem_vaddr(&obj->base);
+	ptr = msm_gem_vaddr_locked(&obj->base);
 
 	if (IS_ERR(ptr)) {
 		ret = PTR_ERR(ptr);
@@ -307,14 +307,12 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
 {
 	unsigned i;
 
-	mutex_lock(&submit->dev->struct_mutex);
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
 		submit_unlock_unpin_bo(submit, i);
 		list_del_init(&msm_obj->submit_entry);
 		drm_gem_object_unreference(&msm_obj->base);
 	}
-	mutex_unlock(&submit->dev->struct_mutex);
 
 	ww_acquire_fini(&submit->ticket);
 	kfree(submit);
@@ -342,6 +340,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	if (args->nr_cmds > MAX_CMDS)
 		return -EINVAL;
 
+	mutex_lock(&dev->struct_mutex);
+
 	submit = submit_create(dev, gpu, args->nr_bos);
 	if (!submit) {
 		ret = -ENOMEM;
@@ -410,5 +410,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 out:
 	if (submit)
 		submit_cleanup(submit, !!ret);
+	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 4ebce8be489d..0cfe3f426ee4 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -298,8 +298,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 	struct msm_drm_private *priv = dev->dev_private;
 	int i, ret;
 
-	mutex_lock(&dev->struct_mutex);
-
 	submit->fence = ++priv->next_fence;
 
 	gpu->submitted_fence = submit->fence;
@@ -331,7 +329,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 		msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
 	}
 	hangcheck_timer_reset(gpu);
-	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
 }
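
The msm_gem_submit.c and msm_gpu.c hunks hoist dev->struct_mutex out of submit_cleanup() and msm_gpu_submit() into msm_ioctl_gem_submit(), so the whole submit path runs under one lock acquisition and the helpers switch to the *_locked GEM variants. The following is a minimal user-space sketch of that inversion under stated assumptions: submit_path(), pin_bo_locked(), gpu_submit_locked() and cleanup_locked() are toy names, not kernel functions, and the `locked` flag stands in for a lockdep-style assertion.

/* Toy model: the entry point takes the lock once; the helpers only
 * assert it is held, mirroring the _locked helpers after the patch. */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;
static int locked;   /* poor man's lockdep_assert_held() */

static void pin_bo_locked(int bo)    { assert(locked); printf("pin bo %d\n", bo); }
static void gpu_submit_locked(void)  { assert(locked); printf("submit to ring\n"); }

static void cleanup_locked(int nr_bos)
{
	assert(locked);   /* used to lock/unlock on its own before the patch */
	for (int i = 0; i < nr_bos; i++)
		printf("unpin bo %d\n", i);
}

static int submit_path(int nr_bos)
{
	pthread_mutex_lock(&struct_mutex);   /* one lock for the whole path */
	locked = 1;

	for (int i = 0; i < nr_bos; i++)
		pin_bo_locked(i);
	gpu_submit_locked();
	cleanup_locked(nr_bos);

	locked = 0;
	pthread_mutex_unlock(&struct_mutex);
	return 0;
}

int main(void)
{
	return submit_path(2);
}

Holding the lock across pinning, ring submission and cleanup avoids the nested lock/unlock dance the helpers previously did and keeps the buffer state consistent for the whole ioctl.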