author:    Dave Airlie <airlied@redhat.com>  2015-07-29 22:41:44 -0400
committer: Dave Airlie <airlied@redhat.com>  2015-07-29 22:41:44 -0400
commit:    bdce3e7c729907e303396690b2b23b972c6717be
tree:      ce9c2de26e25b39880c0f5b6fe83c857de058fd8
parent:    d698291cd4fe5cf0da31da672ad0030cd47a0323
parent:    b4cba04f05ed6b9b2278547295ecc5c40180e612
Merge branch 'msm-fixes-4.2' of git://people.freedesktop.org/~robclark/linux into drm-fixes
Fix for a nasty crash on mdp4 in the disable path, a fix for dma-buf export,
an SMB leak on mdp5 which could result in intermittent modeset failures, and
don't let an interrupted system call disturb an atomic commit once we are
past the point of no return.

* 'msm-fixes-4.2' of git://people.freedesktop.org/~robclark/linux:
  drm/msm/mdp5: release SMB (shared memory blocks) in various cases
  drm/msm: change to uninterruptible wait in atomic commit
  drm/msm: mdp4: Fix drm_framebuffer dereference crash
  drm/msm: fix msm_gem_prime_get_sg_table()
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c  |  4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c    | 13
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h    |  2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c  | 33
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c    | 87
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h    |  1
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c           |  8
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c              | 13
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h              |  4
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c              |  2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_prime.c        |  8
11 files changed, 127 insertions(+), 48 deletions(-)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 0d1dbb737933..247a424445f7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -220,13 +220,15 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
 	uint32_t op_mode = 0;
 	uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
 	uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
-	enum mdp4_frame_format frame_type = mdp4_get_frame_format(fb);
+	enum mdp4_frame_format frame_type;
 
 	if (!(crtc && fb)) {
 		DBG("%s: disabled!", mdp4_plane->name);
 		return 0;
 	}
 
+	frame_type = mdp4_get_frame_format(fb);
+
 	/* src values are in Q16 fixed point, convert to integer: */
 	src_x = src_x >> 16;
 	src_y = src_y >> 16;
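
The mdp4 crash fixed above is an order-of-evaluation bug: the old code
dereferenced fb in a declaration's initializer, before the guard that
deliberately accepts fb == NULL on the disable path. A minimal, compilable
sketch of the hazard and the fix; struct fb and get_format() below are
illustrative stand-ins for the driver's drm_framebuffer and
mdp4_get_frame_format(), not real DRM API:

#include <stdio.h>

struct fb { int format; };

/* stand-in for mdp4_get_frame_format(): dereferences unconditionally */
static int get_format(const struct fb *fb)
{
	return fb->format;
}

static int mode_set(const struct fb *fb)
{
	int frame_type;                /* was: = get_format(fb), crashing */

	if (!fb) {                     /* disable path passes fb == NULL */
		printf("disabled\n");
		return 0;
	}

	frame_type = get_format(fb);   /* safe: only reached if fb != NULL */
	return frame_type;
}

int main(void)
{
	struct fb fb = { 1 };
	mode_set(&fb);                 /* enable: dereference is fine */
	mode_set(NULL);                /* disable: no longer crashes */
	return 0;
}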
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 206f758f7d64..e253db5de5aa 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -76,7 +76,20 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
 
 static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
+	int i;
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+	int nplanes = mdp5_kms->dev->mode_config.num_total_plane;
+
+	for (i = 0; i < nplanes; i++) {
+		struct drm_plane *plane = state->planes[i];
+		struct drm_plane_state *plane_state = state->plane_states[i];
+
+		if (!plane)
+			continue;
+
+		mdp5_plane_complete_commit(plane, plane_state);
+	}
+
 	mdp5_disable(mdp5_kms);
 }
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index e0eb24587c84..e79ac09b7216 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -227,6 +227,8 @@ void mdp5_plane_install_properties(struct drm_plane *plane,
 		struct drm_mode_object *obj);
 uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
 void mdp5_plane_complete_flip(struct drm_plane *plane);
+void mdp5_plane_complete_commit(struct drm_plane *plane,
+		struct drm_plane_state *state);
 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp5_plane_init(struct drm_device *dev,
 		enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 57b8f56ae9d0..22275568ab8b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -31,8 +31,6 @@ struct mdp5_plane {
 
 	uint32_t nformats;
 	uint32_t formats[32];
-
-	bool enabled;
 };
 #define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
 
@@ -56,22 +54,6 @@ static bool plane_enabled(struct drm_plane_state *state)
 	return state->fb && state->crtc;
 }
 
-static int mdp5_plane_disable(struct drm_plane *plane)
-{
-	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
-	struct mdp5_kms *mdp5_kms = get_kms(plane);
-	enum mdp5_pipe pipe = mdp5_plane->pipe;
-
-	DBG("%s: disable", mdp5_plane->name);
-
-	if (mdp5_kms) {
-		/* Release the memory we requested earlier from the SMP: */
-		mdp5_smp_release(mdp5_kms->smp, pipe);
-	}
-
-	return 0;
-}
-
 static void mdp5_plane_destroy(struct drm_plane *plane)
 {
 	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
@@ -224,7 +206,6 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
 
 	if (!plane_enabled(state)) {
 		to_mdp5_plane_state(state)->pending = true;
-		mdp5_plane_disable(plane);
 	} else if (to_mdp5_plane_state(state)->mode_changed) {
 		int ret;
 		to_mdp5_plane_state(state)->pending = true;
@@ -602,6 +583,20 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
 	return mdp5_plane->flush_mask;
 }
 
+/* called after vsync in thread context */
+void mdp5_plane_complete_commit(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	struct mdp5_kms *mdp5_kms = get_kms(plane);
+	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+	enum mdp5_pipe pipe = mdp5_plane->pipe;
+
+	if (!plane_enabled(plane->state)) {
+		DBG("%s: free SMP", mdp5_plane->name);
+		mdp5_smp_release(mdp5_kms->smp, pipe);
+	}
+}
+
 /* initialize plane */
 struct drm_plane *mdp5_plane_init(struct drm_device *dev,
 		enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 16702aecf0df..64a27d86f2f5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -34,22 +34,44 @@
  * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
  *
  * For each block that can be dynamically allocated, it can be either
- * free, or pending/in-use by a client. The updates happen in three steps:
+ *     free:
+ *         The block is free.
+ *
+ *     pending:
+ *         The block is allocated to some client and not free.
+ *
+ *     configured:
+ *         The block is allocated to some client, and assigned to that
+ *         client in MDP5_MDP_SMP_ALLOC registers.
+ *
+ *     inuse:
+ *         The block is being actively used by a client.
+ *
+ * The updates happen in the following steps:
  *
  * 1) mdp5_smp_request():
  *    When plane scanout is setup, calculate required number of
- *    blocks needed per client, and request. Blocks not inuse or
- *    pending by any other client are added to client's pending
- *    set.
+ *    blocks needed per client, and request. Blocks neither inuse nor
+ *    configured nor pending by any other client are added to client's
+ *    pending set.
+ *    For shrinking, blocks in pending but not in configured can be freed
+ *    directly, but those already in configured will be freed later by
+ *    mdp5_smp_commit.
  *
  * 2) mdp5_smp_configure():
  *    As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
  *    are configured for the union(pending, inuse)
+ *    Current pending is copied to configured.
+ *    It is assumed that mdp5_smp_request and mdp5_smp_configure do not
+ *    run concurrently for the same pipe.
  *
  * 3) mdp5_smp_commit():
- *    After next vblank, copy pending -> inuse. Optionally update
+ *    After next vblank, copy configured -> inuse. Optionally update
  *    MDP5_SMP_ALLOC registers if there are newly unused blocks
  *
+ * 4) mdp5_smp_release():
+ *    Must be called after the pipe is disabled and no longer uses any SMB
+ *
  * On the next vblank after changes have been committed to hw, the
  * client's pending blocks become its in-use blocks (and no-longer
  * in-use blocks become available to other clients).
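
The rewritten comment above describes pure bitmap bookkeeping, which is easy
to model outside the kernel. A compilable userspace sketch of the four steps,
using plain unsigned masks in place of mdp5_smp_state_t and omitting the
state_lock, the shrink path, and the hardware register writes; all names here
are illustrative, not the driver's:

#include <stdio.h>

/* one bit per shared memory block */
struct client_state {
	unsigned pending;    /* requested, not yet in ALLOC registers */
	unsigned configured; /* written to ALLOC registers, pre-vblank */
	unsigned inuse;      /* actively fetching since the last vblank */
};

/* step 1: grab blocks that no client holds in any set */
static void smp_request(struct client_state *ps, unsigned *global, int want)
{
	unsigned blk;

	for (blk = 1; want && blk; blk <<= 1) {
		if (*global & blk)
			continue;          /* held by some client */
		*global |= blk;
		ps->pending |= blk;
		want--;
	}
}

/* step 2: before FLUSH, program ALLOC registers; pending -> configured */
static void smp_configure(struct client_state *ps)
{
	if (ps->inuse != ps->configured)
		return;                    /* no vblank since last configure */
	ps->configured = ps->pending;
}

/* step 3: after vblank, configured -> inuse; drop newly unused blocks */
static void smp_commit(struct client_state *ps, unsigned *global)
{
	unsigned released = ps->inuse & ~ps->configured;

	*global &= ~released;              /* back to the free pool */
	ps->inuse = ps->configured;
}

/* step 4: pipe disabled, give everything back */
static void smp_release(struct client_state *ps, unsigned *global)
{
	*global &= ~(ps->pending | ps->configured | ps->inuse);
	ps->pending = ps->configured = ps->inuse = 0;
}

int main(void)
{
	unsigned global = 0;
	struct client_state ps = { 0, 0, 0 };

	smp_request(&ps, &global, 2);      /* scanout setup: two blocks */
	smp_configure(&ps);                /* programmed before FLUSH */
	smp_commit(&ps, &global);          /* vblank: now actively used */
	smp_release(&ps, &global);         /* pipe off: all blocks freed */
	printf("free pool mask: %#x\n", global);
	return 0;
}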
@@ -77,6 +99,9 @@ struct mdp5_smp {
 	struct mdp5_client_smp_state client_state[MAX_CLIENTS];
 };
 
+static void update_smp_state(struct mdp5_smp *smp,
+		u32 cid, mdp5_smp_state_t *assigned);
+
 static inline
 struct mdp5_kms *get_kms(struct mdp5_smp *smp)
 {
@@ -149,7 +174,12 @@ static int smp_request_block(struct mdp5_smp *smp,
 		for (i = cur_nblks; i > nblks; i--) {
 			int blk = find_first_bit(ps->pending, cnt);
 			clear_bit(blk, ps->pending);
-			/* don't clear in global smp_state until _commit() */
+
+			/* clear in global smp_state now if not in
+			 * configured, otherwise not until _commit()
+			 */
+			if (!test_bit(blk, ps->configured))
+				clear_bit(blk, smp->state);
 		}
 	}
 
@@ -223,10 +253,33 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid
 /* Release SMP blocks for all clients of the pipe */
 void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
 {
-	int i, nblks;
+	int i;
+	unsigned long flags;
+	int cnt = smp->blk_cnt;
+
+	for (i = 0; i < pipe2nclients(pipe); i++) {
+		mdp5_smp_state_t assigned;
+		u32 cid = pipe2client(pipe, i);
+		struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+
+		spin_lock_irqsave(&smp->state_lock, flags);
+
+		/* clear hw assignment */
+		bitmap_or(assigned, ps->inuse, ps->configured, cnt);
+		update_smp_state(smp, CID_UNUSED, &assigned);
+
+		/* free to global pool */
+		bitmap_andnot(smp->state, smp->state, ps->pending, cnt);
+		bitmap_andnot(smp->state, smp->state, assigned, cnt);
+
+		/* clear client's info */
+		bitmap_zero(ps->pending, cnt);
+		bitmap_zero(ps->configured, cnt);
+		bitmap_zero(ps->inuse, cnt);
+
+		spin_unlock_irqrestore(&smp->state_lock, flags);
+	}
 
-	for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
-		smp_request_block(smp, pipe2client(pipe, i), 0);
 	set_fifo_thresholds(smp, pipe, 0);
 }
 
@@ -274,12 +327,20 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
 		u32 cid = pipe2client(pipe, i);
 		struct mdp5_client_smp_state *ps = &smp->client_state[cid];
 
-		bitmap_or(assigned, ps->inuse, ps->pending, cnt);
+		/*
+		 * if vblank has not happened since last smp_configure
+		 * skip the configure for now
+		 */
+		if (!bitmap_equal(ps->inuse, ps->configured, cnt))
+			continue;
+
+		bitmap_copy(ps->configured, ps->pending, cnt);
+		bitmap_or(assigned, ps->inuse, ps->configured, cnt);
 		update_smp_state(smp, cid, &assigned);
 	}
 }
 
-/* step #3: after vblank, copy pending -> inuse: */
+/* step #3: after vblank, copy configured -> inuse: */
 void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
 {
 	int cnt = smp->blk_cnt;
@@ -295,7 +356,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
 		 * using, which can be released and made available to other
 		 * clients:
 		 */
-		if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
+		if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) {
 			unsigned long flags;
 
 			spin_lock_irqsave(&smp->state_lock, flags);
@@ -306,7 +367,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
 			update_smp_state(smp, CID_UNUSED, &released);
 		}
 
-		bitmap_copy(ps->inuse, ps->pending, cnt);
+		bitmap_copy(ps->inuse, ps->configured, cnt);
 	}
 }
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
index e47179f63585..5b6c2363f592 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -23,6 +23,7 @@
 
 struct mdp5_client_smp_state {
 	mdp5_smp_state_t inuse;
+	mdp5_smp_state_t configured;
 	mdp5_smp_state_t pending;
 };
 
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 1b22d8bfe142..1ceb4f22dd89 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -283,12 +283,8 @@ int msm_atomic_commit(struct drm_device *dev,
 
 	timeout = ktime_add_ms(ktime_get(), 1000);
 
-	ret = msm_wait_fence_interruptable(dev, c->fence, &timeout);
-	if (ret) {
-		WARN_ON(ret);  // TODO unswap state back?  or??
-		commit_destroy(c);
-		return ret;
-	}
+	/* uninterruptible wait */
+	msm_wait_fence(dev, c->fence, &timeout, false);
 
 	complete_commit(c);
 
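
The rationale for the change above: by the time msm_atomic_commit() waits on
the fence, the new state has already been swapped in, so the old error path
(WARN, commit_destroy(), return) could abandon a commit that hardware was
already applying. Making the wait uninterruptible removes that window. A toy,
compilable sketch of the single msm_wait_fence() entry point with a flag,
using stub waits in place of wait_event_timeout() and
wait_event_interruptible_timeout():

#include <stdbool.h>
#include <stdio.h>

#define ERESTARTSYS 512

/* stubs: pretend a signal arrives during the interruptible wait */
static long wait_interruptible(void) { return -ERESTARTSYS; }
static long wait_uninterruptible(void) { return 1; /* condition met */ }

/* single entry point; callers choose the wait flavour, mirroring
 * msm_wait_fence(dev, fence, &timeout, interruptible) */
static int wait_fence(bool interruptible)
{
	long ret = interruptible ? wait_interruptible()
				 : wait_uninterruptible();
	return ret < 0 ? (int)ret : 0;
}

int main(void)
{
	/* ioctl path: userspace can see -ERESTARTSYS and retry */
	printf("ioctl wait:  %d\n", wait_fence(true));

	/* commit path, past the point of no return: must not abort */
	printf("commit wait: %d\n", wait_fence(false));
	return 0;
}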
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b7ef56ed8d1c..d3467b115e04 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -637,8 +637,8 @@ static void msm_debugfs_cleanup(struct drm_minor *minor)
  * Fences:
  */
 
-int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
-		ktime_t *timeout)
+int msm_wait_fence(struct drm_device *dev, uint32_t fence,
+		ktime_t *timeout, bool interruptible)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	int ret;
@@ -667,7 +667,12 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
 		remaining_jiffies = timespec_to_jiffies(&ts);
 	}
 
-	ret = wait_event_interruptible_timeout(priv->fence_event,
+	if (interruptible)
+		ret = wait_event_interruptible_timeout(priv->fence_event,
+				fence_completed(dev, fence),
+				remaining_jiffies);
+	else
+		ret = wait_event_timeout(priv->fence_event,
 			fence_completed(dev, fence),
 			remaining_jiffies);
 
@@ -853,7 +858,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	return msm_wait_fence_interruptable(dev, args->fence, &timeout);
+	return msm_wait_fence(dev, args->fence, &timeout, true);
 }
 
 static const struct drm_ioctl_desc msm_ioctls[] = {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index e7c5ea125d45..4ff0ec9c994b 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -164,8 +164,8 @@ int msm_atomic_commit(struct drm_device *dev,
 
 int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
 
-int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
-		ktime_t *timeout);
+int msm_wait_fence(struct drm_device *dev, uint32_t fence,
+		ktime_t *timeout, bool interruptible);
 int msm_queue_fence_cb(struct drm_device *dev,
 		struct msm_fence_cb *cb, uint32_t fence);
 void msm_update_fence(struct drm_device *dev, uint32_t fence);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f211b80e3a1e..c76cc853b08a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -460,7 +460,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 		if (op & MSM_PREP_NOSYNC)
 			timeout = NULL;
 
-		ret = msm_wait_fence_interruptable(dev, fence, timeout);
+		ret = msm_wait_fence(dev, fence, timeout, true);
 	}
 
 	/* TODO cache maintenance */
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index dd7a7ab603e2..831461bc98a5 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -23,8 +23,12 @@
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	BUG_ON(!msm_obj->sgt);	/* should have already pinned! */
-	return msm_obj->sgt;
+	int npages = obj->size >> PAGE_SHIFT;
+
+	if (WARN_ON(!msm_obj->pages))	/* should have already pinned! */
+		return NULL;
+
+	return drm_prime_pages_to_sg(msm_obj->pages, npages);
 }
 
 void *msm_gem_prime_vmap(struct drm_gem_object *obj)
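
The dma-buf export helpers treat the returned sg_table as the importer's to
map and eventually free, so handing back the cached msm_obj->sgt let an
importer free a table the driver still owned; building a fresh table per
export with drm_prime_pages_to_sg() avoids that. A compilable userspace
analogue of "export a fresh copy, never the cached original", with a
hypothetical struct and helpers standing in for the GEM object and prime
helper:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct object {
	int *table;                    /* cached internal table, like msm_obj->sgt */
	size_t n;
};

/* broken export: hands the importer the cached table; when the
 * importer free()s it on detach, the object dangles (old code) */
static int *export_broken(struct object *obj)
{
	return obj->table;
}

/* fixed export: build a fresh table per export, the way
 * drm_prime_pages_to_sg() builds one from the pinned pages */
static int *export_fixed(const struct object *obj)
{
	int *copy;

	if (!obj->table)               /* should have already pinned! */
		return NULL;
	copy = malloc(obj->n * sizeof(*copy));
	if (copy)
		memcpy(copy, obj->table, obj->n * sizeof(*copy));
	return copy;
}

int main(void)
{
	int backing[2] = { 1, 2 };
	struct object obj = { backing, 2 };
	int *t = export_fixed(&obj);   /* importer owns this copy */

	if (t) {
		printf("%d %d\n", t[0], t[1]);
		free(t);               /* safe: obj.table untouched */
	}

	/* freeing export_broken()'s result would release storage the
	 * object still uses, which is the class of bug fixed above */
	(void)export_broken(&obj);
	return 0;
}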