diff options
author | Dave Airlie <airlied@redhat.com> | 2016-10-03 22:43:31 -0400 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2016-10-03 22:43:31 -0400 |
commit | f5dce66593ffb76d750918d6f1962fd62fb54d64 (patch) | |
tree | 05e79a3257a9d05728bca2592ba07c073724ea68 | |
parent | 28a396545a2a5fbdffb2b661ed6c9b6820e28772 (diff) | |
parent | c0462796464219fed0fbc1e8b2b93eb6751769f5 (diff) |
Merge branch 'exynos-drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos into drm-next
This pull request includes,
- Code refactoring on HDMI DDC and PHY.
- Regression fixup on deadlock issue with G2D pm integration.
- Fixup on page fault issue with wait_for_vblank mechanism specific to Exynos drm.
- And some cleanups.
* 'exynos-drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos:
drm/exynos: g2d: simplify g2d_free_runqueue_node()
drm/exynos: g2d: use autosuspend mode for PM runtime
drm/exynos: g2d: wait for engine to finish
drm/exynos: g2d: remove runqueue nodes in g2d_{close,remove}()
drm/exynos: g2d: move PM management to runqueue worker
Revert "drm/exynos: g2d: fix system and runtime pm integration"
drm/exynos: use drm core to handle page-flip event
drm/exynos: mark exynos_dp_crtc_clock_enable() static
drm/exynos/fimd: add clock rate checking
drm/exynos: fix pending update handling
drm/exynos/vidi: use timer for vblanks instead of sleeping worker
drm/exynos: g2d: beautify probing message
drm/exynos: mixer: simplify loop in vp_win_reset()
drm/exynos: mixer: convert booleans to flags in mixer context
gpu: drm: exynos_hdmi: Remove duplicate initialization of regulator bulk consumer
gpu: drm: exynos_hdmi: Move PHY logic into single function
gpu: drm: exynos_hdmi: Move DDC logic into single function
-rw-r--r-- | drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 11 | ||||
-rw-r--r-- | drivers/gpu/drm/exynos/exynos7_drm_decon.c | 9 | ||||
-rw-r--r-- | drivers/gpu/drm/exynos/exynos_dp.c | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/exynos/exynos_drm_crtc.c | 58 | ||||
-rw-r--r-- | drivers/gpu/drm/exynos/exynos_drm_drv.c | 44 | ||||
-rw-r--r-- | drivers/gpu/drm/exynos/exynos_drm_drv.h | 4 | ||||
-rw-r--r-- | drivers/gpu/drm/exynos/exynos_drm_fimd.c | 54 | ||||
-rw-r--r-- | drivers/gpu/drm/exynos/exynos_drm_g2d.c | 239 | ||||
-rw-r--r-- | drivers/gpu/drm/exynos/exynos_drm_plane.c | 1 | ||||
-rw-r--r-- | drivers/gpu/drm/exynos/exynos_drm_vidi.c | 76 | ||||
-rw-r--r-- | drivers/gpu/drm/exynos/exynos_hdmi.c | 112 | ||||
-rw-r--r-- | drivers/gpu/drm/exynos/exynos_mixer.c | 68 |
12 files changed, 352 insertions, 326 deletions
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index ac21b4000835..6ca1f3117fe8 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c | |||
@@ -551,7 +551,6 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id) | |||
551 | { | 551 | { |
552 | struct decon_context *ctx = dev_id; | 552 | struct decon_context *ctx = dev_id; |
553 | u32 val; | 553 | u32 val; |
554 | int win; | ||
555 | 554 | ||
556 | if (!test_bit(BIT_CLKS_ENABLED, &ctx->flags)) | 555 | if (!test_bit(BIT_CLKS_ENABLED, &ctx->flags)) |
557 | goto out; | 556 | goto out; |
@@ -560,16 +559,6 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id) | |||
560 | val &= VIDINTCON1_INTFRMDONEPEND | VIDINTCON1_INTFRMPEND; | 559 | val &= VIDINTCON1_INTFRMDONEPEND | VIDINTCON1_INTFRMPEND; |
561 | 560 | ||
562 | if (val) { | 561 | if (val) { |
563 | for (win = ctx->first_win; win < WINDOWS_NR ; win++) { | ||
564 | struct exynos_drm_plane *plane = &ctx->planes[win]; | ||
565 | |||
566 | if (!plane->pending_fb) | ||
567 | continue; | ||
568 | |||
569 | exynos_drm_crtc_finish_update(ctx->crtc, plane); | ||
570 | } | ||
571 | |||
572 | /* clear */ | ||
573 | writel(val, ctx->addr + DECON_VIDINTCON1); | 562 | writel(val, ctx->addr + DECON_VIDINTCON1); |
574 | drm_crtc_handle_vblank(&ctx->crtc->base); | 563 | drm_crtc_handle_vblank(&ctx->crtc->base); |
575 | } | 564 | } |
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index 7f9901b7777b..f4d5a2133777 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c | |||
@@ -603,7 +603,6 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id) | |||
603 | { | 603 | { |
604 | struct decon_context *ctx = (struct decon_context *)dev_id; | 604 | struct decon_context *ctx = (struct decon_context *)dev_id; |
605 | u32 val, clear_bit; | 605 | u32 val, clear_bit; |
606 | int win; | ||
607 | 606 | ||
608 | val = readl(ctx->regs + VIDINTCON1); | 607 | val = readl(ctx->regs + VIDINTCON1); |
609 | 608 | ||
@@ -617,14 +616,6 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id) | |||
617 | 616 | ||
618 | if (!ctx->i80_if) { | 617 | if (!ctx->i80_if) { |
619 | drm_crtc_handle_vblank(&ctx->crtc->base); | 618 | drm_crtc_handle_vblank(&ctx->crtc->base); |
620 | for (win = 0 ; win < WINDOWS_NR ; win++) { | ||
621 | struct exynos_drm_plane *plane = &ctx->planes[win]; | ||
622 | |||
623 | if (!plane->pending_fb) | ||
624 | continue; | ||
625 | |||
626 | exynos_drm_crtc_finish_update(ctx->crtc, plane); | ||
627 | } | ||
628 | 619 | ||
629 | /* set wait vsync event to zero and wake up queue. */ | 620 | /* set wait vsync event to zero and wake up queue. */ |
630 | if (atomic_read(&ctx->wait_vsync_event)) { | 621 | if (atomic_read(&ctx->wait_vsync_event)) { |
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c index 4f0850585b8e..528229faffe4 100644 --- a/drivers/gpu/drm/exynos/exynos_dp.c +++ b/drivers/gpu/drm/exynos/exynos_dp.c | |||
@@ -43,7 +43,7 @@ struct exynos_dp_device { | |||
43 | struct analogix_dp_plat_data plat_data; | 43 | struct analogix_dp_plat_data plat_data; |
44 | }; | 44 | }; |
45 | 45 | ||
46 | int exynos_dp_crtc_clock_enable(struct analogix_dp_plat_data *plat_data, | 46 | static int exynos_dp_crtc_clock_enable(struct analogix_dp_plat_data *plat_data, |
47 | bool enable) | 47 | bool enable) |
48 | { | 48 | { |
49 | struct exynos_dp_device *dp = to_dp(plat_data); | 49 | struct exynos_dp_device *dp = to_dp(plat_data); |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 785ffa6cc309..2530bf57716a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c | |||
@@ -69,8 +69,6 @@ static void exynos_crtc_atomic_begin(struct drm_crtc *crtc, | |||
69 | { | 69 | { |
70 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); | 70 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); |
71 | 71 | ||
72 | exynos_crtc->event = crtc->state->event; | ||
73 | |||
74 | if (exynos_crtc->ops->atomic_begin) | 72 | if (exynos_crtc->ops->atomic_begin) |
75 | exynos_crtc->ops->atomic_begin(exynos_crtc); | 73 | exynos_crtc->ops->atomic_begin(exynos_crtc); |
76 | } | 74 | } |
@@ -79,9 +77,24 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc, | |||
79 | struct drm_crtc_state *old_crtc_state) | 77 | struct drm_crtc_state *old_crtc_state) |
80 | { | 78 | { |
81 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); | 79 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); |
80 | struct drm_pending_vblank_event *event; | ||
81 | unsigned long flags; | ||
82 | 82 | ||
83 | if (exynos_crtc->ops->atomic_flush) | 83 | if (exynos_crtc->ops->atomic_flush) |
84 | exynos_crtc->ops->atomic_flush(exynos_crtc); | 84 | exynos_crtc->ops->atomic_flush(exynos_crtc); |
85 | |||
86 | event = crtc->state->event; | ||
87 | if (event) { | ||
88 | crtc->state->event = NULL; | ||
89 | |||
90 | spin_lock_irqsave(&crtc->dev->event_lock, flags); | ||
91 | if (drm_crtc_vblank_get(crtc) == 0) | ||
92 | drm_crtc_arm_vblank_event(crtc, event); | ||
93 | else | ||
94 | drm_crtc_send_vblank_event(crtc, event); | ||
95 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); | ||
96 | } | ||
97 | |||
85 | } | 98 | } |
86 | 99 | ||
87 | static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { | 100 | static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { |
@@ -134,8 +147,6 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev, | |||
134 | exynos_crtc->ops = ops; | 147 | exynos_crtc->ops = ops; |
135 | exynos_crtc->ctx = ctx; | 148 | exynos_crtc->ctx = ctx; |
136 | 149 | ||
137 | init_waitqueue_head(&exynos_crtc->wait_update); | ||
138 | |||
139 | crtc = &exynos_crtc->base; | 150 | crtc = &exynos_crtc->base; |
140 | 151 | ||
141 | private->crtc[pipe] = crtc; | 152 | private->crtc[pipe] = crtc; |
@@ -175,32 +186,6 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, unsigned int pipe) | |||
175 | exynos_crtc->ops->disable_vblank(exynos_crtc); | 186 | exynos_crtc->ops->disable_vblank(exynos_crtc); |
176 | } | 187 | } |
177 | 188 | ||
178 | void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc) | ||
179 | { | ||
180 | wait_event_timeout(exynos_crtc->wait_update, | ||
181 | (atomic_read(&exynos_crtc->pending_update) == 0), | ||
182 | msecs_to_jiffies(50)); | ||
183 | } | ||
184 | |||
185 | void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc, | ||
186 | struct exynos_drm_plane *exynos_plane) | ||
187 | { | ||
188 | struct drm_crtc *crtc = &exynos_crtc->base; | ||
189 | unsigned long flags; | ||
190 | |||
191 | exynos_plane->pending_fb = NULL; | ||
192 | |||
193 | if (atomic_dec_and_test(&exynos_crtc->pending_update)) | ||
194 | wake_up(&exynos_crtc->wait_update); | ||
195 | |||
196 | spin_lock_irqsave(&crtc->dev->event_lock, flags); | ||
197 | if (exynos_crtc->event) | ||
198 | drm_crtc_send_vblank_event(crtc, exynos_crtc->event); | ||
199 | |||
200 | exynos_crtc->event = NULL; | ||
201 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); | ||
202 | } | ||
203 | |||
204 | int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev, | 189 | int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev, |
205 | enum exynos_drm_output_type out_type) | 190 | enum exynos_drm_output_type out_type) |
206 | { | 191 | { |
@@ -228,20 +213,19 @@ void exynos_drm_crtc_te_handler(struct drm_crtc *crtc) | |||
228 | void exynos_drm_crtc_cancel_page_flip(struct drm_crtc *crtc, | 213 | void exynos_drm_crtc_cancel_page_flip(struct drm_crtc *crtc, |
229 | struct drm_file *file) | 214 | struct drm_file *file) |
230 | { | 215 | { |
231 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); | ||
232 | struct drm_pending_vblank_event *e; | 216 | struct drm_pending_vblank_event *e; |
233 | unsigned long flags; | 217 | unsigned long flags; |
234 | 218 | ||
235 | spin_lock_irqsave(&crtc->dev->event_lock, flags); | 219 | spin_lock_irqsave(&crtc->dev->event_lock, flags); |
236 | 220 | ||
237 | e = exynos_crtc->event; | 221 | e = crtc->state->event; |
238 | if (e && e->base.file_priv == file) { | 222 | if (e && e->base.file_priv == file) |
239 | exynos_crtc->event = NULL; | 223 | crtc->state->event = NULL; |
240 | atomic_dec(&exynos_crtc->pending_update); | 224 | else |
241 | } | 225 | e = NULL; |
242 | 226 | ||
243 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); | 227 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); |
244 | 228 | ||
245 | if (e && e->base.file_priv == file) | 229 | if (e) |
246 | drm_event_cancel_free(crtc->dev, &e->base); | 230 | drm_event_cancel_free(crtc->dev, &e->base); |
247 | } | 231 | } |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 486943e70f70..def78c8c1780 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
@@ -45,37 +45,11 @@ struct exynos_atomic_commit { | |||
45 | u32 crtcs; | 45 | u32 crtcs; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | static void exynos_atomic_wait_for_commit(struct drm_atomic_state *state) | ||
49 | { | ||
50 | struct drm_crtc_state *crtc_state; | ||
51 | struct drm_crtc *crtc; | ||
52 | int i, ret; | ||
53 | |||
54 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | ||
55 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); | ||
56 | |||
57 | if (!crtc->state->enable) | ||
58 | continue; | ||
59 | |||
60 | ret = drm_crtc_vblank_get(crtc); | ||
61 | if (ret) | ||
62 | continue; | ||
63 | |||
64 | exynos_drm_crtc_wait_pending_update(exynos_crtc); | ||
65 | drm_crtc_vblank_put(crtc); | ||
66 | } | ||
67 | } | ||
68 | |||
69 | static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit) | 48 | static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit) |
70 | { | 49 | { |
71 | struct drm_device *dev = commit->dev; | 50 | struct drm_device *dev = commit->dev; |
72 | struct exynos_drm_private *priv = dev->dev_private; | 51 | struct exynos_drm_private *priv = dev->dev_private; |
73 | struct drm_atomic_state *state = commit->state; | 52 | struct drm_atomic_state *state = commit->state; |
74 | struct drm_plane *plane; | ||
75 | struct drm_crtc *crtc; | ||
76 | struct drm_plane_state *plane_state; | ||
77 | struct drm_crtc_state *crtc_state; | ||
78 | int i; | ||
79 | 53 | ||
80 | drm_atomic_helper_commit_modeset_disables(dev, state); | 54 | drm_atomic_helper_commit_modeset_disables(dev, state); |
81 | 55 | ||
@@ -89,25 +63,9 @@ static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit) | |||
89 | * have the relevant clocks enabled to perform the update. | 63 | * have the relevant clocks enabled to perform the update. |
90 | */ | 64 | */ |
91 | 65 | ||
92 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | ||
93 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); | ||
94 | |||
95 | atomic_set(&exynos_crtc->pending_update, 0); | ||
96 | } | ||
97 | |||
98 | for_each_plane_in_state(state, plane, plane_state, i) { | ||
99 | struct exynos_drm_crtc *exynos_crtc = | ||
100 | to_exynos_crtc(plane->crtc); | ||
101 | |||
102 | if (!plane->crtc) | ||
103 | continue; | ||
104 | |||
105 | atomic_inc(&exynos_crtc->pending_update); | ||
106 | } | ||
107 | |||
108 | drm_atomic_helper_commit_planes(dev, state, 0); | 66 | drm_atomic_helper_commit_planes(dev, state, 0); |
109 | 67 | ||
110 | exynos_atomic_wait_for_commit(state); | 68 | drm_atomic_helper_wait_for_vblanks(dev, state); |
111 | 69 | ||
112 | drm_atomic_helper_cleanup_planes(dev, state); | 70 | drm_atomic_helper_cleanup_planes(dev, state); |
113 | 71 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index 7f1a49d5bdbe..d215149e737b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h | |||
@@ -86,7 +86,6 @@ struct exynos_drm_plane { | |||
86 | struct drm_plane base; | 86 | struct drm_plane base; |
87 | const struct exynos_drm_plane_config *config; | 87 | const struct exynos_drm_plane_config *config; |
88 | unsigned int index; | 88 | unsigned int index; |
89 | struct drm_framebuffer *pending_fb; | ||
90 | }; | 89 | }; |
91 | 90 | ||
92 | #define EXYNOS_DRM_PLANE_CAP_DOUBLE (1 << 0) | 91 | #define EXYNOS_DRM_PLANE_CAP_DOUBLE (1 << 0) |
@@ -172,9 +171,6 @@ struct exynos_drm_crtc { | |||
172 | struct drm_crtc base; | 171 | struct drm_crtc base; |
173 | enum exynos_drm_output_type type; | 172 | enum exynos_drm_output_type type; |
174 | unsigned int pipe; | 173 | unsigned int pipe; |
175 | struct drm_pending_vblank_event *event; | ||
176 | wait_queue_head_t wait_update; | ||
177 | atomic_t pending_update; | ||
178 | const struct exynos_drm_crtc_ops *ops; | 174 | const struct exynos_drm_crtc_ops *ops; |
179 | void *ctx; | 175 | void *ctx; |
180 | struct exynos_drm_clk *pipe_clk; | 176 | struct exynos_drm_clk *pipe_clk; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index d47216488985..e2e405170d35 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c | |||
@@ -198,6 +198,7 @@ struct fimd_context { | |||
198 | atomic_t wait_vsync_event; | 198 | atomic_t wait_vsync_event; |
199 | atomic_t win_updated; | 199 | atomic_t win_updated; |
200 | atomic_t triggering; | 200 | atomic_t triggering; |
201 | u32 clkdiv; | ||
201 | 202 | ||
202 | const struct fimd_driver_data *driver_data; | 203 | const struct fimd_driver_data *driver_data; |
203 | struct drm_encoder *encoder; | 204 | struct drm_encoder *encoder; |
@@ -389,15 +390,18 @@ static void fimd_clear_channels(struct exynos_drm_crtc *crtc) | |||
389 | pm_runtime_put(ctx->dev); | 390 | pm_runtime_put(ctx->dev); |
390 | } | 391 | } |
391 | 392 | ||
392 | static u32 fimd_calc_clkdiv(struct fimd_context *ctx, | 393 | |
393 | const struct drm_display_mode *mode) | 394 | static int fimd_atomic_check(struct exynos_drm_crtc *crtc, |
395 | struct drm_crtc_state *state) | ||
394 | { | 396 | { |
395 | unsigned long ideal_clk; | 397 | struct drm_display_mode *mode = &state->adjusted_mode; |
398 | struct fimd_context *ctx = crtc->ctx; | ||
399 | unsigned long ideal_clk, lcd_rate; | ||
396 | u32 clkdiv; | 400 | u32 clkdiv; |
397 | 401 | ||
398 | if (mode->clock == 0) { | 402 | if (mode->clock == 0) { |
399 | DRM_ERROR("Mode has zero clock value.\n"); | 403 | DRM_INFO("Mode has zero clock value.\n"); |
400 | return 0xff; | 404 | return -EINVAL; |
401 | } | 405 | } |
402 | 406 | ||
403 | ideal_clk = mode->clock * 1000; | 407 | ideal_clk = mode->clock * 1000; |
@@ -410,10 +414,23 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx, | |||
410 | ideal_clk *= 2; | 414 | ideal_clk *= 2; |
411 | } | 415 | } |
412 | 416 | ||
417 | lcd_rate = clk_get_rate(ctx->lcd_clk); | ||
418 | if (2 * lcd_rate < ideal_clk) { | ||
419 | DRM_INFO("sclk_fimd clock too low(%lu) for requested pixel clock(%lu)\n", | ||
420 | lcd_rate, ideal_clk); | ||
421 | return -EINVAL; | ||
422 | } | ||
423 | |||
413 | /* Find the clock divider value that gets us closest to ideal_clk */ | 424 | /* Find the clock divider value that gets us closest to ideal_clk */ |
414 | clkdiv = DIV_ROUND_CLOSEST(clk_get_rate(ctx->lcd_clk), ideal_clk); | 425 | clkdiv = DIV_ROUND_CLOSEST(lcd_rate, ideal_clk); |
426 | if (clkdiv >= 0x200) { | ||
427 | DRM_INFO("requested pixel clock(%lu) too low\n", ideal_clk); | ||
428 | return -EINVAL; | ||
429 | } | ||
430 | |||
431 | ctx->clkdiv = (clkdiv < 0x100) ? clkdiv : 0xff; | ||
415 | 432 | ||
416 | return (clkdiv < 0x100) ? clkdiv : 0xff; | 433 | return 0; |
417 | } | 434 | } |
418 | 435 | ||
419 | static void fimd_setup_trigger(struct fimd_context *ctx) | 436 | static void fimd_setup_trigger(struct fimd_context *ctx) |
@@ -442,7 +459,7 @@ static void fimd_commit(struct exynos_drm_crtc *crtc) | |||
442 | struct drm_display_mode *mode = &crtc->base.state->adjusted_mode; | 459 | struct drm_display_mode *mode = &crtc->base.state->adjusted_mode; |
443 | const struct fimd_driver_data *driver_data = ctx->driver_data; | 460 | const struct fimd_driver_data *driver_data = ctx->driver_data; |
444 | void *timing_base = ctx->regs + driver_data->timing_base; | 461 | void *timing_base = ctx->regs + driver_data->timing_base; |
445 | u32 val, clkdiv; | 462 | u32 val; |
446 | 463 | ||
447 | if (ctx->suspended) | 464 | if (ctx->suspended) |
448 | return; | 465 | return; |
@@ -543,9 +560,8 @@ static void fimd_commit(struct exynos_drm_crtc *crtc) | |||
543 | if (ctx->driver_data->has_clksel) | 560 | if (ctx->driver_data->has_clksel) |
544 | val |= VIDCON0_CLKSEL_LCD; | 561 | val |= VIDCON0_CLKSEL_LCD; |
545 | 562 | ||
546 | clkdiv = fimd_calc_clkdiv(ctx, mode); | 563 | if (ctx->clkdiv > 1) |
547 | if (clkdiv > 1) | 564 | val |= VIDCON0_CLKVAL_F(ctx->clkdiv - 1) | VIDCON0_CLKDIR; |
548 | val |= VIDCON0_CLKVAL_F(clkdiv - 1) | VIDCON0_CLKDIR; | ||
549 | 565 | ||
550 | writel(val, ctx->regs + VIDCON0); | 566 | writel(val, ctx->regs + VIDCON0); |
551 | } | 567 | } |
@@ -939,14 +955,14 @@ static const struct exynos_drm_crtc_ops fimd_crtc_ops = { | |||
939 | .update_plane = fimd_update_plane, | 955 | .update_plane = fimd_update_plane, |
940 | .disable_plane = fimd_disable_plane, | 956 | .disable_plane = fimd_disable_plane, |
941 | .atomic_flush = fimd_atomic_flush, | 957 | .atomic_flush = fimd_atomic_flush, |
958 | .atomic_check = fimd_atomic_check, | ||
942 | .te_handler = fimd_te_handler, | 959 | .te_handler = fimd_te_handler, |
943 | }; | 960 | }; |
944 | 961 | ||
945 | static irqreturn_t fimd_irq_handler(int irq, void *dev_id) | 962 | static irqreturn_t fimd_irq_handler(int irq, void *dev_id) |
946 | { | 963 | { |
947 | struct fimd_context *ctx = (struct fimd_context *)dev_id; | 964 | struct fimd_context *ctx = (struct fimd_context *)dev_id; |
948 | u32 val, clear_bit, start, start_s; | 965 | u32 val, clear_bit; |
949 | int win; | ||
950 | 966 | ||
951 | val = readl(ctx->regs + VIDINTCON1); | 967 | val = readl(ctx->regs + VIDINTCON1); |
952 | 968 | ||
@@ -961,18 +977,6 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id) | |||
961 | if (!ctx->i80_if) | 977 | if (!ctx->i80_if) |
962 | drm_crtc_handle_vblank(&ctx->crtc->base); | 978 | drm_crtc_handle_vblank(&ctx->crtc->base); |
963 | 979 | ||
964 | for (win = 0 ; win < WINDOWS_NR ; win++) { | ||
965 | struct exynos_drm_plane *plane = &ctx->planes[win]; | ||
966 | |||
967 | if (!plane->pending_fb) | ||
968 | continue; | ||
969 | |||
970 | start = readl(ctx->regs + VIDWx_BUF_START(win, 0)); | ||
971 | start_s = readl(ctx->regs + VIDWx_BUF_START_S(win, 0)); | ||
972 | if (start == start_s) | ||
973 | exynos_drm_crtc_finish_update(ctx->crtc, plane); | ||
974 | } | ||
975 | |||
976 | if (ctx->i80_if) { | 980 | if (ctx->i80_if) { |
977 | /* Exits triggering mode */ | 981 | /* Exits triggering mode */ |
978 | atomic_set(&ctx->triggering, 0); | 982 | atomic_set(&ctx->triggering, 0); |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 6eca8bb88648..aa92decf4233 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c | |||
@@ -138,6 +138,18 @@ enum g2d_reg_type { | |||
138 | MAX_REG_TYPE_NR | 138 | MAX_REG_TYPE_NR |
139 | }; | 139 | }; |
140 | 140 | ||
141 | enum g2d_flag_bits { | ||
142 | /* | ||
143 | * If set, suspends the runqueue worker after the currently | ||
144 | * processed node is finished. | ||
145 | */ | ||
146 | G2D_BIT_SUSPEND_RUNQUEUE, | ||
147 | /* | ||
148 | * If set, indicates that the engine is currently busy. | ||
149 | */ | ||
150 | G2D_BIT_ENGINE_BUSY, | ||
151 | }; | ||
152 | |||
141 | /* cmdlist data structure */ | 153 | /* cmdlist data structure */ |
142 | struct g2d_cmdlist { | 154 | struct g2d_cmdlist { |
143 | u32 head; | 155 | u32 head; |
@@ -226,7 +238,7 @@ struct g2d_data { | |||
226 | struct workqueue_struct *g2d_workq; | 238 | struct workqueue_struct *g2d_workq; |
227 | struct work_struct runqueue_work; | 239 | struct work_struct runqueue_work; |
228 | struct exynos_drm_subdrv subdrv; | 240 | struct exynos_drm_subdrv subdrv; |
229 | bool suspended; | 241 | unsigned long flags; |
230 | 242 | ||
231 | /* cmdlist */ | 243 | /* cmdlist */ |
232 | struct g2d_cmdlist_node *cmdlist_node; | 244 | struct g2d_cmdlist_node *cmdlist_node; |
@@ -246,6 +258,12 @@ struct g2d_data { | |||
246 | unsigned long max_pool; | 258 | unsigned long max_pool; |
247 | }; | 259 | }; |
248 | 260 | ||
261 | static inline void g2d_hw_reset(struct g2d_data *g2d) | ||
262 | { | ||
263 | writel(G2D_R | G2D_SFRCLEAR, g2d->regs + G2D_SOFT_RESET); | ||
264 | clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags); | ||
265 | } | ||
266 | |||
249 | static int g2d_init_cmdlist(struct g2d_data *g2d) | 267 | static int g2d_init_cmdlist(struct g2d_data *g2d) |
250 | { | 268 | { |
251 | struct device *dev = g2d->dev; | 269 | struct device *dev = g2d->dev; |
@@ -803,12 +821,8 @@ static void g2d_dma_start(struct g2d_data *g2d, | |||
803 | struct g2d_cmdlist_node *node = | 821 | struct g2d_cmdlist_node *node = |
804 | list_first_entry(&runqueue_node->run_cmdlist, | 822 | list_first_entry(&runqueue_node->run_cmdlist, |
805 | struct g2d_cmdlist_node, list); | 823 | struct g2d_cmdlist_node, list); |
806 | int ret; | ||
807 | |||
808 | ret = pm_runtime_get_sync(g2d->dev); | ||
809 | if (ret < 0) | ||
810 | return; | ||
811 | 824 | ||
825 | set_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags); | ||
812 | writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR); | 826 | writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR); |
813 | writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND); | 827 | writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND); |
814 | } | 828 | } |
@@ -831,9 +845,6 @@ static void g2d_free_runqueue_node(struct g2d_data *g2d, | |||
831 | { | 845 | { |
832 | struct g2d_cmdlist_node *node; | 846 | struct g2d_cmdlist_node *node; |
833 | 847 | ||
834 | if (!runqueue_node) | ||
835 | return; | ||
836 | |||
837 | mutex_lock(&g2d->cmdlist_mutex); | 848 | mutex_lock(&g2d->cmdlist_mutex); |
838 | /* | 849 | /* |
839 | * commands in run_cmdlist have been completed so unmap all gem | 850 | * commands in run_cmdlist have been completed so unmap all gem |
@@ -847,29 +858,65 @@ static void g2d_free_runqueue_node(struct g2d_data *g2d, | |||
847 | kmem_cache_free(g2d->runqueue_slab, runqueue_node); | 858 | kmem_cache_free(g2d->runqueue_slab, runqueue_node); |
848 | } | 859 | } |
849 | 860 | ||
850 | static void g2d_exec_runqueue(struct g2d_data *g2d) | 861 | /** |
862 | * g2d_remove_runqueue_nodes - remove items from the list of runqueue nodes | ||
863 | * @g2d: G2D state object | ||
864 | * @file: if not zero, only remove items with this DRM file | ||
865 | * | ||
866 | * Has to be called under runqueue lock. | ||
867 | */ | ||
868 | static void g2d_remove_runqueue_nodes(struct g2d_data *g2d, struct drm_file* file) | ||
851 | { | 869 | { |
852 | g2d->runqueue_node = g2d_get_runqueue_node(g2d); | 870 | struct g2d_runqueue_node *node, *n; |
853 | if (g2d->runqueue_node) | 871 | |
854 | g2d_dma_start(g2d, g2d->runqueue_node); | 872 | if (list_empty(&g2d->runqueue)) |
873 | return; | ||
874 | |||
875 | list_for_each_entry_safe(node, n, &g2d->runqueue, list) { | ||
876 | if (file && node->filp != file) | ||
877 | continue; | ||
878 | |||
879 | list_del_init(&node->list); | ||
880 | g2d_free_runqueue_node(g2d, node); | ||
881 | } | ||
855 | } | 882 | } |
856 | 883 | ||
857 | static void g2d_runqueue_worker(struct work_struct *work) | 884 | static void g2d_runqueue_worker(struct work_struct *work) |
858 | { | 885 | { |
859 | struct g2d_data *g2d = container_of(work, struct g2d_data, | 886 | struct g2d_data *g2d = container_of(work, struct g2d_data, |
860 | runqueue_work); | 887 | runqueue_work); |
888 | struct g2d_runqueue_node *runqueue_node; | ||
889 | |||
890 | /* | ||
891 | * The engine is busy and the completion of the current node is going | ||
892 | * to poke the runqueue worker, so nothing to do here. | ||
893 | */ | ||
894 | if (test_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags)) | ||
895 | return; | ||
861 | 896 | ||
862 | mutex_lock(&g2d->runqueue_mutex); | 897 | mutex_lock(&g2d->runqueue_mutex); |
863 | pm_runtime_put_sync(g2d->dev); | ||
864 | 898 | ||
865 | complete(&g2d->runqueue_node->complete); | 899 | runqueue_node = g2d->runqueue_node; |
866 | if (g2d->runqueue_node->async) | 900 | g2d->runqueue_node = NULL; |
867 | g2d_free_runqueue_node(g2d, g2d->runqueue_node); | 901 | |
902 | if (runqueue_node) { | ||
903 | pm_runtime_mark_last_busy(g2d->dev); | ||
904 | pm_runtime_put_autosuspend(g2d->dev); | ||
905 | |||
906 | complete(&runqueue_node->complete); | ||
907 | if (runqueue_node->async) | ||
908 | g2d_free_runqueue_node(g2d, runqueue_node); | ||
909 | } | ||
910 | |||
911 | if (!test_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags)) { | ||
912 | g2d->runqueue_node = g2d_get_runqueue_node(g2d); | ||
913 | |||
914 | if (g2d->runqueue_node) { | ||
915 | pm_runtime_get_sync(g2d->dev); | ||
916 | g2d_dma_start(g2d, g2d->runqueue_node); | ||
917 | } | ||
918 | } | ||
868 | 919 | ||
869 | if (g2d->suspended) | ||
870 | g2d->runqueue_node = NULL; | ||
871 | else | ||
872 | g2d_exec_runqueue(g2d); | ||
873 | mutex_unlock(&g2d->runqueue_mutex); | 920 | mutex_unlock(&g2d->runqueue_mutex); |
874 | } | 921 | } |
875 | 922 | ||
@@ -918,12 +965,72 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id) | |||
918 | } | 965 | } |
919 | } | 966 | } |
920 | 967 | ||
921 | if (pending & G2D_INTP_ACMD_FIN) | 968 | if (pending & G2D_INTP_ACMD_FIN) { |
969 | clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags); | ||
922 | queue_work(g2d->g2d_workq, &g2d->runqueue_work); | 970 | queue_work(g2d->g2d_workq, &g2d->runqueue_work); |
971 | } | ||
923 | 972 | ||
924 | return IRQ_HANDLED; | 973 | return IRQ_HANDLED; |
925 | } | 974 | } |
926 | 975 | ||
976 | /** | ||
977 | * g2d_wait_finish - wait for the G2D engine to finish the current runqueue node | ||
978 | * @g2d: G2D state object | ||
979 | * @file: if not zero, only wait if the current runqueue node belongs | ||
980 | * to the DRM file | ||
981 | * | ||
982 | * Should the engine not become idle after a 100ms timeout, a hardware | ||
983 | * reset is issued. | ||
984 | */ | ||
985 | static void g2d_wait_finish(struct g2d_data *g2d, struct drm_file *file) | ||
986 | { | ||
987 | struct device *dev = g2d->dev; | ||
988 | |||
989 | struct g2d_runqueue_node *runqueue_node = NULL; | ||
990 | unsigned int tries = 10; | ||
991 | |||
992 | mutex_lock(&g2d->runqueue_mutex); | ||
993 | |||
994 | /* If no node is currently processed, we have nothing to do. */ | ||
995 | if (!g2d->runqueue_node) | ||
996 | goto out; | ||
997 | |||
998 | runqueue_node = g2d->runqueue_node; | ||
999 | |||
1000 | /* Check if the currently processed item belongs to us. */ | ||
1001 | if (file && runqueue_node->filp != file) | ||
1002 | goto out; | ||
1003 | |||
1004 | mutex_unlock(&g2d->runqueue_mutex); | ||
1005 | |||
1006 | /* Wait for the G2D engine to finish. */ | ||
1007 | while (tries-- && (g2d->runqueue_node == runqueue_node)) | ||
1008 | mdelay(10); | ||
1009 | |||
1010 | mutex_lock(&g2d->runqueue_mutex); | ||
1011 | |||
1012 | if (g2d->runqueue_node != runqueue_node) | ||
1013 | goto out; | ||
1014 | |||
1015 | dev_err(dev, "wait timed out, resetting engine...\n"); | ||
1016 | g2d_hw_reset(g2d); | ||
1017 | |||
1018 | /* | ||
1019 | * After the hardware reset of the engine we are going to loose | ||
1020 | * the IRQ which triggers the PM runtime put(). | ||
1021 | * So do this manually here. | ||
1022 | */ | ||
1023 | pm_runtime_mark_last_busy(dev); | ||
1024 | pm_runtime_put_autosuspend(dev); | ||
1025 | |||
1026 | complete(&runqueue_node->complete); | ||
1027 | if (runqueue_node->async) | ||
1028 | g2d_free_runqueue_node(g2d, runqueue_node); | ||
1029 | |||
1030 | out: | ||
1031 | mutex_unlock(&g2d->runqueue_mutex); | ||
1032 | } | ||
1033 | |||
927 | static int g2d_check_reg_offset(struct device *dev, | 1034 | static int g2d_check_reg_offset(struct device *dev, |
928 | struct g2d_cmdlist_node *node, | 1035 | struct g2d_cmdlist_node *node, |
929 | int nr, bool for_addr) | 1036 | int nr, bool for_addr) |
@@ -1259,10 +1366,11 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, | |||
1259 | runqueue_node->pid = current->pid; | 1366 | runqueue_node->pid = current->pid; |
1260 | runqueue_node->filp = file; | 1367 | runqueue_node->filp = file; |
1261 | list_add_tail(&runqueue_node->list, &g2d->runqueue); | 1368 | list_add_tail(&runqueue_node->list, &g2d->runqueue); |
1262 | if (!g2d->runqueue_node) | ||
1263 | g2d_exec_runqueue(g2d); | ||
1264 | mutex_unlock(&g2d->runqueue_mutex); | 1369 | mutex_unlock(&g2d->runqueue_mutex); |
1265 | 1370 | ||
1371 | /* Let the runqueue know that there is work to do. */ | ||
1372 | queue_work(g2d->g2d_workq, &g2d->runqueue_work); | ||
1373 | |||
1266 | if (runqueue_node->async) | 1374 | if (runqueue_node->async) |
1267 | goto out; | 1375 | goto out; |
1268 | 1376 | ||
@@ -1339,15 +1447,26 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev, | |||
1339 | if (!g2d) | 1447 | if (!g2d) |
1340 | return; | 1448 | return; |
1341 | 1449 | ||
1450 | /* Remove the runqueue nodes that belong to us. */ | ||
1451 | mutex_lock(&g2d->runqueue_mutex); | ||
1452 | g2d_remove_runqueue_nodes(g2d, file); | ||
1453 | mutex_unlock(&g2d->runqueue_mutex); | ||
1454 | |||
1455 | /* | ||
1456 | * Wait for the runqueue worker to finish its current node. | ||
1457 | * After this the engine should no longer be accessing any | ||
1458 | * memory belonging to us. | ||
1459 | */ | ||
1460 | g2d_wait_finish(g2d, file); | ||
1461 | |||
1462 | /* | ||
1463 | * Even after the engine is idle, there might still be stale cmdlists | ||
1464 | * (i.e. cmdlists which we submitted but never executed) around, with | ||
1465 | * their corresponding GEM/userptr buffers. | ||
1466 | * Properly unmap these buffers here. | ||
1467 | */ | ||
1342 | mutex_lock(&g2d->cmdlist_mutex); | 1468 | mutex_lock(&g2d->cmdlist_mutex); |
1343 | list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) { | 1469 | list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) { |
1344 | /* | ||
1345 | * unmap all gem objects not completed. | ||
1346 | * | ||
1347 | * P.S. if current process was terminated forcely then | ||
1348 | * there may be some commands in inuse_cmdlist so unmap | ||
1349 | * them. | ||
1350 | */ | ||
1351 | g2d_unmap_cmdlist_gem(g2d, node, file); | 1470 | g2d_unmap_cmdlist_gem(g2d, node, file); |
1352 | list_move_tail(&node->list, &g2d->free_cmdlist); | 1471 | list_move_tail(&node->list, &g2d->free_cmdlist); |
1353 | } | 1472 | } |
@@ -1399,7 +1518,11 @@ static int g2d_probe(struct platform_device *pdev) | |||
1399 | goto err_destroy_workqueue; | 1518 | goto err_destroy_workqueue; |
1400 | } | 1519 | } |
1401 | 1520 | ||
1521 | pm_runtime_use_autosuspend(dev); | ||
1522 | pm_runtime_set_autosuspend_delay(dev, 2000); | ||
1402 | pm_runtime_enable(dev); | 1523 | pm_runtime_enable(dev); |
1524 | clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags); | ||
1525 | clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags); | ||
1403 | 1526 | ||
1404 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1527 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1405 | 1528 | ||
@@ -1440,7 +1563,7 @@ static int g2d_probe(struct platform_device *pdev) | |||
1440 | goto err_put_clk; | 1563 | goto err_put_clk; |
1441 | } | 1564 | } |
1442 | 1565 | ||
1443 | dev_info(dev, "The exynos g2d(ver %d.%d) successfully probed\n", | 1566 | dev_info(dev, "The Exynos G2D (ver %d.%d) successfully probed.\n", |
1444 | G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER); | 1567 | G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER); |
1445 | 1568 | ||
1446 | return 0; | 1569 | return 0; |
@@ -1458,14 +1581,17 @@ static int g2d_remove(struct platform_device *pdev) | |||
1458 | { | 1581 | { |
1459 | struct g2d_data *g2d = platform_get_drvdata(pdev); | 1582 | struct g2d_data *g2d = platform_get_drvdata(pdev); |
1460 | 1583 | ||
1584 | /* Suspend operation and wait for engine idle. */ | ||
1585 | set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags); | ||
1586 | g2d_wait_finish(g2d, NULL); | ||
1587 | |||
1461 | cancel_work_sync(&g2d->runqueue_work); | 1588 | cancel_work_sync(&g2d->runqueue_work); |
1462 | exynos_drm_subdrv_unregister(&g2d->subdrv); | 1589 | exynos_drm_subdrv_unregister(&g2d->subdrv); |
1463 | 1590 | ||
1464 | while (g2d->runqueue_node) { | 1591 | /* There should be no locking needed here. */ |
1465 | g2d_free_runqueue_node(g2d, g2d->runqueue_node); | 1592 | g2d_remove_runqueue_nodes(g2d, NULL); |
1466 | g2d->runqueue_node = g2d_get_runqueue_node(g2d); | ||
1467 | } | ||
1468 | 1593 | ||
1594 | pm_runtime_dont_use_autosuspend(&pdev->dev); | ||
1469 | pm_runtime_disable(&pdev->dev); | 1595 | pm_runtime_disable(&pdev->dev); |
1470 | 1596 | ||
1471 | g2d_fini_cmdlist(g2d); | 1597 | g2d_fini_cmdlist(g2d); |
@@ -1475,20 +1601,37 @@ static int g2d_remove(struct platform_device *pdev) | |||
1475 | return 0; | 1601 | return 0; |
1476 | } | 1602 | } |
1477 | 1603 | ||
1478 | #ifdef CONFIG_PM | 1604 | #ifdef CONFIG_PM_SLEEP |
1479 | static int g2d_runtime_suspend(struct device *dev) | 1605 | static int g2d_suspend(struct device *dev) |
1480 | { | 1606 | { |
1481 | struct g2d_data *g2d = dev_get_drvdata(dev); | 1607 | struct g2d_data *g2d = dev_get_drvdata(dev); |
1482 | 1608 | ||
1483 | mutex_lock(&g2d->runqueue_mutex); | 1609 | /* |
1484 | g2d->suspended = true; | 1610 | * Suspend the runqueue worker operation and wait until the G2D |
1485 | mutex_unlock(&g2d->runqueue_mutex); | 1611 | * engine is idle. |
1612 | */ | ||
1613 | set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags); | ||
1614 | g2d_wait_finish(g2d, NULL); | ||
1615 | flush_work(&g2d->runqueue_work); | ||
1486 | 1616 | ||
1487 | while (g2d->runqueue_node) | 1617 | return 0; |
1488 | /* FIXME: good range? */ | 1618 | } |
1489 | usleep_range(500, 1000); | ||
1490 | 1619 | ||
1491 | flush_work(&g2d->runqueue_work); | 1620 | static int g2d_resume(struct device *dev) |
1621 | { | ||
1622 | struct g2d_data *g2d = dev_get_drvdata(dev); | ||
1623 | |||
1624 | clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags); | ||
1625 | queue_work(g2d->g2d_workq, &g2d->runqueue_work); | ||
1626 | |||
1627 | return 0; | ||
1628 | } | ||
1629 | #endif | ||
1630 | |||
1631 | #ifdef CONFIG_PM | ||
1632 | static int g2d_runtime_suspend(struct device *dev) | ||
1633 | { | ||
1634 | struct g2d_data *g2d = dev_get_drvdata(dev); | ||
1492 | 1635 | ||
1493 | clk_disable_unprepare(g2d->gate_clk); | 1636 | clk_disable_unprepare(g2d->gate_clk); |
1494 | 1637 | ||
@@ -1504,16 +1647,12 @@ static int g2d_runtime_resume(struct device *dev) | |||
1504 | if (ret < 0) | 1647 | if (ret < 0) |
1505 | dev_warn(dev, "failed to enable clock.\n"); | 1648 | dev_warn(dev, "failed to enable clock.\n"); |
1506 | 1649 | ||
1507 | g2d->suspended = false; | ||
1508 | g2d_exec_runqueue(g2d); | ||
1509 | |||
1510 | return ret; | 1650 | return ret; |
1511 | } | 1651 | } |
1512 | #endif | 1652 | #endif |
1513 | 1653 | ||
1514 | static const struct dev_pm_ops g2d_pm_ops = { | 1654 | static const struct dev_pm_ops g2d_pm_ops = { |
1515 | SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, | 1655 | SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume) |
1516 | pm_runtime_force_resume) | ||
1517 | SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL) | 1656 | SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL) |
1518 | }; | 1657 | }; |
1519 | 1658 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index 7f32419b25ea..c2f17f30afab 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c | |||
@@ -238,7 +238,6 @@ static void exynos_plane_atomic_update(struct drm_plane *plane, | |||
238 | return; | 238 | return; |
239 | 239 | ||
240 | plane->crtc = state->crtc; | 240 | plane->crtc = state->crtc; |
241 | exynos_plane->pending_fb = state->fb; | ||
242 | 241 | ||
243 | if (exynos_crtc->ops->update_plane) | 242 | if (exynos_crtc->ops->update_plane) |
244 | exynos_crtc->ops->update_plane(exynos_crtc, exynos_plane); | 243 | exynos_crtc->ops->update_plane(exynos_crtc, exynos_plane); |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index e8f6c92b2a36..57fe514d5c5b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
17 | #include <linux/component.h> | 17 | #include <linux/component.h> |
18 | #include <linux/timer.h> | ||
18 | 19 | ||
19 | #include <drm/exynos_drm.h> | 20 | #include <drm/exynos_drm.h> |
20 | 21 | ||
@@ -28,6 +29,9 @@ | |||
28 | #include "exynos_drm_plane.h" | 29 | #include "exynos_drm_plane.h" |
29 | #include "exynos_drm_vidi.h" | 30 | #include "exynos_drm_vidi.h" |
30 | 31 | ||
32 | /* VIDI uses fixed refresh rate of 50Hz */ | ||
33 | #define VIDI_REFRESH_TIME (1000 / 50) | ||
34 | |||
31 | /* vidi has totally three virtual windows. */ | 35 | /* vidi has totally three virtual windows. */ |
32 | #define WINDOWS_NR 3 | 36 | #define WINDOWS_NR 3 |
33 | 37 | ||
@@ -43,12 +47,9 @@ struct vidi_context { | |||
43 | struct exynos_drm_plane planes[WINDOWS_NR]; | 47 | struct exynos_drm_plane planes[WINDOWS_NR]; |
44 | struct edid *raw_edid; | 48 | struct edid *raw_edid; |
45 | unsigned int clkdiv; | 49 | unsigned int clkdiv; |
46 | unsigned long irq_flags; | ||
47 | unsigned int connected; | 50 | unsigned int connected; |
48 | bool vblank_on; | ||
49 | bool suspended; | 51 | bool suspended; |
50 | bool direct_vblank; | 52 | struct timer_list timer; |
51 | struct work_struct work; | ||
52 | struct mutex lock; | 53 | struct mutex lock; |
53 | int pipe; | 54 | int pipe; |
54 | }; | 55 | }; |
@@ -102,30 +103,14 @@ static int vidi_enable_vblank(struct exynos_drm_crtc *crtc) | |||
102 | if (ctx->suspended) | 103 | if (ctx->suspended) |
103 | return -EPERM; | 104 | return -EPERM; |
104 | 105 | ||
105 | if (!test_and_set_bit(0, &ctx->irq_flags)) | 106 | mod_timer(&ctx->timer, |
106 | ctx->vblank_on = true; | 107 | jiffies + msecs_to_jiffies(VIDI_REFRESH_TIME) - 1); |
107 | |||
108 | ctx->direct_vblank = true; | ||
109 | |||
110 | /* | ||
111 | * in case of page flip request, vidi_finish_pageflip function | ||
112 | * will not be called because direct_vblank is true and then | ||
113 | * that function will be called by crtc_ops->update_plane callback | ||
114 | */ | ||
115 | schedule_work(&ctx->work); | ||
116 | 108 | ||
117 | return 0; | 109 | return 0; |
118 | } | 110 | } |
119 | 111 | ||
120 | static void vidi_disable_vblank(struct exynos_drm_crtc *crtc) | 112 | static void vidi_disable_vblank(struct exynos_drm_crtc *crtc) |
121 | { | 113 | { |
122 | struct vidi_context *ctx = crtc->ctx; | ||
123 | |||
124 | if (ctx->suspended) | ||
125 | return; | ||
126 | |||
127 | if (test_and_clear_bit(0, &ctx->irq_flags)) | ||
128 | ctx->vblank_on = false; | ||
129 | } | 114 | } |
130 | 115 | ||
131 | static void vidi_update_plane(struct exynos_drm_crtc *crtc, | 116 | static void vidi_update_plane(struct exynos_drm_crtc *crtc, |
@@ -140,9 +125,6 @@ static void vidi_update_plane(struct exynos_drm_crtc *crtc, | |||
140 | 125 | ||
141 | addr = exynos_drm_fb_dma_addr(state->fb, 0); | 126 | addr = exynos_drm_fb_dma_addr(state->fb, 0); |
142 | DRM_DEBUG_KMS("dma_addr = %pad\n", &addr); | 127 | DRM_DEBUG_KMS("dma_addr = %pad\n", &addr); |
143 | |||
144 | if (ctx->vblank_on) | ||
145 | schedule_work(&ctx->work); | ||
146 | } | 128 | } |
147 | 129 | ||
148 | static void vidi_enable(struct exynos_drm_crtc *crtc) | 130 | static void vidi_enable(struct exynos_drm_crtc *crtc) |
@@ -153,17 +135,17 @@ static void vidi_enable(struct exynos_drm_crtc *crtc) | |||
153 | 135 | ||
154 | ctx->suspended = false; | 136 | ctx->suspended = false; |
155 | 137 | ||
156 | /* if vblank was enabled status, enable it again. */ | ||
157 | if (test_and_clear_bit(0, &ctx->irq_flags)) | ||
158 | vidi_enable_vblank(ctx->crtc); | ||
159 | |||
160 | mutex_unlock(&ctx->lock); | 138 | mutex_unlock(&ctx->lock); |
139 | |||
140 | drm_crtc_vblank_on(&crtc->base); | ||
161 | } | 141 | } |
162 | 142 | ||
163 | static void vidi_disable(struct exynos_drm_crtc *crtc) | 143 | static void vidi_disable(struct exynos_drm_crtc *crtc) |
164 | { | 144 | { |
165 | struct vidi_context *ctx = crtc->ctx; | 145 | struct vidi_context *ctx = crtc->ctx; |
166 | 146 | ||
147 | drm_crtc_vblank_off(&crtc->base); | ||
148 | |||
167 | mutex_lock(&ctx->lock); | 149 | mutex_lock(&ctx->lock); |
168 | 150 | ||
169 | ctx->suspended = true; | 151 | ctx->suspended = true; |
@@ -190,37 +172,16 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = { | |||
190 | .update_plane = vidi_update_plane, | 172 | .update_plane = vidi_update_plane, |
191 | }; | 173 | }; |
192 | 174 | ||
193 | static void vidi_fake_vblank_handler(struct work_struct *work) | 175 | static void vidi_fake_vblank_timer(unsigned long arg) |
194 | { | 176 | { |
195 | struct vidi_context *ctx = container_of(work, struct vidi_context, | 177 | struct vidi_context *ctx = (void *)arg; |
196 | work); | ||
197 | int win; | ||
198 | 178 | ||
199 | if (ctx->pipe < 0) | 179 | if (ctx->pipe < 0) |
200 | return; | 180 | return; |
201 | 181 | ||
202 | /* refresh rate is about 50Hz. */ | 182 | if (drm_crtc_handle_vblank(&ctx->crtc->base)) |
203 | usleep_range(16000, 20000); | 183 | mod_timer(&ctx->timer, |
204 | 184 | jiffies + msecs_to_jiffies(VIDI_REFRESH_TIME) - 1); | |
205 | mutex_lock(&ctx->lock); | ||
206 | |||
207 | if (ctx->direct_vblank) { | ||
208 | drm_crtc_handle_vblank(&ctx->crtc->base); | ||
209 | ctx->direct_vblank = false; | ||
210 | mutex_unlock(&ctx->lock); | ||
211 | return; | ||
212 | } | ||
213 | |||
214 | mutex_unlock(&ctx->lock); | ||
215 | |||
216 | for (win = 0 ; win < WINDOWS_NR ; win++) { | ||
217 | struct exynos_drm_plane *plane = &ctx->planes[win]; | ||
218 | |||
219 | if (!plane->pending_fb) | ||
220 | continue; | ||
221 | |||
222 | exynos_drm_crtc_finish_update(ctx->crtc, plane); | ||
223 | } | ||
224 | } | 185 | } |
225 | 186 | ||
226 | static ssize_t vidi_show_connection(struct device *dev, | 187 | static ssize_t vidi_show_connection(struct device *dev, |
@@ -489,6 +450,9 @@ static int vidi_bind(struct device *dev, struct device *master, void *data) | |||
489 | 450 | ||
490 | static void vidi_unbind(struct device *dev, struct device *master, void *data) | 451 | static void vidi_unbind(struct device *dev, struct device *master, void *data) |
491 | { | 452 | { |
453 | struct vidi_context *ctx = dev_get_drvdata(dev); | ||
454 | |||
455 | del_timer_sync(&ctx->timer); | ||
492 | } | 456 | } |
493 | 457 | ||
494 | static const struct component_ops vidi_component_ops = { | 458 | static const struct component_ops vidi_component_ops = { |
@@ -507,7 +471,7 @@ static int vidi_probe(struct platform_device *pdev) | |||
507 | 471 | ||
508 | ctx->pdev = pdev; | 472 | ctx->pdev = pdev; |
509 | 473 | ||
510 | INIT_WORK(&ctx->work, vidi_fake_vblank_handler); | 474 | setup_timer(&ctx->timer, vidi_fake_vblank_timer, (unsigned long)ctx); |
511 | 475 | ||
512 | mutex_init(&ctx->lock); | 476 | mutex_init(&ctx->lock); |
513 | 477 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 2275efe41acd..e8fb6ef947ee 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c | |||
@@ -1669,10 +1669,9 @@ static int hdmi_resources_init(struct hdmi_context *hdata) | |||
1669 | if (ret) | 1669 | if (ret) |
1670 | return ret; | 1670 | return ret; |
1671 | 1671 | ||
1672 | for (i = 0; i < ARRAY_SIZE(supply); ++i) { | 1672 | for (i = 0; i < ARRAY_SIZE(supply); ++i) |
1673 | hdata->regul_bulk[i].supply = supply[i]; | 1673 | hdata->regul_bulk[i].supply = supply[i]; |
1674 | hdata->regul_bulk[i].consumer = NULL; | 1674 | |
1675 | } | ||
1676 | ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), hdata->regul_bulk); | 1675 | ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), hdata->regul_bulk); |
1677 | if (ret) { | 1676 | if (ret) { |
1678 | if (ret != -EPROBE_DEFER) | 1677 | if (ret != -EPROBE_DEFER) |
@@ -1760,28 +1759,74 @@ static const struct component_ops hdmi_component_ops = { | |||
1760 | .unbind = hdmi_unbind, | 1759 | .unbind = hdmi_unbind, |
1761 | }; | 1760 | }; |
1762 | 1761 | ||
1763 | static struct device_node *hdmi_legacy_ddc_dt_binding(struct device *dev) | 1762 | static int hdmi_get_ddc_adapter(struct hdmi_context *hdata) |
1764 | { | 1763 | { |
1765 | const char *compatible_str = "samsung,exynos4210-hdmiddc"; | 1764 | const char *compatible_str = "samsung,exynos4210-hdmiddc"; |
1766 | struct device_node *np; | 1765 | struct device_node *np; |
1766 | struct i2c_adapter *adpt; | ||
1767 | 1767 | ||
1768 | np = of_find_compatible_node(NULL, NULL, compatible_str); | 1768 | np = of_find_compatible_node(NULL, NULL, compatible_str); |
1769 | if (np) | 1769 | if (np) |
1770 | return of_get_next_parent(np); | 1770 | np = of_get_next_parent(np); |
1771 | else | ||
1772 | np = of_parse_phandle(hdata->dev->of_node, "ddc", 0); | ||
1773 | |||
1774 | if (!np) { | ||
1775 | DRM_ERROR("Failed to find ddc node in device tree\n"); | ||
1776 | return -ENODEV; | ||
1777 | } | ||
1771 | 1778 | ||
1772 | return NULL; | 1779 | adpt = of_find_i2c_adapter_by_node(np); |
1780 | of_node_put(np); | ||
1781 | |||
1782 | if (!adpt) { | ||
1783 | DRM_INFO("Failed to get ddc i2c adapter by node\n"); | ||
1784 | return -EPROBE_DEFER; | ||
1785 | } | ||
1786 | |||
1787 | hdata->ddc_adpt = adpt; | ||
1788 | |||
1789 | return 0; | ||
1773 | } | 1790 | } |
1774 | 1791 | ||
1775 | static struct device_node *hdmi_legacy_phy_dt_binding(struct device *dev) | 1792 | static int hdmi_get_phy_io(struct hdmi_context *hdata) |
1776 | { | 1793 | { |
1777 | const char *compatible_str = "samsung,exynos4212-hdmiphy"; | 1794 | const char *compatible_str = "samsung,exynos4212-hdmiphy"; |
1795 | struct device_node *np; | ||
1796 | int ret = 0; | ||
1797 | |||
1798 | np = of_find_compatible_node(NULL, NULL, compatible_str); | ||
1799 | if (!np) { | ||
1800 | np = of_parse_phandle(hdata->dev->of_node, "phy", 0); | ||
1801 | if (!np) { | ||
1802 | DRM_ERROR("Failed to find hdmiphy node in device tree\n"); | ||
1803 | return -ENODEV; | ||
1804 | } | ||
1805 | } | ||
1806 | |||
1807 | if (hdata->drv_data->is_apb_phy) { | ||
1808 | hdata->regs_hdmiphy = of_iomap(np, 0); | ||
1809 | if (!hdata->regs_hdmiphy) { | ||
1810 | DRM_ERROR("failed to ioremap hdmi phy\n"); | ||
1811 | ret = -ENOMEM; | ||
1812 | goto out; | ||
1813 | } | ||
1814 | } else { | ||
1815 | hdata->hdmiphy_port = of_find_i2c_device_by_node(np); | ||
1816 | if (!hdata->hdmiphy_port) { | ||
1817 | DRM_INFO("Failed to get hdmi phy i2c client\n"); | ||
1818 | ret = -EPROBE_DEFER; | ||
1819 | goto out; | ||
1820 | } | ||
1821 | } | ||
1778 | 1822 | ||
1779 | return of_find_compatible_node(NULL, NULL, compatible_str); | 1823 | out: |
1824 | of_node_put(np); | ||
1825 | return ret; | ||
1780 | } | 1826 | } |
1781 | 1827 | ||
1782 | static int hdmi_probe(struct platform_device *pdev) | 1828 | static int hdmi_probe(struct platform_device *pdev) |
1783 | { | 1829 | { |
1784 | struct device_node *ddc_node, *phy_node; | ||
1785 | struct device *dev = &pdev->dev; | 1830 | struct device *dev = &pdev->dev; |
1786 | struct hdmi_context *hdata; | 1831 | struct hdmi_context *hdata; |
1787 | struct resource *res; | 1832 | struct resource *res; |
@@ -1811,52 +1856,13 @@ static int hdmi_probe(struct platform_device *pdev) | |||
1811 | return ret; | 1856 | return ret; |
1812 | } | 1857 | } |
1813 | 1858 | ||
1814 | ddc_node = hdmi_legacy_ddc_dt_binding(dev); | 1859 | ret = hdmi_get_ddc_adapter(hdata); |
1815 | if (ddc_node) | 1860 | if (ret) |
1816 | goto out_get_ddc_adpt; | 1861 | return ret; |
1817 | |||
1818 | ddc_node = of_parse_phandle(dev->of_node, "ddc", 0); | ||
1819 | if (!ddc_node) { | ||
1820 | DRM_ERROR("Failed to find ddc node in device tree\n"); | ||
1821 | return -ENODEV; | ||
1822 | } | ||
1823 | of_node_put(dev->of_node); | ||
1824 | |||
1825 | out_get_ddc_adpt: | ||
1826 | hdata->ddc_adpt = of_find_i2c_adapter_by_node(ddc_node); | ||
1827 | if (!hdata->ddc_adpt) { | ||
1828 | DRM_ERROR("Failed to get ddc i2c adapter by node\n"); | ||
1829 | return -EPROBE_DEFER; | ||
1830 | } | ||
1831 | |||
1832 | phy_node = hdmi_legacy_phy_dt_binding(dev); | ||
1833 | if (phy_node) | ||
1834 | goto out_get_phy_port; | ||
1835 | 1862 | ||
1836 | phy_node = of_parse_phandle(dev->of_node, "phy", 0); | 1863 | ret = hdmi_get_phy_io(hdata); |
1837 | if (!phy_node) { | 1864 | if (ret) |
1838 | DRM_ERROR("Failed to find hdmiphy node in device tree\n"); | ||
1839 | ret = -ENODEV; | ||
1840 | goto err_ddc; | 1865 | goto err_ddc; |
1841 | } | ||
1842 | of_node_put(dev->of_node); | ||
1843 | |||
1844 | out_get_phy_port: | ||
1845 | if (hdata->drv_data->is_apb_phy) { | ||
1846 | hdata->regs_hdmiphy = of_iomap(phy_node, 0); | ||
1847 | if (!hdata->regs_hdmiphy) { | ||
1848 | DRM_ERROR("failed to ioremap hdmi phy\n"); | ||
1849 | ret = -ENOMEM; | ||
1850 | goto err_ddc; | ||
1851 | } | ||
1852 | } else { | ||
1853 | hdata->hdmiphy_port = of_find_i2c_device_by_node(phy_node); | ||
1854 | if (!hdata->hdmiphy_port) { | ||
1855 | DRM_ERROR("Failed to get hdmi phy i2c client\n"); | ||
1856 | ret = -EPROBE_DEFER; | ||
1857 | goto err_ddc; | ||
1858 | } | ||
1859 | } | ||
1860 | 1866 | ||
1861 | INIT_DELAYED_WORK(&hdata->hotplug_work, hdmi_hotplug_work_func); | 1867 | INIT_DELAYED_WORK(&hdata->hotplug_work, hdmi_hotplug_work_func); |
1862 | 1868 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index e1d47f9435fc..edb20a34c66c 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c | |||
@@ -73,6 +73,9 @@ enum mixer_version_id { | |||
73 | enum mixer_flag_bits { | 73 | enum mixer_flag_bits { |
74 | MXR_BIT_POWERED, | 74 | MXR_BIT_POWERED, |
75 | MXR_BIT_VSYNC, | 75 | MXR_BIT_VSYNC, |
76 | MXR_BIT_INTERLACE, | ||
77 | MXR_BIT_VP_ENABLED, | ||
78 | MXR_BIT_HAS_SCLK, | ||
76 | }; | 79 | }; |
77 | 80 | ||
78 | static const uint32_t mixer_formats[] = { | 81 | static const uint32_t mixer_formats[] = { |
@@ -98,9 +101,6 @@ struct mixer_context { | |||
98 | struct exynos_drm_plane planes[MIXER_WIN_NR]; | 101 | struct exynos_drm_plane planes[MIXER_WIN_NR]; |
99 | int pipe; | 102 | int pipe; |
100 | unsigned long flags; | 103 | unsigned long flags; |
101 | bool interlace; | ||
102 | bool vp_enabled; | ||
103 | bool has_sclk; | ||
104 | 104 | ||
105 | struct mixer_resources mixer_res; | 105 | struct mixer_resources mixer_res; |
106 | enum mixer_version_id mxr_ver; | 106 | enum mixer_version_id mxr_ver; |
@@ -346,7 +346,7 @@ static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable) | |||
346 | mixer_reg_writemask(res, MXR_STATUS, enable ? | 346 | mixer_reg_writemask(res, MXR_STATUS, enable ? |
347 | MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE); | 347 | MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE); |
348 | 348 | ||
349 | if (ctx->vp_enabled) | 349 | if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) |
350 | vp_reg_write(res, VP_SHADOW_UPDATE, enable ? | 350 | vp_reg_write(res, VP_SHADOW_UPDATE, enable ? |
351 | VP_SHADOW_UPDATE_ENABLE : 0); | 351 | VP_SHADOW_UPDATE_ENABLE : 0); |
352 | } | 352 | } |
@@ -357,8 +357,8 @@ static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height) | |||
357 | u32 val; | 357 | u32 val; |
358 | 358 | ||
359 | /* choosing between interlace and progressive mode */ | 359 | /* choosing between interlace and progressive mode */ |
360 | val = (ctx->interlace ? MXR_CFG_SCAN_INTERLACE : | 360 | val = test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? |
361 | MXR_CFG_SCAN_PROGRESSIVE); | 361 | MXR_CFG_SCAN_INTERLACE : MXR_CFG_SCAN_PROGRESSIVE; |
362 | 362 | ||
363 | if (ctx->mxr_ver != MXR_VER_128_0_0_184) { | 363 | if (ctx->mxr_ver != MXR_VER_128_0_0_184) { |
364 | /* choosing between proper HD and SD mode */ | 364 | /* choosing between proper HD and SD mode */ |
@@ -436,9 +436,10 @@ static void mixer_cfg_layer(struct mixer_context *ctx, unsigned int win, | |||
436 | mixer_reg_writemask(res, MXR_LAYER_CFG, | 436 | mixer_reg_writemask(res, MXR_LAYER_CFG, |
437 | MXR_LAYER_CFG_GRP1_VAL(priority), | 437 | MXR_LAYER_CFG_GRP1_VAL(priority), |
438 | MXR_LAYER_CFG_GRP1_MASK); | 438 | MXR_LAYER_CFG_GRP1_MASK); |
439 | |||
439 | break; | 440 | break; |
440 | case VP_DEFAULT_WIN: | 441 | case VP_DEFAULT_WIN: |
441 | if (ctx->vp_enabled) { | 442 | if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) { |
442 | vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON); | 443 | vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON); |
443 | mixer_reg_writemask(res, MXR_CFG, val, | 444 | mixer_reg_writemask(res, MXR_CFG, val, |
444 | MXR_CFG_VP_ENABLE); | 445 | MXR_CFG_VP_ENABLE); |
@@ -501,7 +502,7 @@ static void vp_video_buffer(struct mixer_context *ctx, | |||
501 | chroma_addr[0] = exynos_drm_fb_dma_addr(fb, 1); | 502 | chroma_addr[0] = exynos_drm_fb_dma_addr(fb, 1); |
502 | 503 | ||
503 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) { | 504 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) { |
504 | ctx->interlace = true; | 505 | __set_bit(MXR_BIT_INTERLACE, &ctx->flags); |
505 | if (tiled_mode) { | 506 | if (tiled_mode) { |
506 | luma_addr[1] = luma_addr[0] + 0x40; | 507 | luma_addr[1] = luma_addr[0] + 0x40; |
507 | chroma_addr[1] = chroma_addr[0] + 0x40; | 508 | chroma_addr[1] = chroma_addr[0] + 0x40; |
@@ -510,7 +511,7 @@ static void vp_video_buffer(struct mixer_context *ctx, | |||
510 | chroma_addr[1] = chroma_addr[0] + fb->pitches[0]; | 511 | chroma_addr[1] = chroma_addr[0] + fb->pitches[0]; |
511 | } | 512 | } |
512 | } else { | 513 | } else { |
513 | ctx->interlace = false; | 514 | __clear_bit(MXR_BIT_INTERLACE, &ctx->flags); |
514 | luma_addr[1] = 0; | 515 | luma_addr[1] = 0; |
515 | chroma_addr[1] = 0; | 516 | chroma_addr[1] = 0; |
516 | } | 517 | } |
@@ -518,7 +519,7 @@ static void vp_video_buffer(struct mixer_context *ctx, | |||
518 | spin_lock_irqsave(&res->reg_slock, flags); | 519 | spin_lock_irqsave(&res->reg_slock, flags); |
519 | 520 | ||
520 | /* interlace or progressive scan mode */ | 521 | /* interlace or progressive scan mode */ |
521 | val = (ctx->interlace ? ~0 : 0); | 522 | val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0); |
522 | vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP); | 523 | vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP); |
523 | 524 | ||
524 | /* setup format */ | 525 | /* setup format */ |
@@ -541,7 +542,7 @@ static void vp_video_buffer(struct mixer_context *ctx, | |||
541 | 542 | ||
542 | vp_reg_write(res, VP_DST_WIDTH, state->crtc.w); | 543 | vp_reg_write(res, VP_DST_WIDTH, state->crtc.w); |
543 | vp_reg_write(res, VP_DST_H_POSITION, state->crtc.x); | 544 | vp_reg_write(res, VP_DST_H_POSITION, state->crtc.x); |
544 | if (ctx->interlace) { | 545 | if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { |
545 | vp_reg_write(res, VP_DST_HEIGHT, state->crtc.h / 2); | 546 | vp_reg_write(res, VP_DST_HEIGHT, state->crtc.h / 2); |
546 | vp_reg_write(res, VP_DST_V_POSITION, state->crtc.y / 2); | 547 | vp_reg_write(res, VP_DST_V_POSITION, state->crtc.y / 2); |
547 | } else { | 548 | } else { |
@@ -636,9 +637,9 @@ static void mixer_graph_buffer(struct mixer_context *ctx, | |||
636 | src_y_offset = 0; | 637 | src_y_offset = 0; |
637 | 638 | ||
638 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | 639 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) |
639 | ctx->interlace = true; | 640 | __set_bit(MXR_BIT_INTERLACE, &ctx->flags); |
640 | else | 641 | else |
641 | ctx->interlace = false; | 642 | __clear_bit(MXR_BIT_INTERLACE, &ctx->flags); |
642 | 643 | ||
643 | spin_lock_irqsave(&res->reg_slock, flags); | 644 | spin_lock_irqsave(&res->reg_slock, flags); |
644 | 645 | ||
@@ -697,10 +698,10 @@ static void mixer_graph_buffer(struct mixer_context *ctx, | |||
697 | static void vp_win_reset(struct mixer_context *ctx) | 698 | static void vp_win_reset(struct mixer_context *ctx) |
698 | { | 699 | { |
699 | struct mixer_resources *res = &ctx->mixer_res; | 700 | struct mixer_resources *res = &ctx->mixer_res; |
700 | int tries = 100; | 701 | unsigned int tries = 100; |
701 | 702 | ||
702 | vp_reg_write(res, VP_SRESET, VP_SRESET_PROCESSING); | 703 | vp_reg_write(res, VP_SRESET, VP_SRESET_PROCESSING); |
703 | for (tries = 100; tries; --tries) { | 704 | while (tries--) { |
704 | /* waiting until VP_SRESET_PROCESSING is 0 */ | 705 | /* waiting until VP_SRESET_PROCESSING is 0 */ |
705 | if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING) | 706 | if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING) |
706 | break; | 707 | break; |
@@ -733,7 +734,7 @@ static void mixer_win_reset(struct mixer_context *ctx) | |||
733 | mixer_reg_write(res, MXR_BG_COLOR1, 0x008080); | 734 | mixer_reg_write(res, MXR_BG_COLOR1, 0x008080); |
734 | mixer_reg_write(res, MXR_BG_COLOR2, 0x008080); | 735 | mixer_reg_write(res, MXR_BG_COLOR2, 0x008080); |
735 | 736 | ||
736 | if (ctx->vp_enabled) { | 737 | if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) { |
737 | /* configuration of Video Processor Registers */ | 738 | /* configuration of Video Processor Registers */ |
738 | vp_win_reset(ctx); | 739 | vp_win_reset(ctx); |
739 | vp_default_filter(res); | 740 | vp_default_filter(res); |
@@ -742,7 +743,7 @@ static void mixer_win_reset(struct mixer_context *ctx) | |||
742 | /* disable all layers */ | 743 | /* disable all layers */ |
743 | mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE); | 744 | mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE); |
744 | mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE); | 745 | mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE); |
745 | if (ctx->vp_enabled) | 746 | if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) |
746 | mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE); | 747 | mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE); |
747 | 748 | ||
748 | spin_unlock_irqrestore(&res->reg_slock, flags); | 749 | spin_unlock_irqrestore(&res->reg_slock, flags); |
@@ -753,7 +754,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg) | |||
753 | struct mixer_context *ctx = arg; | 754 | struct mixer_context *ctx = arg; |
754 | struct mixer_resources *res = &ctx->mixer_res; | 755 | struct mixer_resources *res = &ctx->mixer_res; |
755 | u32 val, base, shadow; | 756 | u32 val, base, shadow; |
756 | int win; | ||
757 | 757 | ||
758 | spin_lock(&res->reg_slock); | 758 | spin_lock(&res->reg_slock); |
759 | 759 | ||
@@ -767,7 +767,7 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg) | |||
767 | val &= ~MXR_INT_STATUS_VSYNC; | 767 | val &= ~MXR_INT_STATUS_VSYNC; |
768 | 768 | ||
769 | /* interlace scan need to check shadow register */ | 769 | /* interlace scan need to check shadow register */ |
770 | if (ctx->interlace) { | 770 | if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { |
771 | base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0)); | 771 | base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0)); |
772 | shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0)); | 772 | shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0)); |
773 | if (base != shadow) | 773 | if (base != shadow) |
@@ -780,14 +780,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg) | |||
780 | } | 780 | } |
781 | 781 | ||
782 | drm_crtc_handle_vblank(&ctx->crtc->base); | 782 | drm_crtc_handle_vblank(&ctx->crtc->base); |
783 | for (win = 0 ; win < MIXER_WIN_NR ; win++) { | ||
784 | struct exynos_drm_plane *plane = &ctx->planes[win]; | ||
785 | |||
786 | if (!plane->pending_fb) | ||
787 | continue; | ||
788 | |||
789 | exynos_drm_crtc_finish_update(ctx->crtc, plane); | ||
790 | } | ||
791 | } | 783 | } |
792 | 784 | ||
793 | out: | 785 | out: |
@@ -867,7 +859,7 @@ static int vp_resources_init(struct mixer_context *mixer_ctx) | |||
867 | return -ENODEV; | 859 | return -ENODEV; |
868 | } | 860 | } |
869 | 861 | ||
870 | if (mixer_ctx->has_sclk) { | 862 | if (test_bit(MXR_BIT_HAS_SCLK, &mixer_ctx->flags)) { |
871 | mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer"); | 863 | mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer"); |
872 | if (IS_ERR(mixer_res->sclk_mixer)) { | 864 | if (IS_ERR(mixer_res->sclk_mixer)) { |
873 | dev_err(dev, "failed to get clock 'sclk_mixer'\n"); | 865 | dev_err(dev, "failed to get clock 'sclk_mixer'\n"); |
@@ -917,7 +909,7 @@ static int mixer_initialize(struct mixer_context *mixer_ctx, | |||
917 | return ret; | 909 | return ret; |
918 | } | 910 | } |
919 | 911 | ||
920 | if (mixer_ctx->vp_enabled) { | 912 | if (test_bit(MXR_BIT_VP_ENABLED, &mixer_ctx->flags)) { |
921 | /* acquire vp resources: regs, irqs, clocks */ | 913 | /* acquire vp resources: regs, irqs, clocks */ |
922 | ret = vp_resources_init(mixer_ctx); | 914 | ret = vp_resources_init(mixer_ctx); |
923 | if (ret) { | 915 | if (ret) { |
@@ -1160,7 +1152,8 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data) | |||
1160 | return ret; | 1152 | return ret; |
1161 | 1153 | ||
1162 | for (i = 0; i < MIXER_WIN_NR; i++) { | 1154 | for (i = 0; i < MIXER_WIN_NR; i++) { |
1163 | if (i == VP_DEFAULT_WIN && !ctx->vp_enabled) | 1155 | if (i == VP_DEFAULT_WIN && !test_bit(MXR_BIT_VP_ENABLED, |
1156 | &ctx->flags)) | ||
1164 | continue; | 1157 | continue; |
1165 | 1158 | ||
1166 | ret = exynos_plane_init(drm_dev, &ctx->planes[i], i, | 1159 | ret = exynos_plane_init(drm_dev, &ctx->planes[i], i, |
@@ -1215,10 +1208,13 @@ static int mixer_probe(struct platform_device *pdev) | |||
1215 | 1208 | ||
1216 | ctx->pdev = pdev; | 1209 | ctx->pdev = pdev; |
1217 | ctx->dev = dev; | 1210 | ctx->dev = dev; |
1218 | ctx->vp_enabled = drv->is_vp_enabled; | ||
1219 | ctx->has_sclk = drv->has_sclk; | ||
1220 | ctx->mxr_ver = drv->version; | 1211 | ctx->mxr_ver = drv->version; |
1221 | 1212 | ||
1213 | if (drv->is_vp_enabled) | ||
1214 | __set_bit(MXR_BIT_VP_ENABLED, &ctx->flags); | ||
1215 | if (drv->has_sclk) | ||
1216 | __set_bit(MXR_BIT_HAS_SCLK, &ctx->flags); | ||
1217 | |||
1222 | platform_set_drvdata(pdev, ctx); | 1218 | platform_set_drvdata(pdev, ctx); |
1223 | 1219 | ||
1224 | ret = component_add(&pdev->dev, &mixer_component_ops); | 1220 | ret = component_add(&pdev->dev, &mixer_component_ops); |
@@ -1244,9 +1240,9 @@ static int __maybe_unused exynos_mixer_suspend(struct device *dev) | |||
1244 | 1240 | ||
1245 | clk_disable_unprepare(res->hdmi); | 1241 | clk_disable_unprepare(res->hdmi); |
1246 | clk_disable_unprepare(res->mixer); | 1242 | clk_disable_unprepare(res->mixer); |
1247 | if (ctx->vp_enabled) { | 1243 | if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) { |
1248 | clk_disable_unprepare(res->vp); | 1244 | clk_disable_unprepare(res->vp); |
1249 | if (ctx->has_sclk) | 1245 | if (test_bit(MXR_BIT_HAS_SCLK, &ctx->flags)) |
1250 | clk_disable_unprepare(res->sclk_mixer); | 1246 | clk_disable_unprepare(res->sclk_mixer); |
1251 | } | 1247 | } |
1252 | 1248 | ||
@@ -1269,14 +1265,14 @@ static int __maybe_unused exynos_mixer_resume(struct device *dev) | |||
1269 | DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret); | 1265 | DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret); |
1270 | return ret; | 1266 | return ret; |
1271 | } | 1267 | } |
1272 | if (ctx->vp_enabled) { | 1268 | if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) { |
1273 | ret = clk_prepare_enable(res->vp); | 1269 | ret = clk_prepare_enable(res->vp); |
1274 | if (ret < 0) { | 1270 | if (ret < 0) { |
1275 | DRM_ERROR("Failed to prepare_enable the vp clk [%d]\n", | 1271 | DRM_ERROR("Failed to prepare_enable the vp clk [%d]\n", |
1276 | ret); | 1272 | ret); |
1277 | return ret; | 1273 | return ret; |
1278 | } | 1274 | } |
1279 | if (ctx->has_sclk) { | 1275 | if (test_bit(MXR_BIT_HAS_SCLK, &ctx->flags)) { |
1280 | ret = clk_prepare_enable(res->sclk_mixer); | 1276 | ret = clk_prepare_enable(res->sclk_mixer); |
1281 | if (ret < 0) { | 1277 | if (ret < 0) { |
1282 | DRM_ERROR("Failed to prepare_enable the " \ | 1278 | DRM_ERROR("Failed to prepare_enable the " \ |