Diffstat:
 drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c |  17
 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c  |   9
 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c   | 105
 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h   |  27
 drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c |  12
 drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c  |  20
 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c |  16
 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c   |  17
 drivers/gpu/drm/msm/dsi/dsi_host.c         |  14
 drivers/gpu/drm/msm/msm_atomic.c           | 258
 drivers/gpu/drm/msm/msm_drv.c              |  11
 drivers/gpu/drm/msm/msm_drv.h              |   9
 drivers/gpu/drm/msm/msm_kms.h              |  14
 13 files changed, 177 insertions(+), 352 deletions(-)
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index 7a1ad3af08e3..20e956e14c21 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -98,21 +98,6 @@ static const struct drm_plane_funcs mdp4_plane_funcs = {
 	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
 };
 
-static int mdp4_plane_prepare_fb(struct drm_plane *plane,
-		struct drm_plane_state *new_state)
-{
-	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
-	struct mdp4_kms *mdp4_kms = get_kms(plane);
-	struct msm_kms *kms = &mdp4_kms->base.base;
-	struct drm_framebuffer *fb = new_state->fb;
-
-	if (!fb)
-		return 0;
-
-	DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
-	return msm_framebuffer_prepare(fb, kms->aspace);
-}
-
 static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
 		struct drm_plane_state *old_state)
 {
@@ -152,7 +137,7 @@ static void mdp4_plane_atomic_update(struct drm_plane *plane,
 }
 
 static const struct drm_plane_helper_funcs mdp4_plane_helper_funcs = {
-	.prepare_fb = mdp4_plane_prepare_fb,
+	.prepare_fb = msm_atomic_prepare_fb,
 	.cleanup_fb = mdp4_plane_cleanup_fb,
 	.atomic_check = mdp4_plane_atomic_check,
 	.atomic_update = mdp4_plane_atomic_update,
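Both MDP4 (above) and MDP5 (below) now point .prepare_fb at the shared msm_atomic_prepare_fb(), which pins the framebuffer and also attaches its exclusive fence to the plane state; see the msm_atomic.c hunk further down. The .cleanup_fb side stays per driver. For orientation only, a sketch of what such a cleanup hook does, assuming msm_framebuffer_cleanup() as the counterpart of msm_framebuffer_prepare() (this sketch is not part of the patch):

static void example_plane_cleanup_fb(struct drm_plane *plane,
		struct drm_plane_state *old_state)
{
	struct msm_drm_private *priv = plane->dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!old_state->fb)
		return;

	/* unpin the iova taken in msm_framebuffer_prepare() */
	msm_framebuffer_cleanup(old_state->fb, kms->aspace);
}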
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 76b96081916f..10271359789e 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -430,6 +430,7 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
 	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
 	struct device *dev = &mdp5_kms->pdev->dev;
+	unsigned long flags;
 
 	DBG("%s", crtc->name);
 
@@ -445,6 +446,14 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
 	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
 	pm_runtime_put_sync(dev);
 
+	if (crtc->state->event && !crtc->state->active) {
+		WARN_ON(mdp5_crtc->event);
+		spin_lock_irqsave(&mdp5_kms->dev->event_lock, flags);
+		drm_crtc_send_vblank_event(crtc, crtc->state->event);
+		crtc->state->event = NULL;
+		spin_unlock_irqrestore(&mdp5_kms->dev->event_lock, flags);
+	}
+
 	mdp5_crtc->enabled = false;
 }
 
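With atomic_commit handed over to drm_atomic_helper_commit() (see msm_drv.c below), the core expects every pending crtc->state->event to be completed exactly once. While the CRTC is active that happens from the flip-done interrupt path; once the CRTC is disabled no further interrupt arrives, which is why the hunk above sends the event directly. For reference, the usual interrupt-side counterpart looks roughly like this generic sketch (illustrative, not code from this series):

static void example_flip_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = crtc->state->event;
	crtc->state->event = NULL;
	if (event)
		drm_crtc_send_vblank_event(crtc, event);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}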
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 6d8e3a9a6fc0..6e12e275deba 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -70,60 +70,110 @@ static int mdp5_hw_init(struct msm_kms *kms)
 	return 0;
 }
 
-struct mdp5_state *mdp5_get_state(struct drm_atomic_state *s)
+/* Global/shared object state funcs */
+
+/*
+ * This is a helper that returns the private state currently in operation.
+ * Note that this would return the "old_state" if called in the atomic check
+ * path, and the "new_state" after the atomic swap has been done.
+ */
+struct mdp5_global_state *
+mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms)
+{
+	return to_mdp5_global_state(mdp5_kms->glob_state.state);
+}
+
+/*
+ * This acquires the modeset lock set aside for global state, creates
+ * a new duplicated private object state.
+ */
+struct mdp5_global_state *mdp5_get_global_state(struct drm_atomic_state *s)
 {
 	struct msm_drm_private *priv = s->dev->dev_private;
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
-	struct msm_kms_state *state = to_kms_state(s);
-	struct mdp5_state *new_state;
+	struct drm_private_state *priv_state;
 	int ret;
 
-	if (state->state)
-		return state->state;
-
-	ret = drm_modeset_lock(&mdp5_kms->state_lock, s->acquire_ctx);
+	ret = drm_modeset_lock(&mdp5_kms->glob_state_lock, s->acquire_ctx);
 	if (ret)
 		return ERR_PTR(ret);
 
-	new_state = kmalloc(sizeof(*mdp5_kms->state), GFP_KERNEL);
-	if (!new_state)
-		return ERR_PTR(-ENOMEM);
+	priv_state = drm_atomic_get_private_obj_state(s, &mdp5_kms->glob_state);
+	if (IS_ERR(priv_state))
+		return ERR_CAST(priv_state);
 
-	/* Copy state: */
-	new_state->hwpipe = mdp5_kms->state->hwpipe;
-	new_state->hwmixer = mdp5_kms->state->hwmixer;
-	if (mdp5_kms->smp)
-		new_state->smp = mdp5_kms->state->smp;
+	return to_mdp5_global_state(priv_state);
+}
+
+static struct drm_private_state *
+mdp5_global_duplicate_state(struct drm_private_obj *obj)
+{
+	struct mdp5_global_state *state;
+
+	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return NULL;
 
-	state->state = new_state;
+	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
 
-	return new_state;
+	return &state->base;
 }
 
-static void mdp5_swap_state(struct msm_kms *kms, struct drm_atomic_state *state)
+static void mdp5_global_destroy_state(struct drm_private_obj *obj,
+				      struct drm_private_state *state)
 {
-	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-	swap(to_kms_state(state)->state, mdp5_kms->state);
+	struct mdp5_global_state *mdp5_state = to_mdp5_global_state(state);
+
+	kfree(mdp5_state);
+}
+
+static const struct drm_private_state_funcs mdp5_global_state_funcs = {
+	.atomic_duplicate_state = mdp5_global_duplicate_state,
+	.atomic_destroy_state = mdp5_global_destroy_state,
+};
+
+static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms)
+{
+	struct mdp5_global_state *state;
+
+	drm_modeset_lock_init(&mdp5_kms->glob_state_lock);
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	state->mdp5_kms = mdp5_kms;
+
+	drm_atomic_private_obj_init(&mdp5_kms->glob_state,
+				    &state->base,
+				    &mdp5_global_state_funcs);
+	return 0;
 }
 
 static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
 	struct device *dev = &mdp5_kms->pdev->dev;
+	struct mdp5_global_state *global_state;
+
+	global_state = mdp5_get_existing_global_state(mdp5_kms);
 
 	pm_runtime_get_sync(dev);
 
 	if (mdp5_kms->smp)
-		mdp5_smp_prepare_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
+		mdp5_smp_prepare_commit(mdp5_kms->smp, &global_state->smp);
 }
 
 static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
 	struct device *dev = &mdp5_kms->pdev->dev;
+	struct mdp5_global_state *global_state;
+
+	global_state = mdp5_get_existing_global_state(mdp5_kms);
 
 	if (mdp5_kms->smp)
-		mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
+		mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp);
 
 	pm_runtime_put_sync(dev);
 }
@@ -229,7 +279,6 @@ static const struct mdp_kms_funcs kms_funcs = {
 		.irq = mdp5_irq,
 		.enable_vblank = mdp5_enable_vblank,
 		.disable_vblank = mdp5_disable_vblank,
-		.swap_state = mdp5_swap_state,
 		.prepare_commit = mdp5_prepare_commit,
 		.complete_commit = mdp5_complete_commit,
 		.wait_for_crtc_commit_done = mdp5_wait_for_crtc_commit_done,
@@ -727,7 +776,8 @@ static void mdp5_destroy(struct platform_device *pdev)
 	if (mdp5_kms->rpm_enabled)
 		pm_runtime_disable(&pdev->dev);
 
-	kfree(mdp5_kms->state);
+	drm_atomic_private_obj_fini(&mdp5_kms->glob_state);
+	drm_modeset_lock_fini(&mdp5_kms->glob_state_lock);
 }
 
 static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
@@ -880,12 +930,9 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
 	mdp5_kms->dev = dev;
 	mdp5_kms->pdev = pdev;
 
-	drm_modeset_lock_init(&mdp5_kms->state_lock);
-	mdp5_kms->state = kzalloc(sizeof(*mdp5_kms->state), GFP_KERNEL);
-	if (!mdp5_kms->state) {
-		ret = -ENOMEM;
+	ret = mdp5_global_obj_init(mdp5_kms);
+	if (ret)
 		goto fail;
-	}
 
 	mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
 	if (IS_ERR(mdp5_kms->mmio)) {
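The duplicate/destroy pair above follows the standard drm_private_obj contract: drm_atomic_get_private_obj_state() duplicates the committed state the first time the object is pulled into a drm_atomic_state, the core swaps the copy in at commit, and the old copy is destroyed along with the old state. A minimal generic sketch of the same pattern for a hypothetical driver "foo" (all names invented for illustration):

struct foo_global_state {
	struct drm_private_state base;
	unsigned int blocks_in_use;	/* some shared resource */
};

#define to_foo_global_state(x) \
	container_of(x, struct foo_global_state, base)

static struct drm_private_state *
foo_duplicate_state(struct drm_private_obj *obj)
{
	struct foo_global_state *state;

	/* copy the committed state, let the helper fix up state->base */
	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
	return &state->base;
}

static void foo_destroy_state(struct drm_private_obj *obj,
			      struct drm_private_state *state)
{
	kfree(to_foo_global_state(state));
}

static const struct drm_private_state_funcs foo_state_funcs = {
	.atomic_duplicate_state = foo_duplicate_state,
	.atomic_destroy_state = foo_destroy_state,
};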
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
index 425a03d213e5..854dfd30e829 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
@@ -28,8 +28,6 @@
 #include "mdp5_ctl.h"
 #include "mdp5_smp.h"
 
-struct mdp5_state;
-
 struct mdp5_kms {
 	struct mdp_kms base;
 
@@ -49,11 +47,12 @@ struct mdp5_kms {
 	struct mdp5_cfg_handler *cfg;
 	uint32_t caps;	/* MDP capabilities (MDP_CAP_XXX bits) */
 
-	/**
-	 * Global atomic state. Do not access directly, use mdp5_get_state()
+	/*
+	 * Global private object state. Do not access directly; use
+	 * mdp5_get_global_state().
 	 */
-	struct mdp5_state *state;
-	struct drm_modeset_lock state_lock;
+	struct drm_modeset_lock glob_state_lock;
+	struct drm_private_obj glob_state;
 
 	struct mdp5_smp *smp;
 	struct mdp5_ctl_manager *ctlm;
@@ -81,19 +80,23 @@ struct mdp5_kms {
 };
 #define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
 
-/* Global atomic state for tracking resources that are shared across
+/* Global private object state for tracking resources that are shared across
  * multiple kms objects (planes/crtcs/etc).
- *
- * For atomic updates which require modifying global state,
  */
-struct mdp5_state {
+#define to_mdp5_global_state(x) container_of(x, struct mdp5_global_state, base)
+struct mdp5_global_state {
+	struct drm_private_state base;
+
+	struct drm_atomic_state *state;
+	struct mdp5_kms *mdp5_kms;
+
 	struct mdp5_hw_pipe_state hwpipe;
 	struct mdp5_hw_mixer_state hwmixer;
 	struct mdp5_smp_state smp;
 };
 
-struct mdp5_state *__must_check
-mdp5_get_state(struct drm_atomic_state *s);
+struct mdp5_global_state *mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms);
+struct mdp5_global_state *__must_check mdp5_get_global_state(struct drm_atomic_state *s);
 
 /* Atomic plane state. Subclasses the base drm_plane_state in order to
  * track assigned hwpipe and hw specific state.
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
index 8a00991f03c7..113e6b569562 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
@@ -52,14 +52,14 @@ int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
 {
 	struct msm_drm_private *priv = s->dev->dev_private;
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
-	struct mdp5_state *state = mdp5_get_state(s);
+	struct mdp5_global_state *global_state = mdp5_get_global_state(s);
 	struct mdp5_hw_mixer_state *new_state;
 	int i;
 
-	if (IS_ERR(state))
-		return PTR_ERR(state);
+	if (IS_ERR(global_state))
+		return PTR_ERR(global_state);
 
-	new_state = &state->hwmixer;
+	new_state = &global_state->hwmixer;
 
 	for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
 		struct mdp5_hw_mixer *cur = mdp5_kms->hwmixers[i];
@@ -129,8 +129,8 @@ int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
 
 void mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
 {
-	struct mdp5_state *state = mdp5_get_state(s);
-	struct mdp5_hw_mixer_state *new_state = &state->hwmixer;
+	struct mdp5_global_state *global_state = mdp5_get_global_state(s);
+	struct mdp5_hw_mixer_state *new_state = &global_state->hwmixer;
 
 	if (!mixer)
 		return;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
index ff52c49095f9..1ef26bc63163 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
@@ -24,17 +24,19 @@ int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
 {
 	struct msm_drm_private *priv = s->dev->dev_private;
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
-	struct mdp5_state *state;
+	struct mdp5_global_state *new_global_state, *old_global_state;
 	struct mdp5_hw_pipe_state *old_state, *new_state;
 	int i, j;
 
-	state = mdp5_get_state(s);
-	if (IS_ERR(state))
-		return PTR_ERR(state);
+	new_global_state = mdp5_get_global_state(s);
+	if (IS_ERR(new_global_state))
+		return PTR_ERR(new_global_state);
 
-	/* grab old_state after mdp5_get_state(), since now we hold lock: */
-	old_state = &mdp5_kms->state->hwpipe;
-	new_state = &state->hwpipe;
+	/* grab old_state after mdp5_get_global_state(), since now we hold lock: */
+	old_global_state = mdp5_get_existing_global_state(mdp5_kms);
+
+	old_state = &old_global_state->hwpipe;
+	new_state = &new_global_state->hwpipe;
 
 	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
 		struct mdp5_hw_pipe *cur = mdp5_kms->hwpipes[i];
@@ -107,7 +109,7 @@ int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
 		WARN_ON(r_hwpipe);
 
 		DBG("%s: alloc SMP blocks", (*hwpipe)->name);
-		ret = mdp5_smp_assign(mdp5_kms->smp, &state->smp,
+		ret = mdp5_smp_assign(mdp5_kms->smp, &new_global_state->smp,
 				(*hwpipe)->pipe, blkcfg);
 		if (ret)
 			return -ENOMEM;
@@ -132,7 +134,7 @@ void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
 {
 	struct msm_drm_private *priv = s->dev->dev_private;
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
-	struct mdp5_state *state = mdp5_get_state(s);
+	struct mdp5_global_state *state = mdp5_get_global_state(s);
 	struct mdp5_hw_pipe_state *new_state = &state->hwpipe;
 
 	if (!hwpipe)
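A sketch of a caller, assuming the mdp5_pipe_assign() signature used in this file (hwpipe/r_hwpipe out-parameters; hypothetical fragment, not part of the patch): the check path reserves against the duplicated global state, and if the overall atomic check fails later, the core simply throws that duplicate away, so no manual rollback is needed.

static int example_reserve_pipe(struct drm_atomic_state *s,
		struct drm_plane *plane, uint32_t caps, uint32_t blkcfg)
{
	struct mdp5_hw_pipe *hwpipe = NULL, *r_hwpipe = NULL;
	int ret;

	/* modifies only the duplicated state, under glob_state_lock */
	ret = mdp5_pipe_assign(s, plane, caps, blkcfg, &hwpipe, &r_hwpipe);
	if (ret)
		return ret;

	/* nothing to undo if a later check fails: the duplicated global
	 * state is freed by the core when the commit never happens */
	return 0;
}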
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index a9f31da7d45a..e09bc53a0e65 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -245,20 +245,6 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
 	.atomic_print_state = mdp5_plane_atomic_print_state,
 };
 
-static int mdp5_plane_prepare_fb(struct drm_plane *plane,
-		struct drm_plane_state *new_state)
-{
-	struct mdp5_kms *mdp5_kms = get_kms(plane);
-	struct msm_kms *kms = &mdp5_kms->base.base;
-	struct drm_framebuffer *fb = new_state->fb;
-
-	if (!new_state->fb)
-		return 0;
-
-	DBG("%s: prepare: FB[%u]", plane->name, fb->base.id);
-	return msm_framebuffer_prepare(fb, kms->aspace);
-}
-
 static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
 		struct drm_plane_state *old_state)
 {
@@ -543,7 +529,7 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
 }
 
 static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
-	.prepare_fb = mdp5_plane_prepare_fb,
+	.prepare_fb = msm_atomic_prepare_fb,
 	.cleanup_fb = mdp5_plane_cleanup_fb,
 	.atomic_check = mdp5_plane_atomic_check,
 	.atomic_update = mdp5_plane_atomic_update,
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
index ae4983d9d0a5..96c2b828dba4 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
@@ -340,17 +340,20 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
 	struct mdp5_kms *mdp5_kms = get_kms(smp);
 	struct mdp5_hw_pipe_state *hwpstate;
 	struct mdp5_smp_state *state;
+	struct mdp5_global_state *global_state;
 	int total = 0, i, j;
 
 	drm_printf(p, "name\tinuse\tplane\n");
 	drm_printf(p, "----\t-----\t-----\n");
 
 	if (drm_can_sleep())
-		drm_modeset_lock(&mdp5_kms->state_lock, NULL);
+		drm_modeset_lock(&mdp5_kms->glob_state_lock, NULL);
+
+	global_state = mdp5_get_existing_global_state(mdp5_kms);
 
 	/* grab these *after* we hold the state_lock */
-	hwpstate = &mdp5_kms->state->hwpipe;
-	state = &mdp5_kms->state->smp;
+	hwpstate = &global_state->hwpipe;
+	state = &global_state->smp;
 
 	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
 		struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
@@ -374,7 +377,7 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
 			bitmap_weight(state->state, smp->blk_cnt));
 
 	if (drm_can_sleep())
-		drm_modeset_unlock(&mdp5_kms->state_lock);
+		drm_modeset_unlock(&mdp5_kms->glob_state_lock);
 }
 
 void mdp5_smp_destroy(struct mdp5_smp *smp)
@@ -384,7 +387,8 @@ void mdp5_smp_destroy(struct mdp5_smp *smp)
 
 struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg)
 {
-	struct mdp5_smp_state *state = &mdp5_kms->state->smp;
+	struct mdp5_smp_state *state;
+	struct mdp5_global_state *global_state;
 	struct mdp5_smp *smp = NULL;
 	int ret;
 
@@ -398,6 +402,9 @@ struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_
 	smp->blk_cnt = cfg->mmb_count;
 	smp->blk_size = cfg->mmb_size;
 
+	global_state = mdp5_get_existing_global_state(mdp5_kms);
+	state = &global_state->smp;
+
 	/* statically tied MMBs cannot be re-allocated: */
 	bitmap_copy(state->state, cfg->reserved_state, smp->blk_cnt);
 	memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 8baba30d6c65..2f1a2780658a 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1036,7 +1036,6 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
 
 	ret = msm_gem_get_iova(msm_host->tx_gem_obj,
 				priv->kms->aspace, &iova);
-	mutex_unlock(&dev->struct_mutex);
 	if (ret) {
 		pr_err("%s: failed to get iova, %d\n", __func__, ret);
 		return ret;
@@ -1067,9 +1066,20 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
 static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
 {
 	struct drm_device *dev = msm_host->dev;
+	struct msm_drm_private *priv;
 
+	/*
+	 * This is possible if we're tearing down before we've had a chance to
+	 * fully initialize. A very real possibility if our probe is deferred,
+	 * in which case we'll hit msm_dsi_host_destroy() without having run
+	 * through the dsi_tx_buf_alloc().
+	 */
+	if (!dev)
+		return;
+
+	priv = dev->dev_private;
 	if (msm_host->tx_gem_obj) {
-		msm_gem_put_iova(msm_host->tx_gem_obj, 0);
+		msm_gem_put_iova(msm_host->tx_gem_obj, priv->kms->aspace);
 		drm_gem_object_put_unlocked(msm_host->tx_gem_obj);
 		msm_host->tx_gem_obj = NULL;
 	}
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index bf5f8c39f34d..f0635c3da7f4 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -16,69 +16,8 @@
  */
 
 #include "msm_drv.h"
-#include "msm_kms.h"
 #include "msm_gem.h"
-#include "msm_fence.h"
-
-struct msm_commit {
-	struct drm_device *dev;
-	struct drm_atomic_state *state;
-	struct work_struct work;
-	uint32_t crtc_mask;
-};
-
-static void commit_worker(struct work_struct *work);
-
-/* block until specified crtcs are no longer pending update, and
- * atomically mark them as pending update
- */
-static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
-{
-	int ret;
-
-	spin_lock(&priv->pending_crtcs_event.lock);
-	ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
-			!(priv->pending_crtcs & crtc_mask));
-	if (ret == 0) {
-		DBG("start: %08x", crtc_mask);
-		priv->pending_crtcs |= crtc_mask;
-	}
-	spin_unlock(&priv->pending_crtcs_event.lock);
-
-	return ret;
-}
-
-/* clear specified crtcs (no longer pending update)
- */
-static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
-{
-	spin_lock(&priv->pending_crtcs_event.lock);
-	DBG("end: %08x", crtc_mask);
-	priv->pending_crtcs &= ~crtc_mask;
-	wake_up_all_locked(&priv->pending_crtcs_event);
-	spin_unlock(&priv->pending_crtcs_event.lock);
-}
-
-static struct msm_commit *commit_init(struct drm_atomic_state *state)
-{
-	struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
-
-	if (!c)
-		return NULL;
-
-	c->dev = state->dev;
-	c->state = state;
-
-	INIT_WORK(&c->work, commit_worker);
-
-	return c;
-}
-
-static void commit_destroy(struct msm_commit *c)
-{
-	end_atomic(c->dev->dev_private, c->crtc_mask);
-	kfree(c);
-}
+#include "msm_kms.h"
 
 static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
 		struct drm_atomic_state *old_state)
@@ -97,195 +36,48 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
 	}
 }
 
-/* The (potentially) asynchronous part of the commit. At this point
- * nothing can fail short of armageddon.
- */
-static void complete_commit(struct msm_commit *c, bool async)
+int msm_atomic_prepare_fb(struct drm_plane *plane,
+		struct drm_plane_state *new_state)
 {
-	struct drm_atomic_state *state = c->state;
-	struct drm_device *dev = state->dev;
-	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_drm_private *priv = plane->dev->dev_private;
 	struct msm_kms *kms = priv->kms;
+	struct drm_gem_object *obj;
+	struct msm_gem_object *msm_obj;
+	struct dma_fence *fence;
 
-	drm_atomic_helper_wait_for_fences(dev, state, false);
-
-	kms->funcs->prepare_commit(kms, state);
-
-	drm_atomic_helper_commit_modeset_disables(dev, state);
-
-	drm_atomic_helper_commit_planes(dev, state, 0);
-
-	drm_atomic_helper_commit_modeset_enables(dev, state);
-
-	/* NOTE: _wait_for_vblanks() only waits for vblank on
-	 * enabled CRTCs. So we end up faulting when disabling
-	 * due to (potentially) unref'ing the outgoing fb's
-	 * before the vblank when the disable has latched.
-	 *
-	 * But if it did wait on disabled (or newly disabled)
-	 * CRTCs, that would be racy (ie. we could have missed
-	 * the irq. We need some way to poll for pipe shut
-	 * down. Or just live with occasionally hitting the
-	 * timeout in the CRTC disable path (which really should
-	 * not be critical path)
-	 */
-
-	msm_atomic_wait_for_commit_done(dev, state);
-
-	drm_atomic_helper_cleanup_planes(dev, state);
+	if (!new_state->fb)
+		return 0;
 
-	kms->funcs->complete_commit(kms, state);
+	obj = msm_framebuffer_bo(new_state->fb, 0);
+	msm_obj = to_msm_bo(obj);
+	fence = reservation_object_get_excl_rcu(msm_obj->resv);
 
-	drm_atomic_state_put(state);
+	drm_atomic_set_fence_for_plane(new_state, fence);
 
-	commit_destroy(c);
-}
-
-static void commit_worker(struct work_struct *work)
-{
-	complete_commit(container_of(work, struct msm_commit, work), true);
+	return msm_framebuffer_prepare(new_state->fb, kms->aspace);
 }
 
-/**
- * drm_atomic_helper_commit - commit validated state object
- * @dev: DRM device
- * @state: the driver state object
- * @nonblock: nonblocking commit
- *
- * This function commits a with drm_atomic_helper_check() pre-validated state
- * object. This can still fail when e.g. the framebuffer reservation fails.
- *
- * RETURNS
- * Zero for success or -errno.
- */
-int msm_atomic_commit(struct drm_device *dev,
-		struct drm_atomic_state *state, bool nonblock)
+void msm_atomic_commit_tail(struct drm_atomic_state *state)
 {
+	struct drm_device *dev = state->dev;
 	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_commit *c;
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
-	struct drm_plane *plane;
-	struct drm_plane_state *old_plane_state, *new_plane_state;
-	int i, ret;
-
-	ret = drm_atomic_helper_prepare_planes(dev, state);
-	if (ret)
-		return ret;
-
-	/*
-	 * Note that plane->atomic_async_check() should fail if we need
-	 * to re-assign hwpipe or anything that touches global atomic
-	 * state, so we'll never go down the async update path in those
-	 * cases.
-	 */
-	if (state->async_update) {
-		drm_atomic_helper_async_commit(dev, state);
-		drm_atomic_helper_cleanup_planes(dev, state);
-		return 0;
-	}
-
-	c = commit_init(state);
-	if (!c) {
-		ret = -ENOMEM;
-		goto error;
-	}
-
-	/*
-	 * Figure out what crtcs we have:
-	 */
-	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
-		c->crtc_mask |= drm_crtc_mask(crtc);
-
-	/*
-	 * Figure out what fence to wait for:
-	 */
-	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
-		if ((new_plane_state->fb != old_plane_state->fb) && new_plane_state->fb) {
-			struct drm_gem_object *obj = msm_framebuffer_bo(new_plane_state->fb, 0);
-			struct msm_gem_object *msm_obj = to_msm_bo(obj);
-			struct dma_fence *fence = reservation_object_get_excl_rcu(msm_obj->resv);
+	struct msm_kms *kms = priv->kms;
 
-			drm_atomic_set_fence_for_plane(new_plane_state, fence);
-		}
-	}
+	kms->funcs->prepare_commit(kms, state);
 
-	/*
-	 * Wait for pending updates on any of the same crtc's and then
-	 * mark our set of crtc's as busy:
-	 */
-	ret = start_atomic(dev->dev_private, c->crtc_mask);
-	if (ret)
-		goto err_free;
+	drm_atomic_helper_commit_modeset_disables(dev, state);
 
-	BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);
+	drm_atomic_helper_commit_planes(dev, state, 0);
 
-	/*
-	 * This is the point of no return - everything below never fails except
-	 * when the hw goes bonghits. Which means we can commit the new state on
-	 * the software side now.
-	 *
-	 * swap driver private state while still holding state_lock
-	 */
-	if (to_kms_state(state)->state)
-		priv->kms->funcs->swap_state(priv->kms, state);
+	drm_atomic_helper_commit_modeset_enables(dev, state);
 
-	/*
-	 * Everything below can be run asynchronously without the need to grab
-	 * any modeset locks at all under one conditions: It must be guaranteed
-	 * that the asynchronous work has either been cancelled (if the driver
-	 * supports it, which at least requires that the framebuffers get
-	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
-	 * before the new state gets committed on the software side with
-	 * drm_atomic_helper_swap_state().
-	 *
-	 * This scheme allows new atomic state updates to be prepared and
-	 * checked in parallel to the asynchronous completion of the previous
-	 * update. Which is important since compositors need to figure out the
-	 * composition of the next frame right after having submitted the
-	 * current layout.
-	 */
+	msm_atomic_wait_for_commit_done(dev, state);
 
-	drm_atomic_state_get(state);
-	if (nonblock) {
-		queue_work(priv->atomic_wq, &c->work);
-		return 0;
-	}
+	kms->funcs->complete_commit(kms, state);
 
-	complete_commit(c, false);
+	drm_atomic_helper_wait_for_vblanks(dev, state);
 
-	return 0;
+	drm_atomic_helper_commit_hw_done(state);
 
-err_free:
-	kfree(c);
-error:
 	drm_atomic_helper_cleanup_planes(dev, state);
-	return ret;
-}
-
-struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev)
-{
-	struct msm_kms_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
-
-	if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
-		kfree(state);
-		return NULL;
-	}
-
-	return &state->base;
-}
-
-void msm_atomic_state_clear(struct drm_atomic_state *s)
-{
-	struct msm_kms_state *state = to_kms_state(s);
-	drm_atomic_state_default_clear(&state->base);
-	kfree(state->state);
-	state->state = NULL;
-}
-
-void msm_atomic_state_free(struct drm_atomic_state *state)
-{
-	kfree(to_kms_state(state)->state);
-	drm_atomic_state_default_release(state);
-	kfree(state);
 }
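For comparison, the stock commit tail that this function overrides looks roughly like the sketch below (paraphrased from drm_atomic_helper.c of the same era, not verbatim). The msm version differs in three ways: it brackets the commit with kms->funcs->prepare_commit()/complete_commit() for power and SMP management, it waits through its own msm_atomic_wait_for_commit_done() rather than relying on vblank waits alone, and it leaves fence waiting, state swapping, and the nonblocking worker entirely to the core, which handles them before calling into atomic_commit_tail.

static void example_default_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	drm_atomic_helper_commit_hw_done(old_state);

	drm_atomic_helper_wait_for_vblanks(dev, old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);
}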
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 30cd514d8f7c..021a0b6f9a59 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -41,10 +41,11 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
 	.fb_create = msm_framebuffer_create,
 	.output_poll_changed = drm_fb_helper_output_poll_changed,
 	.atomic_check = drm_atomic_helper_check,
-	.atomic_commit = msm_atomic_commit,
-	.atomic_state_alloc = msm_atomic_state_alloc,
-	.atomic_state_clear = msm_atomic_state_clear,
-	.atomic_state_free = msm_atomic_state_free,
+	.atomic_commit = drm_atomic_helper_commit,
+};
+
+static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
+	.atomic_commit_tail = msm_atomic_commit_tail,
 };
 
 #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
@@ -384,7 +385,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 
 	priv->wq = alloc_ordered_workqueue("msm", 0);
 	priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
-	init_waitqueue_head(&priv->pending_crtcs_event);
 
 	INIT_LIST_HEAD(&priv->inactive_list);
 	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
@@ -442,6 +442,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	}
 
 	ddev->mode_config.funcs = &mode_config_funcs;
+	ddev->mode_config.helper_private = &mode_config_helper_funcs;
 
 	ret = drm_vblank_init(ddev, priv->num_crtcs);
 	if (ret < 0) {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 48ed5b9a8580..b2da1fbf81e0 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -117,10 +117,6 @@ struct msm_drm_private {
 	struct workqueue_struct *wq;
 	struct workqueue_struct *atomic_wq;
 
-	/* crtcs pending async atomic updates: */
-	uint32_t pending_crtcs;
-	wait_queue_head_t pending_crtcs_event;
-
 	unsigned int num_planes;
 	struct drm_plane *planes[16];
 
@@ -160,8 +156,9 @@ struct msm_format {
 	uint32_t pixel_format;
 };
 
-int msm_atomic_commit(struct drm_device *dev,
-		struct drm_atomic_state *state, bool nonblock);
+int msm_atomic_prepare_fb(struct drm_plane *plane,
+		struct drm_plane_state *new_state);
+void msm_atomic_commit_tail(struct drm_atomic_state *state);
 struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
 void msm_atomic_state_clear(struct drm_atomic_state *state);
 void msm_atomic_state_free(struct drm_atomic_state *state);
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index aaa329dc020e..dfd92947de2c 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -40,8 +40,6 @@ struct msm_kms_funcs {
 	irqreturn_t (*irq)(struct msm_kms *kms);
 	int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
 	void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
-	/* swap global atomic state: */
-	void (*swap_state)(struct msm_kms *kms, struct drm_atomic_state *state);
 	/* modeset, bracketing atomic_commit(): */
 	void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
 	void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
@@ -80,18 +78,6 @@ struct msm_kms {
 	struct msm_gem_address_space *aspace;
 };
 
-/**
- * Subclass of drm_atomic_state, to allow kms backend to have driver
- * private global state. The kms backend can do whatever it wants
- * with the ->state ptr. On ->atomic_state_clear() the ->state ptr
- * is kfree'd and set back to NULL.
- */
-struct msm_kms_state {
-	struct drm_atomic_state base;
-	void *state;
-};
-#define to_kms_state(x) container_of(x, struct msm_kms_state, base)
-
 static inline void msm_kms_init(struct msm_kms *kms,
 		const struct msm_kms_funcs *funcs)
 {