author		Rob Clark <robdclark@gmail.com>	2013-11-30 17:24:22 -0500
committer	Rob Clark <robdclark@gmail.com>	2014-01-09 14:44:05 -0500
commit		9e0efa63565511dc75846e6b036a4b80e92b9a98 (patch)
tree		4f9efd191aaf9cb622c9d451e96e4c3859e4850c /drivers/gpu/drm/msm
parent		dd2da6e34672100b5fd811fbf2cf97e29c08080f (diff)
drm/msm: move irq utils to mdp_kms
We'll want basically the same thing for mdp5, so refactor it out so it
can be shared.
Signed-off-by: Rob Clark <robdclark@gmail.com>
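
Note: the point of the split is that everything except the final register write is hardware-agnostic. A later MDP5 backend would only need to embed struct mdp_kms and supply its own set_irqmask hook; the list handling, locking and dispatch come from the shared mdp_kms.c. The sketch below is illustrative only -- the mdp5_* names and the REG_MDP5_* register macros are placeholders, not code from this patch or from any later mdp5 support.

/* Hypothetical MDP5 backend reusing the shared mdp_kms irq helpers.
 * All mdp5_* names and REG_MDP5_* offsets are invented for illustration.
 */
struct mdp5_kms {
	struct mdp_kms base;		/* embeds struct msm_kms */
	void __iomem *mmio;
};
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)

/* the only hw-specific piece: write the accumulated mask to the hw */
static void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
{
	writel(irqmask, to_mdp5_kms(mdp_kms)->mmio + REG_MDP5_INTR_EN);
}

static const struct mdp_kms_funcs mdp5_kms_funcs = {
	.base = {
		/* .hw_init, .irq, .enable_vblank, ... as for mdp4 */
	},
	.set_irqmask = mdp5_set_irqmask,
};

/* in the irq handler, everything else is generic: */
static irqreturn_t mdp5_irq(struct msm_kms *kms)
{
	struct mdp_kms *mdp_kms = to_mdp_kms(kms);
	uint32_t status = readl(to_mdp5_kms(mdp_kms)->mmio + REG_MDP5_INTR_STATUS);

	mdp_dispatch_irqs(mdp_kms, status);
	return IRQ_HANDLED;
}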
Diffstat (limited to 'drivers/gpu/drm/msm')
-rw-r--r--	drivers/gpu/drm/msm/Makefile			  1
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c	 20
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c	  4
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c		142
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c		 16
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h		 26
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c	  2
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp_kms.c		144
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp_kms.h		 57
-rw-r--r--	drivers/gpu/drm/msm/msm_kms.h			 11
10 files changed, 254 insertions(+), 169 deletions(-)
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 9481736eb2dd..17847af4e7ab 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -13,6 +13,7 @@ msm-y := \
 	hdmi/hdmi_phy_8960.o \
 	hdmi/hdmi_phy_8x60.o \
 	mdp/mdp_format.o \
+	mdp/mdp_kms.o \
 	mdp/mdp4/mdp4_crtc.o \
 	mdp/mdp4/mdp4_dtv_encoder.o \
 	mdp/mdp4/mdp4_irq.o \
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index c11400a1c603..1964f4f0d452 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -66,15 +66,15 @@ struct mdp4_crtc {
 	/* for unref'ing cursor bo's after scanout completes: */
 	struct drm_flip_work unref_cursor_work;
 
-	struct mdp4_irq vblank;
-	struct mdp4_irq err;
+	struct mdp_irq vblank;
+	struct mdp_irq err;
 };
 #define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)
 
 static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
 {
 	struct msm_drm_private *priv = crtc->dev->dev_private;
-	return to_mdp4_kms(priv->kms);
+	return to_mdp4_kms(to_mdp_kms(priv->kms));
 }
 
 static void update_fb(struct drm_crtc *crtc, bool async,
@@ -93,7 +93,7 @@ static void update_fb(struct drm_crtc *crtc, bool async,
 
 	if (!async) {
 		/* enable vblank to pick up the old_fb */
-		mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
+		mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
 	}
 }
 
@@ -145,7 +145,7 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 
 	atomic_or(pending, &mdp4_crtc->pending);
-	mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
+	mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
 }
 
 static void pageflip_cb(struct msm_fence_cb *cb)
@@ -210,9 +210,9 @@ static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode)
 	if (enabled != mdp4_crtc->enabled) {
 		if (enabled) {
 			mdp4_enable(mdp4_kms);
-			mdp4_irq_register(mdp4_kms, &mdp4_crtc->err);
+			mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);
 		} else {
-			mdp4_irq_unregister(mdp4_kms, &mdp4_crtc->err);
+			mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
 			mdp4_disable(mdp4_kms);
 		}
 		mdp4_crtc->enabled = enabled;
@@ -571,14 +571,14 @@ static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
 	.load_lut = mdp4_crtc_load_lut,
 };
 
-static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus)
+static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
 {
 	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
 	struct drm_crtc *crtc = &mdp4_crtc->base;
 	struct msm_drm_private *priv = crtc->dev->dev_private;
 	unsigned pending;
 
-	mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank);
+	mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);
 
 	pending = atomic_xchg(&mdp4_crtc->pending, 0);
 
@@ -593,7 +593,7 @@ static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus)
 	}
 }
 
-static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus)
+static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
 {
 	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
 	struct drm_crtc *crtc = &mdp4_crtc->base;
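
Note: the repeated to_mdp4_kms(to_mdp_kms(priv->kms)) pattern above reflects the new two-level embedding: struct mdp4_kms now contains a struct mdp_kms, which in turn contains the struct msm_kms that the rest of the driver passes around, so the downcast peels off one layer at a time with container_of(). A minimal standalone illustration of the same idiom (the outer/middle/inner names are invented for this example, not from the patch):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int id; };              /* plays the role of msm_kms */
struct middle { struct inner base; };  /* plays the role of mdp_kms */
struct outer { struct middle base; };  /* plays the role of mdp4_kms */

#define to_middle(x) container_of(x, struct middle, base)
#define to_outer(x)  container_of(x, struct outer, base)

int main(void)
{
	struct outer o = { .base.base.id = 42 };
	struct inner *i = &o.base.base;        /* what the generic code holds */

	/* peel off one embedding layer at a time, as the driver now does: */
	struct outer *back = to_outer(to_middle(i));
	printf("%d\n", back->base.base.id);    /* prints 42 */
	return 0;
}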
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
index 3799ccc517b2..067ed03b35fe 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
@@ -35,7 +35,7 @@ struct mdp4_dtv_encoder {
 static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
 {
 	struct msm_drm_private *priv = encoder->dev->dev_private;
-	return to_mdp4_kms(priv->kms);
+	return to_mdp4_kms(to_mdp_kms(priv->kms));
 }
 
 #ifdef CONFIG_MSM_BUS_SCALING
@@ -137,7 +137,7 @@ static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode)
 	 * the settings changes for the new modeset (like new
 	 * scanout buffer) don't latch properly..
 	 */
-	mdp4_irq_wait(mdp4_kms, MDP4_IRQ_EXTERNAL_VSYNC);
+	mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC);
 
 	clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
 	clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index 5c6b7fca4edd..c740ccd1cc67 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -19,77 +19,49 @@
 #include "msm_drv.h"
 #include "mdp4_kms.h"
 
-
-struct mdp4_irq_wait {
-	struct mdp4_irq irq;
-	int count;
-};
-
-static DECLARE_WAIT_QUEUE_HEAD(wait_event);
-
-static DEFINE_SPINLOCK(list_lock);
-
-static void update_irq(struct mdp4_kms *mdp4_kms)
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
 {
-	struct mdp4_irq *irq;
-	uint32_t irqmask = mdp4_kms->vblank_mask;
-
-	BUG_ON(!spin_is_locked(&list_lock));
-
-	list_for_each_entry(irq, &mdp4_kms->irq_list, node)
-		irqmask |= irq->irqmask;
-
-	mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, irqmask);
+	mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask);
 }
 
-static void update_irq_unlocked(struct mdp4_kms *mdp4_kms)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&list_lock, flags);
-	update_irq(mdp4_kms);
-	spin_unlock_irqrestore(&list_lock, flags);
-}
-
-static void mdp4_irq_error_handler(struct mdp4_irq *irq, uint32_t irqstatus)
+static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
 {
 	DRM_ERROR("errors: %08x\n", irqstatus);
 }
 
 void mdp4_irq_preinstall(struct msm_kms *kms)
 {
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
 	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
 }
 
 int mdp4_irq_postinstall(struct msm_kms *kms)
 {
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-	struct mdp4_irq *error_handler = &mdp4_kms->error_handler;
-
-	INIT_LIST_HEAD(&mdp4_kms->irq_list);
+	struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
+	struct mdp_irq *error_handler = &mdp4_kms->error_handler;
 
 	error_handler->irq = mdp4_irq_error_handler;
 	error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
 			MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
 
-	mdp4_irq_register(mdp4_kms, error_handler);
+	mdp_irq_register(mdp_kms, error_handler);
 
 	return 0;
 }
 
 void mdp4_irq_uninstall(struct msm_kms *kms)
 {
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
 	mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
 }
 
 irqreturn_t mdp4_irq(struct msm_kms *kms)
 {
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+	struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
 	struct drm_device *dev = mdp4_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
-	struct mdp4_irq *handler, *n;
-	unsigned long flags;
 	unsigned int id;
 	uint32_t status;
 
@@ -102,102 +74,20 @@ irqreturn_t mdp4_irq(struct msm_kms *kms)
 		if (status & mdp4_crtc_vblank(priv->crtcs[id]))
 			drm_handle_vblank(dev, id);
 
-	spin_lock_irqsave(&list_lock, flags);
-	mdp4_kms->in_irq = true;
-	list_for_each_entry_safe(handler, n, &mdp4_kms->irq_list, node) {
-		if (handler->irqmask & status) {
-			spin_unlock_irqrestore(&list_lock, flags);
-			handler->irq(handler, handler->irqmask & status);
-			spin_lock_irqsave(&list_lock, flags);
-		}
-	}
-	mdp4_kms->in_irq = false;
-	update_irq(mdp4_kms);
-	spin_unlock_irqrestore(&list_lock, flags);
+	mdp_dispatch_irqs(mdp_kms, status);
 
 	return IRQ_HANDLED;
 }
 
 int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-	unsigned long flags;
-
-	spin_lock_irqsave(&list_lock, flags);
-	mdp4_kms->vblank_mask |= mdp4_crtc_vblank(crtc);
-	update_irq(mdp4_kms);
-	spin_unlock_irqrestore(&list_lock, flags);
-
+	mdp_update_vblank_mask(to_mdp_kms(kms),
+			mdp4_crtc_vblank(crtc), true);
 	return 0;
 }
 
 void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-	unsigned long flags;
-
-	spin_lock_irqsave(&list_lock, flags);
-	mdp4_kms->vblank_mask &= ~mdp4_crtc_vblank(crtc);
-	update_irq(mdp4_kms);
-	spin_unlock_irqrestore(&list_lock, flags);
-}
-
-static void wait_irq(struct mdp4_irq *irq, uint32_t irqstatus)
-{
-	struct mdp4_irq_wait *wait =
-		container_of(irq, struct mdp4_irq_wait, irq);
-	wait->count--;
-	wake_up_all(&wait_event);
-}
-
-void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask)
-{
-	struct mdp4_irq_wait wait = {
-		.irq = {
-			.irq = wait_irq,
-			.irqmask = irqmask,
-		},
-		.count = 1,
-	};
-	mdp4_irq_register(mdp4_kms, &wait.irq);
-	wait_event(wait_event, (wait.count <= 0));
-	mdp4_irq_unregister(mdp4_kms, &wait.irq);
-}
-
-void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
-{
-	unsigned long flags;
-	bool needs_update = false;
-
-	spin_lock_irqsave(&list_lock, flags);
-
-	if (!irq->registered) {
-		irq->registered = true;
-		list_add(&irq->node, &mdp4_kms->irq_list);
-		needs_update = !mdp4_kms->in_irq;
-	}
-
-	spin_unlock_irqrestore(&list_lock, flags);
-
-	if (needs_update)
-		update_irq_unlocked(mdp4_kms);
-}
-
-void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
-{
-	unsigned long flags;
-	bool needs_update = false;
-
-	spin_lock_irqsave(&list_lock, flags);
-
-	if (irq->registered) {
-		irq->registered = false;
-		list_del(&irq->node);
-		needs_update = !mdp4_kms->in_irq;
-	}
-
-	spin_unlock_irqrestore(&list_lock, flags);
-
-	if (needs_update)
-		update_irq_unlocked(mdp4_kms);
+	mdp_update_vblank_mask(to_mdp_kms(kms),
+			mdp4_crtc_vblank(crtc), false);
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index ee4b27eded98..4d1cc2ea700e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -24,7 +24,7 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
 
 static int mdp4_hw_init(struct msm_kms *kms)
 {
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
 	struct drm_device *dev = mdp4_kms->dev;
 	uint32_t version, major, minor, dmap_cfg, vg_cfg;
 	unsigned long clk;
@@ -133,7 +133,7 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
 
 static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
 {
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
 	struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
 	unsigned i;
 
@@ -143,11 +143,12 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
 
 static void mdp4_destroy(struct msm_kms *kms)
 {
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
 	kfree(mdp4_kms);
 }
 
-static const struct msm_kms_funcs kms_funcs = {
+static const struct mdp_kms_funcs kms_funcs = {
+	.base = {
 		.hw_init = mdp4_hw_init,
 		.irq_preinstall = mdp4_irq_preinstall,
 		.irq_postinstall = mdp4_irq_postinstall,
@@ -159,6 +160,8 @@ static const struct msm_kms_funcs kms_funcs = {
 		.round_pixclk = mdp4_round_pixclk,
 		.preclose = mdp4_preclose,
 		.destroy = mdp4_destroy,
+	},
+	.set_irqmask = mdp4_set_irqmask,
 };
 
 int mdp4_disable(struct mdp4_kms *mdp4_kms)
@@ -273,8 +276,9 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 		goto fail;
 	}
 
-	kms = &mdp4_kms->base;
-	kms->funcs = &kms_funcs;
+	mdp_kms_init(&mdp4_kms->base, &kms_funcs);
+
+	kms = &mdp4_kms->base.base;
 
 	mdp4_kms->dev = dev;
 
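
Note: the kms_funcs change above is the usual C "subclassed vtable" idiom: the generic msm_kms_funcs table is embedded as .base inside mdp_kms_funcs, the extra set_irqmask hook rides alongside it, and mdp_kms_init() hands &funcs->base to the generic layer, so code that only knows about msm_kms is untouched. A standalone toy version of the same idiom follows; all names in it are invented for illustration, none come from the patch.

#include <stdio.h>

/* generic layer */
struct base_ops { void (*hello)(void); };
struct base_obj { const struct base_ops *ops; };

static void base_init(struct base_obj *obj, const struct base_ops *ops)
{
	obj->ops = ops;
}

/* specialized layer: embeds the generic vtable and adds a hook */
struct ext_ops {
	struct base_ops base;
	void (*extra)(int v);
};
struct ext_obj {
	struct base_obj base;
	const struct ext_ops *ops;
};

static void ext_init(struct ext_obj *obj, const struct ext_ops *ops)
{
	obj->ops = ops;
	base_init(&obj->base, &ops->base);   /* generic code sees only .base */
}

static void say_hello(void) { printf("hello\n"); }
static void do_extra(int v) { printf("extra %d\n", v); }

static const struct ext_ops my_ops = {
	.base  = { .hello = say_hello },
	.extra = do_extra,
};

int main(void)
{
	struct ext_obj o;
	ext_init(&o, &my_ops);
	o.base.ops->hello();   /* generic path */
	o.ops->extra(42);      /* specialized path */
	return 0;
}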
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index d5e6819b1f51..66a4d31aec80 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -23,22 +23,8 @@
 #include "mdp/mdp_kms.h"
 #include "mdp4.xml.h"
 
-
-/* For transiently registering for different MDP4 irqs that various parts
- * of the KMS code need during setup/configuration.  We these are not
- * necessarily the same as what drm_vblank_get/put() are requesting, and
- * the hysteresis in drm_vblank_put() is not necessarily desirable for
- * internal housekeeping related irq usage.
- */
-struct mdp4_irq {
-	struct list_head node;
-	uint32_t irqmask;
-	bool registered;
-	void (*irq)(struct mdp4_irq *irq, uint32_t irqstatus);
-};
-
 struct mdp4_kms {
-	struct msm_kms base;
+	struct mdp_kms base;
 
 	struct drm_device *dev;
 
@@ -57,11 +43,7 @@ struct mdp4_kms {
 	struct clk *pclk;
 	struct clk *lut_clk;
 
-	/* irq handling: */
-	bool in_irq;
-	struct list_head irq_list; /* list of mdp4_irq */
-	uint32_t vblank_mask; /* irq bits set for userspace vblank */
-	struct mdp4_irq error_handler;
+	struct mdp_irq error_handler;
 };
 #define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
 
@@ -166,13 +148,11 @@ static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe,
 int mdp4_disable(struct mdp4_kms *mdp4_kms);
 int mdp4_enable(struct mdp4_kms *mdp4_kms);
 
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
 void mdp4_irq_preinstall(struct msm_kms *kms);
 int mdp4_irq_postinstall(struct msm_kms *kms);
 void mdp4_irq_uninstall(struct msm_kms *kms);
 irqreturn_t mdp4_irq(struct msm_kms *kms);
-void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask);
-void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
-void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
 int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
 void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index d2edf2b2a816..2406027200ec 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -34,7 +34,7 @@ struct mdp4_plane {
 static struct mdp4_kms *get_kms(struct drm_plane *plane)
 {
 	struct msm_drm_private *priv = plane->dev->dev_private;
-	return to_mdp4_kms(priv->kms);
+	return to_mdp4_kms(to_mdp_kms(priv->kms));
 }
 
 static int mdp4_plane_update(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c
new file mode 100644
index 000000000000..3be48f7c36be
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp_kms.h"
+
+
+struct mdp_irq_wait {
+	struct mdp_irq irq;
+	int count;
+};
+
+static DECLARE_WAIT_QUEUE_HEAD(wait_event);
+
+static DEFINE_SPINLOCK(list_lock);
+
+static void update_irq(struct mdp_kms *mdp_kms)
+{
+	struct mdp_irq *irq;
+	uint32_t irqmask = mdp_kms->vblank_mask;
+
+	BUG_ON(!spin_is_locked(&list_lock));
+
+	list_for_each_entry(irq, &mdp_kms->irq_list, node)
+		irqmask |= irq->irqmask;
+
+	mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
+}
+
+static void update_irq_unlocked(struct mdp_kms *mdp_kms)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&list_lock, flags);
+	update_irq(mdp_kms);
+	spin_unlock_irqrestore(&list_lock, flags);
+}
+
+void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status)
+{
+	struct mdp_irq *handler, *n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&list_lock, flags);
+	mdp_kms->in_irq = true;
+	list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) {
+		if (handler->irqmask & status) {
+			spin_unlock_irqrestore(&list_lock, flags);
+			handler->irq(handler, handler->irqmask & status);
+			spin_lock_irqsave(&list_lock, flags);
+		}
+	}
+	mdp_kms->in_irq = false;
+	update_irq(mdp_kms);
+	spin_unlock_irqrestore(&list_lock, flags);
+
+}
+
+void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&list_lock, flags);
+	if (enable)
+		mdp_kms->vblank_mask |= mask;
+	else
+		mdp_kms->vblank_mask &= ~mask;
+	update_irq(mdp_kms);
+	spin_unlock_irqrestore(&list_lock, flags);
+}
+
+static void wait_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+	struct mdp_irq_wait *wait =
+		container_of(irq, struct mdp_irq_wait, irq);
+	wait->count--;
+	wake_up_all(&wait_event);
+}
+
+void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask)
+{
+	struct mdp_irq_wait wait = {
+		.irq = {
+			.irq = wait_irq,
+			.irqmask = irqmask,
+		},
+		.count = 1,
+	};
+	mdp_irq_register(mdp_kms, &wait.irq);
+	wait_event(wait_event, (wait.count <= 0));
+	mdp_irq_unregister(mdp_kms, &wait.irq);
+}
+
+void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
+{
+	unsigned long flags;
+	bool needs_update = false;
+
+	spin_lock_irqsave(&list_lock, flags);
+
+	if (!irq->registered) {
+		irq->registered = true;
+		list_add(&irq->node, &mdp_kms->irq_list);
+		needs_update = !mdp_kms->in_irq;
+	}
+
+	spin_unlock_irqrestore(&list_lock, flags);
+
+	if (needs_update)
+		update_irq_unlocked(mdp_kms);
+}
+
+void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
+{
+	unsigned long flags;
+	bool needs_update = false;
+
+	spin_lock_irqsave(&list_lock, flags);
+
+	if (irq->registered) {
+		irq->registered = false;
+		list_del(&irq->node);
+		needs_update = !mdp_kms->in_irq;
+	}
+
+	spin_unlock_irqrestore(&list_lock, flags);
+
+	if (needs_update)
+		update_irq_unlocked(mdp_kms);
+}
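
Note: a typical client of these helpers embeds a struct mdp_irq in its own object, recovers the container with container_of() in the callback, and registers/unregisters it around the window it cares about (this is exactly what mdp4_crtc.c does for vblank, and mdp4_dtv_encoder.c uses mdp_irq_wait() for the one-shot case). The sketch below only illustrates that usage pattern; struct my_client and its functions are invented names, not code from the patch.

struct my_client {
	struct mdp_irq vblank;   /* embedded, so no allocation is needed */
	/* ... */
};

static void my_client_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	/* recover the containing object from the embedded mdp_irq */
	struct my_client *c = container_of(irq, struct my_client, vblank);

	/* ... handle the event, possibly mdp_irq_unregister() when done ... */
}

static void my_client_arm(struct mdp_kms *mdp_kms, struct my_client *c)
{
	c->vblank.irq = my_client_vblank_irq;
	c->vblank.irqmask = MDP4_IRQ_EXTERNAL_VSYNC;
	mdp_irq_register(mdp_kms, &c->vblank);

	/* or, for a one-shot synchronous wait instead of a callback: */
	mdp_irq_wait(mdp_kms, MDP4_IRQ_EXTERNAL_VSYNC);
}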
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index 710edf7e19d1..99557b5ad4fd 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -23,8 +23,64 @@
 #include <linux/regulator/consumer.h>
 
 #include "msm_drv.h"
+#include "msm_kms.h"
 #include "mdp_common.xml.h"
 
+struct mdp_kms;
+
+struct mdp_kms_funcs {
+	struct msm_kms_funcs base;
+	void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask);
+};
+
+struct mdp_kms {
+	struct msm_kms base;
+
+	const struct mdp_kms_funcs *funcs;
+
+	/* irq handling: */
+	bool in_irq;
+	struct list_head irq_list; /* list of mdp4_irq */
+	uint32_t vblank_mask; /* irq bits set for userspace vblank */
+};
+#define to_mdp_kms(x) container_of(x, struct mdp_kms, base)
+
+static inline void mdp_kms_init(struct mdp_kms *mdp_kms,
+		const struct mdp_kms_funcs *funcs)
+{
+	mdp_kms->funcs = funcs;
+	INIT_LIST_HEAD(&mdp_kms->irq_list);
+	msm_kms_init(&mdp_kms->base, &funcs->base);
+}
+
+/*
+ * irq helpers:
+ */
+
+/* For transiently registering for different MDP irqs that various parts
+ * of the KMS code need during setup/configuration.  These are not
+ * necessarily the same as what drm_vblank_get/put() are requesting, and
+ * the hysteresis in drm_vblank_put() is not necessarily desirable for
+ * internal housekeeping related irq usage.
+ */
+struct mdp_irq {
+	struct list_head node;
+	uint32_t irqmask;
+	bool registered;
+	void (*irq)(struct mdp_irq *irq, uint32_t irqstatus);
+};
+
+void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status);
+void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable);
+void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
+void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
+
+
+/*
+ * pixel format helpers:
+ */
+
 struct mdp_format {
 	struct msm_format base;
 	enum mdp_bpc bpc_r, bpc_g, bpc_b;
@@ -35,7 +91,6 @@ struct mdp_format {
 };
 #define to_mdp_format(x) container_of(x, struct mdp_format, base)
 
-
 uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats);
 const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
 
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index f01e239f7261..dc0d30f5b291 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -49,8 +49,19 @@ struct msm_kms_funcs {
 
 struct msm_kms {
 	const struct msm_kms_funcs *funcs;
+
+	/* irq handling: */
+	bool in_irq;
+	struct list_head irq_list; /* list of mdp4_irq */
+	uint32_t vblank_mask; /* irq bits set for userspace vblank */
 };
 
+static inline void msm_kms_init(struct msm_kms *kms,
+		const struct msm_kms_funcs *funcs)
+{
+	kms->funcs = funcs;
+}
+
 struct msm_kms *mdp4_kms_init(struct drm_device *dev);
 
 #endif /* __MSM_KMS_H__ */
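
Note: with msm_kms_init() in place, the top-level driver still only ever sees the opaque struct msm_kms; the MDP layering is invisible to it. Roughly, from the caller's point of view (this is a simplified paraphrase of the msm_drv.c load path, not code from this patch; error handling and surrounding probe logic are omitted):

static int msm_load_kms(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = mdp4_kms_init(dev);  /* returns &mdp4_kms->base.base */

	if (IS_ERR(kms))
		return PTR_ERR(kms);

	priv->kms = kms;
	/* generic code only ever goes through the base vtable: */
	return kms->funcs->hw_init(kms);
}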