Diffstat (limited to 'drivers/gpu/drm/omapdrm/omap_irq.c')
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_irq.c  |  94
1 file changed, 35 insertions(+), 59 deletions(-)
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 1982759a1c27..0ef2d609653e 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -21,17 +21,23 @@
 
 static DEFINE_SPINLOCK(list_lock);
 
+struct omap_irq_wait {
+	struct list_head node;
+	uint32_t irqmask;
+	int count;
+};
+
 /* call with list_lock and dispc runtime held */
 static void omap_irq_update(struct drm_device *dev)
 {
 	struct omap_drm_private *priv = dev->dev_private;
-	struct omap_drm_irq *irq;
+	struct omap_irq_wait *wait;
 	uint32_t irqmask = priv->irq_mask;
 
 	assert_spin_locked(&list_lock);
 
-	list_for_each_entry(irq, &priv->irq_list, node)
-		irqmask |= irq->irqmask;
+	list_for_each_entry(wait, &priv->wait_list, node)
+		irqmask |= wait->irqmask;
 
 	DBG("irqmask=%08x", irqmask);
 
@@ -39,61 +45,29 @@ static void omap_irq_update(struct drm_device *dev)
 	dispc_read_irqenable();	/* flush posted write */
 }
 
-static void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
-{
-	struct omap_drm_private *priv = dev->dev_private;
-	unsigned long flags;
-
-	spin_lock_irqsave(&list_lock, flags);
-
-	if (!WARN_ON(irq->registered)) {
-		irq->registered = true;
-		list_add(&irq->node, &priv->irq_list);
-		omap_irq_update(dev);
-	}
-
-	spin_unlock_irqrestore(&list_lock, flags);
-}
-
-static void omap_irq_unregister(struct drm_device *dev,
-		struct omap_drm_irq *irq)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&list_lock, flags);
-
-	if (!WARN_ON(!irq->registered)) {
-		irq->registered = false;
-		list_del(&irq->node);
-		omap_irq_update(dev);
-	}
-
-	spin_unlock_irqrestore(&list_lock, flags);
-}
-
-struct omap_irq_wait {
-	struct omap_drm_irq irq;
-	int count;
-};
-
 static DECLARE_WAIT_QUEUE_HEAD(wait_event);
 
-static void wait_irq(struct omap_drm_irq *irq)
+static void omap_irq_wait_handler(struct omap_irq_wait *wait)
 {
-	struct omap_irq_wait *wait =
-			container_of(irq, struct omap_irq_wait, irq);
 	wait->count--;
-	wake_up_all(&wait_event);
+	wake_up(&wait_event);
 }
 
 struct omap_irq_wait * omap_irq_wait_init(struct drm_device *dev,
 		uint32_t irqmask, int count)
 {
+	struct omap_drm_private *priv = dev->dev_private;
 	struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);
-	wait->irq.irq = wait_irq;
-	wait->irq.irqmask = irqmask;
+	unsigned long flags;
+
+	wait->irqmask = irqmask;
 	wait->count = count;
-	omap_irq_register(dev, &wait->irq);
+
+	spin_lock_irqsave(&list_lock, flags);
+	list_add(&wait->node, &priv->wait_list);
+	omap_irq_update(dev);
+	spin_unlock_irqrestore(&list_lock, flags);
+
 	return wait;
 }
 
@@ -101,11 +75,16 @@ int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
 		unsigned long timeout)
 {
 	int ret = wait_event_timeout(wait_event, (wait->count <= 0), timeout);
-	omap_irq_unregister(dev, &wait->irq);
+	unsigned long flags;
+
+	spin_lock_irqsave(&list_lock, flags);
+	list_del(&wait->node);
+	omap_irq_update(dev);
+	spin_unlock_irqrestore(&list_lock, flags);
+
 	kfree(wait);
-	if (ret == 0)
-		return -1;
-	return 0;
+
+	return ret == 0 ? -1 : 0;
 }
 
 /**
@@ -213,7 +192,7 @@ static irqreturn_t omap_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	struct omap_drm_private *priv = dev->dev_private;
-	struct omap_drm_irq *handler, *n;
+	struct omap_irq_wait *wait, *n;
 	unsigned long flags;
 	unsigned int id;
 	u32 irqstatus;
@@ -241,12 +220,9 @@ static irqreturn_t omap_irq_handler(int irq, void *arg)
 		omap_irq_fifo_underflow(priv, irqstatus);
 
 	spin_lock_irqsave(&list_lock, flags);
-	list_for_each_entry_safe(handler, n, &priv->irq_list, node) {
-		if (handler->irqmask & irqstatus) {
-			spin_unlock_irqrestore(&list_lock, flags);
-			handler->irq(handler);
-			spin_lock_irqsave(&list_lock, flags);
-		}
+	list_for_each_entry_safe(wait, n, &priv->wait_list, node) {
+		if (wait->irqmask & irqstatus)
+			omap_irq_wait_handler(wait);
 	}
 	spin_unlock_irqrestore(&list_lock, flags);
 
@@ -275,7 +251,7 @@ int omap_drm_irq_install(struct drm_device *dev)
 	unsigned int i;
 	int ret;
 
-	INIT_LIST_HEAD(&priv->irq_list);
+	INIT_LIST_HEAD(&priv->wait_list);
 
 	priv->irq_mask = DISPC_IRQ_OCP_ERR;
 
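
Note: the external wait API is unchanged by this rework; only the bookkeeping behind omap_irq_wait_init() and omap_irq_wait() moves from the generic omap_drm_irq handler list to the dedicated wait_list, with the handler decrementing the wait count directly. Below is a minimal caller sketch, not part of this patch: the helper name is hypothetical, and DISPC_IRQ_FRAMEDONE plus the 5 ms timeout are purely illustrative values.

/*
 * Hypothetical caller, for illustration only: arm a wait on an IRQ mask,
 * then block until the interrupt fires or the timeout expires.
 */
static int example_wait_for_framedone(struct drm_device *dev)
{
	struct omap_irq_wait *wait;

	/* Adds the wait node to wait_list and unmasks the requested IRQ. */
	wait = omap_irq_wait_init(dev, DISPC_IRQ_FRAMEDONE, 1);

	/*
	 * Sleeps until omap_irq_wait_handler() has decremented the count to
	 * zero or the timeout elapses; omap_irq_wait() then removes the node,
	 * updates the IRQ mask and frees the wait object. Returns 0 on
	 * success, -1 on timeout.
	 */
	return omap_irq_wait(dev, wait, msecs_to_jiffies(5));
}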