author     Dave Airlie <airlied@starflyer.(none)>  2005-09-25 00:28:13 -0400
committer  Dave Airlie <airlied@linux.ie>          2005-09-25 00:28:13 -0400
commit     b5e89ed53ed8d24f83ba1941c07382af00ed238e (patch)
tree       747bae7a565f88a2e1d5974776eeb054a932c505 /drivers/char/drm/via_irq.c
parent     99a2657a29e2d623c3568cd86b27cac13fb63140 (diff)
drm: lindent the drm directory.
I've been threatening this for a while, so no point hanging around. This lindents the DRM code which was always really bad in tabbing department. I've also fixed some misnamed files in comments and removed some trailing whitespace.

Signed-off-by: Dave Airlie <airlied@linux.ie>
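The reflow of time_diff() in the first hunk is representative of what the Lindent pass changes: a space after the comma in the parameter list and continuation lines re-indented to the usual kernel style. For reference, here is a minimal standalone sketch of the post-lindent form, lifted from the hunk below; in the kernel this is simply part of via_irq.c, so the #include is only there to let the snippet compile on its own:

    #include <sys/time.h>

    /* Microsecond delta between two vblank timestamps. If the usec counter of
     * 'now' has wrapped past a second boundary relative to 'then', return the
     * wrapped difference instead of a negative value.
     */
    static unsigned time_diff(struct timeval *now, struct timeval *then)
    {
            return (now->tv_usec >= then->tv_usec) ?
                now->tv_usec - then->tv_usec :
                1000000 - (then->tv_usec - now->tv_usec);
    }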
Diffstat (limited to 'drivers/char/drm/via_irq.c')
-rw-r--r--  drivers/char/drm/via_irq.c  106
1 file changed, 55 insertions, 51 deletions
diff --git a/drivers/char/drm/via_irq.c b/drivers/char/drm/via_irq.c
index e8027f3a93b0..d023add1929b 100644
--- a/drivers/char/drm/via_irq.c
+++ b/drivers/char/drm/via_irq.c
@@ -54,23 +54,26 @@
 /*
  * Device-specific IRQs go here. This type might need to be extended with
  * the register if there are multiple IRQ control registers.
  * Currently we activate the HQV interrupts of Unichrome Pro group A.
  */
 
 static maskarray_t via_pro_group_a_irqs[] = {
-	{VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010, 0x00000000 },
-	{VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010, 0x00000000 }};
-static int via_num_pro_group_a = sizeof(via_pro_group_a_irqs)/sizeof(maskarray_t);
+	{VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
+	 0x00000000},
+	{VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
+	 0x00000000}
+};
+static int via_num_pro_group_a =
+    sizeof(via_pro_group_a_irqs) / sizeof(maskarray_t);
 
-static maskarray_t via_unichrome_irqs[] = {};
-static int via_num_unichrome = sizeof(via_unichrome_irqs)/sizeof(maskarray_t);
-
+static maskarray_t via_unichrome_irqs[] = { };
+static int via_num_unichrome = sizeof(via_unichrome_irqs) / sizeof(maskarray_t);
 
-static unsigned time_diff(struct timeval *now,struct timeval *then)
+static unsigned time_diff(struct timeval *now, struct timeval *then)
 {
 	return (now->tv_usec >= then->tv_usec) ?
-		now->tv_usec - then->tv_usec :
-		1000000 - (then->tv_usec - now->tv_usec);
+	    now->tv_usec - then->tv_usec :
+	    1000000 - (then->tv_usec - now->tv_usec);
 }
 
 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
@@ -86,38 +89,37 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
 	status = VIA_READ(VIA_REG_INTERRUPT);
 	if (status & VIA_IRQ_VBLANK_PENDING) {
 		atomic_inc(&dev->vbl_received);
 		if (!(atomic_read(&dev->vbl_received) & 0x0F)) {
 			do_gettimeofday(&cur_vblank);
 			if (dev_priv->last_vblank_valid) {
 				dev_priv->usec_per_vblank =
-					time_diff( &cur_vblank,&dev_priv->last_vblank) >> 4;
+				    time_diff(&cur_vblank,
+					      &dev_priv->last_vblank) >> 4;
 			}
 			dev_priv->last_vblank = cur_vblank;
 			dev_priv->last_vblank_valid = 1;
 		}
 		if (!(atomic_read(&dev->vbl_received) & 0xFF)) {
 			DRM_DEBUG("US per vblank is: %u\n",
 				  dev_priv->usec_per_vblank);
 		}
 		DRM_WAKEUP(&dev->vbl_queue);
 		drm_vbl_send_signals(dev);
 		handled = 1;
 	}
-
 
-	for (i=0; i<dev_priv->num_irqs; ++i) {
+	for (i = 0; i < dev_priv->num_irqs; ++i) {
 		if (status & cur_irq->pending_mask) {
-			atomic_inc( &cur_irq->irq_received );
-			DRM_WAKEUP( &cur_irq->irq_queue );
+			atomic_inc(&cur_irq->irq_received);
+			DRM_WAKEUP(&cur_irq->irq_queue);
 			handled = 1;
 		}
 		cur_irq++;
 	}
 
 	/* Acknowlege interrupts */
 	VIA_WRITE(VIA_REG_INTERRUPT, status);
 
-
 	if (handled)
 		return IRQ_HANDLED;
 	else
@@ -131,7 +133,7 @@ static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
 	if (dev_priv) {
 		/* Acknowlege interrupts */
 		status = VIA_READ(VIA_REG_INTERRUPT);
-		VIA_WRITE(VIA_REG_INTERRUPT, status |
-			dev_priv->irq_pending_mask);
+		VIA_WRITE(VIA_REG_INTERRUPT, status |
+			  dev_priv->irq_pending_mask);
 	}
 }
@@ -158,12 +160,12 @@ int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
-	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
-		(((cur_vblank = atomic_read(&dev->vbl_received)) -
-		*sequence) <= (1 << 23)));
+	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+		    (((cur_vblank = atomic_read(&dev->vbl_received)) -
+		      *sequence) <= (1 << 23)));
 
 	*sequence = cur_vblank;
 	return ret;
 }
 
 static int
 via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
 		    unsigned int *sequence)
 {
@@ -180,27 +182,29 @@ via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
 		return DRM_ERR(EINVAL);
 	}
 
-	if (irq >= dev_priv->num_irqs ) {
-		DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, irq);
+	if (irq >= dev_priv->num_irqs) {
+		DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
+			  irq);
 		return DRM_ERR(EINVAL);
 	}
 
 	cur_irq += irq;
 
 	if (masks[irq][2] && !force_sequence) {
 		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
-			((VIA_READ(masks[irq][2]) & masks[irq][3]) == masks[irq][4]));
+			    ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
+			     masks[irq][4]));
 		cur_irq_sequence = atomic_read(&cur_irq->irq_received);
 	} else {
 		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
-			(((cur_irq_sequence = atomic_read(&cur_irq->irq_received)) -
-			*sequence) <= (1 << 23)));
+			    (((cur_irq_sequence =
+			       atomic_read(&cur_irq->irq_received)) -
+			      *sequence) <= (1 << 23)));
 	}
 	*sequence = cur_irq_sequence;
 	return ret;
 }
 
-
 /*
  * drm_dma.h hooks
  */
@@ -219,29 +223,29 @@ void via_driver_irq_preinstall(drm_device_t * dev)
 		dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;
 
 		dev_priv->irq_masks = (dev_priv->pro_group_a) ?
 		    via_pro_group_a_irqs : via_unichrome_irqs;
 		dev_priv->num_irqs = (dev_priv->pro_group_a) ?
 		    via_num_pro_group_a : via_num_unichrome;
 
-		for(i=0; i < dev_priv->num_irqs; ++i) {
+		for (i = 0; i < dev_priv->num_irqs; ++i) {
 			atomic_set(&cur_irq->irq_received, 0);
 			cur_irq->enable_mask = dev_priv->irq_masks[i][0];
 			cur_irq->pending_mask = dev_priv->irq_masks[i][1];
-			DRM_INIT_WAITQUEUE( &cur_irq->irq_queue );
+			DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
 			dev_priv->irq_enable_mask |= cur_irq->enable_mask;
 			dev_priv->irq_pending_mask |= cur_irq->pending_mask;
 			cur_irq++;
 
 			DRM_DEBUG("Initializing IRQ %d\n", i);
 		}
 
 		dev_priv->last_vblank_valid = 0;
 
 		// Clear VSync interrupt regs
 		status = VIA_READ(VIA_REG_INTERRUPT);
 		VIA_WRITE(VIA_REG_INTERRUPT, status &
 			  ~(dev_priv->irq_enable_mask));
 
 		/* Clear bits if they're already high */
 		viadrv_acknowledge_irqs(dev_priv);
 	}
@@ -262,7 +266,7 @@ void via_driver_irq_postinstall(drm_device_t * dev)
 
 		VIA_WRITE8(0x83d4, 0x11);
 		VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
-		
+
 	}
 }
 
@@ -280,7 +284,7 @@ void via_driver_irq_uninstall(drm_device_t * dev)
 		VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
 
 		status = VIA_READ(VIA_REG_INTERRUPT);
-		VIA_WRITE(VIA_REG_INTERRUPT, status &
-			~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));
+		VIA_WRITE(VIA_REG_INTERRUPT, status &
+			  ~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));
 	}
 }
@@ -302,7 +306,7 @@ int via_wait_irq(DRM_IOCTL_ARGS)
 
 	DRM_COPY_FROM_USER_IOCTL(irqwait, argp, sizeof(irqwait));
 	if (irqwait.request.irq >= dev_priv->num_irqs) {
-		DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
-			irqwait.request.irq);
+		DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
+			  irqwait.request.irq);
 		return DRM_ERR(EINVAL);
 	}
@@ -320,7 +324,7 @@ int via_wait_irq(DRM_IOCTL_ARGS)
 	}
 
 	if (irqwait.request.type & VIA_IRQ_SIGNAL) {
-		DRM_ERROR("%s Signals on Via IRQs not implemented yet.\n",
-			__FUNCTION__);
+		DRM_ERROR("%s Signals on Via IRQs not implemented yet.\n",
+			  __FUNCTION__);
 		return DRM_ERR(EINVAL);
 	}