path: root/drivers/char/drm/savage_bci.c
author     Dave Airlie <airlied@starflyer.(none)>    2005-09-25 00:28:13 -0400
committer  Dave Airlie <airlied@linux.ie>            2005-09-25 00:28:13 -0400
commit     b5e89ed53ed8d24f83ba1941c07382af00ed238e
tree       747bae7a565f88a2e1d5974776eeb054a932c505  /drivers/char/drm/savage_bci.c
parent     99a2657a29e2d623c3568cd86b27cac13fb63140
drm: lindent the drm directory.
I've been threatening this for a while, so no point hanging around. This lindents the DRM code which was always really bad in tabbing department. I've also fixed some misnamed files in comments and removed some trailing whitespace.

Signed-off-by: Dave Airlie <airlied@linux.ie>
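For readers who haven't used it, Lindent (the kernel's scripts/Lindent wrapper around GNU indent) applies a fixed set of whitespace rules, which is why nearly every hunk below is whitespace-only: a space appears after the '*' in pointer parameters, casts gain a space before their operand, "while(" becomes "while (", and continuation lines of long statements are re-wrapped. The snippet below is a minimal, hypothetical illustration of the resulting style; the names foo and bar_t are made up and do not come from this driver.

#include <stdint.h>

/* Hypothetical type, for illustration only. */
typedef struct bar {
        uint32_t used;
} bar_t;

/* Post-Lindent formatting; before Lindent this would have read
 * "foo(bar_t *p, ...)", "(uint32_t)n" and "while(n != 0)". */
static int foo(bar_t * p, unsigned int n)
{
        p->used += (uint32_t) n;        /* space after the cast */
        while (n != 0)                  /* space after "while" */
                n--;
        return 0;
}

int main(void)
{
        bar_t b = { 0 };
        return foo(&b, 4);
}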
Diffstat (limited to 'drivers/char/drm/savage_bci.c')
-rw-r--r--  drivers/char/drm/savage_bci.c   201
1 files changed, 104 insertions, 97 deletions
diff --git a/drivers/char/drm/savage_bci.c b/drivers/char/drm/savage_bci.c
index 2fd40bac7c97..0ad9201af8bd 100644
--- a/drivers/char/drm/savage_bci.c
+++ b/drivers/char/drm/savage_bci.c
@@ -28,12 +28,12 @@
 
 /* Need a long timeout for shadow status updates can take a while
  * and so can waiting for events when the queue is full. */
 #define SAVAGE_DEFAULT_USEC_TIMEOUT 1000000 /* 1s */
 #define SAVAGE_EVENT_USEC_TIMEOUT 5000000 /* 5s */
 #define SAVAGE_FREELIST_DEBUG 0
 
 static int
-savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n)
+savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n)
 {
         uint32_t mask = dev_priv->status_used_mask;
         uint32_t threshold = dev_priv->bci_threshold_hi;
@@ -62,7 +62,7 @@ savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n)
 }
 
 static int
-savage_bci_wait_fifo_s3d(drm_savage_private_t *dev_priv, unsigned int n)
+savage_bci_wait_fifo_s3d(drm_savage_private_t * dev_priv, unsigned int n)
 {
         uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
         uint32_t status;
@@ -83,7 +83,7 @@ savage_bci_wait_fifo_s3d(drm_savage_private_t *dev_priv, unsigned int n)
 }
 
 static int
-savage_bci_wait_fifo_s4(drm_savage_private_t *dev_priv, unsigned int n)
+savage_bci_wait_fifo_s4(drm_savage_private_t * dev_priv, unsigned int n)
 {
         uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
         uint32_t status;
@@ -115,7 +115,7 @@ savage_bci_wait_fifo_s4(drm_savage_private_t *dev_priv, unsigned int n)
  * rule. Otherwise there may be glitches every 2^16 events.
  */
 static int
-savage_bci_wait_event_shadow(drm_savage_private_t *dev_priv, uint16_t e)
+savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e)
 {
         uint32_t status;
         int i;
@@ -138,7 +138,7 @@ savage_bci_wait_event_shadow(drm_savage_private_t *dev_priv, uint16_t e)
 }
 
 static int
-savage_bci_wait_event_reg(drm_savage_private_t *dev_priv, uint16_t e)
+savage_bci_wait_event_reg(drm_savage_private_t * dev_priv, uint16_t e)
 {
         uint32_t status;
         int i;
@@ -159,7 +159,7 @@ savage_bci_wait_event_reg(drm_savage_private_t *dev_priv, uint16_t e)
         return DRM_ERR(EBUSY);
 }
 
-uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
+uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
                                unsigned int flags)
 {
         uint16_t count;
@@ -175,12 +175,12 @@ uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
         }
         count = (count + 1) & 0xffff;
         if (count == 0) {
                 count++;        /* See the comment above savage_wait_event_*. */
                 dev_priv->event_wrap++;
         }
         dev_priv->event_counter = count;
         if (dev_priv->status_ptr)
-                dev_priv->status_ptr[1023] = (uint32_t)count;
+                dev_priv->status_ptr[1023] = (uint32_t) count;
 
         if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) {
                 unsigned int wait_cmd = BCI_CMD_WAIT;
@@ -193,7 +193,7 @@ uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
         } else {
                 BEGIN_BCI(1);
         }
-        BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t)count);
+        BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t) count);
 
         return count;
 }
@@ -201,7 +201,7 @@ uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
 /*
  * Freelist management
  */
-static int savage_freelist_init(drm_device_t *dev)
+static int savage_freelist_init(drm_device_t * dev)
 {
         drm_savage_private_t *dev_priv = dev->dev_private;
         drm_device_dma_t *dma = dev->dma;
@@ -234,7 +234,7 @@ static int savage_freelist_init(drm_device_t *dev)
         return 0;
 }
 
-static drm_buf_t *savage_freelist_get(drm_device_t *dev)
+static drm_buf_t *savage_freelist_get(drm_device_t * dev)
 {
         drm_savage_private_t *dev_priv = dev->dev_private;
         drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
@@ -249,7 +249,7 @@ static drm_buf_t *savage_freelist_get(drm_device_t *dev)
         event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
         wrap = dev_priv->event_wrap;
         if (event > dev_priv->event_counter)
-                wrap--; /* hardware hasn't passed the last wrap yet */
+                wrap--;        /* hardware hasn't passed the last wrap yet */
 
         DRM_DEBUG("  tail=0x%04x %d\n", tail->age.event, tail->age.wrap);
         DRM_DEBUG("  head=0x%04x %d\n", event, wrap);
@@ -267,7 +267,7 @@ static drm_buf_t *savage_freelist_get(drm_device_t *dev)
         return NULL;
 }
 
-void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf)
+void savage_freelist_put(drm_device_t * dev, drm_buf_t * buf)
 {
         drm_savage_private_t *dev_priv = dev->dev_private;
         drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;
@@ -290,15 +290,14 @@ void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf)
 /*
  * Command DMA
  */
-static int savage_dma_init(drm_savage_private_t *dev_priv)
+static int savage_dma_init(drm_savage_private_t * dev_priv)
 {
         unsigned int i;
 
         dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
-            (SAVAGE_DMA_PAGE_SIZE*4);
+            (SAVAGE_DMA_PAGE_SIZE * 4);
         dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) *
-                                        dev_priv->nr_dma_pages,
-                                        DRM_MEM_DRIVER);
+                                        dev_priv->nr_dma_pages, DRM_MEM_DRIVER);
         if (dev_priv->dma_pages == NULL)
                 return DRM_ERR(ENOMEM);
 
@@ -315,7 +314,7 @@ static int savage_dma_init(drm_savage_private_t *dev_priv)
         return 0;
 }
 
-void savage_dma_reset(drm_savage_private_t *dev_priv)
+void savage_dma_reset(drm_savage_private_t * dev_priv)
 {
         uint16_t event;
         unsigned int wrap, i;
@@ -330,7 +329,7 @@ void savage_dma_reset(drm_savage_private_t *dev_priv)
         dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
 }
 
-void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page)
+void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page)
 {
         uint16_t event;
         unsigned int wrap;
@@ -346,7 +345,7 @@ void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page)
         event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
         wrap = dev_priv->event_wrap;
         if (event > dev_priv->event_counter)
-                wrap--; /* hardware hasn't passed the last wrap yet */
+                wrap--;        /* hardware hasn't passed the last wrap yet */
 
         if (dev_priv->dma_pages[page].age.wrap > wrap ||
             (dev_priv->dma_pages[page].age.wrap == wrap &&
@@ -358,13 +357,13 @@ void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page)
         }
 }
 
-uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
+uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv, unsigned int n)
 {
         unsigned int cur = dev_priv->current_dma_page;
         unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
             dev_priv->dma_pages[cur].used;
-        unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE-1) /
+        unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) /
             SAVAGE_DMA_PAGE_SIZE;
         uint32_t *dma_ptr;
         unsigned int i;
 
@@ -372,9 +371,8 @@ uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
                   cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);
 
         if (cur + nr_pages < dev_priv->nr_dma_pages) {
-                dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
-                        cur*SAVAGE_DMA_PAGE_SIZE +
-                        dev_priv->dma_pages[cur].used;
+                dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
+                    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
                 if (n < rest)
                         rest = n;
                 dev_priv->dma_pages[cur].used += rest;
@@ -382,13 +380,14 @@ uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
                 cur++;
         } else {
                 dev_priv->dma_flush(dev_priv);
-                nr_pages = (n + SAVAGE_DMA_PAGE_SIZE-1) / SAVAGE_DMA_PAGE_SIZE;
+                nr_pages =
+                    (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE;
                 for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
                         dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
                         dev_priv->dma_pages[i].used = 0;
                         dev_priv->dma_pages[i].flushed = 0;
                 }
-                dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle;
+                dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle;
                 dev_priv->first_dma_page = cur = 0;
         }
         for (i = cur; nr_pages > 0; ++i, --nr_pages) {
@@ -414,7 +413,7 @@ uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
         return dma_ptr;
 }
 
-static void savage_dma_flush(drm_savage_private_t *dev_priv)
+static void savage_dma_flush(drm_savage_private_t * dev_priv)
 {
         unsigned int first = dev_priv->first_dma_page;
         unsigned int cur = dev_priv->current_dma_page;
@@ -439,11 +438,10 @@ static void savage_dma_flush(drm_savage_private_t *dev_priv)
 
         /* pad with noops */
         if (pad) {
-                uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
-                        cur * SAVAGE_DMA_PAGE_SIZE +
-                        dev_priv->dma_pages[cur].used;
+                uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
+                    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
                 dev_priv->dma_pages[cur].used += pad;
-                while(pad != 0) {
+                while (pad != 0) {
                         *dma_ptr++ = BCI_CMD_WAIT;
                         pad--;
                 }
@@ -453,11 +451,10 @@ static void savage_dma_flush(drm_savage_private_t *dev_priv)
 
         /* do flush ... */
         phys_addr = dev_priv->cmd_dma->offset +
             (first * SAVAGE_DMA_PAGE_SIZE +
              dev_priv->dma_pages[first].flushed) * 4;
         len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
-                dev_priv->dma_pages[cur].used -
-                dev_priv->dma_pages[first].flushed;
+            dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed;
 
         DRM_DEBUG("phys_addr=%lx, len=%u\n",
                   phys_addr | dev_priv->dma_type, len);
@@ -499,7 +496,7 @@ static void savage_dma_flush(drm_savage_private_t *dev_priv)
                   dev_priv->dma_pages[cur].flushed);
 }
 
-static void savage_fake_dma_flush(drm_savage_private_t *dev_priv)
+static void savage_fake_dma_flush(drm_savage_private_t * dev_priv)
 {
         unsigned int i, j;
         BCI_LOCALS;
@@ -515,8 +512,8 @@ static void savage_fake_dma_flush(drm_savage_private_t *dev_priv)
         for (i = dev_priv->first_dma_page;
              i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
              ++i) {
-                uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
+                uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
                     i * SAVAGE_DMA_PAGE_SIZE;
 #if SAVAGE_DMA_DEBUG
                 /* Sanity check: all pages except the last one must be full. */
                 if (i < dev_priv->current_dma_page &&
@@ -543,7 +540,7 @@ static void savage_fake_dma_flush(drm_savage_private_t *dev_priv)
  * initialized. We also need to take care of deleting the MTRRs in
  * postcleanup.
  */
-int savage_preinit(drm_device_t *dev, unsigned long chipset)
+int savage_preinit(drm_device_t * dev, unsigned long chipset)
 {
         drm_savage_private_t *dev_priv;
         unsigned long mmio_base, fb_base, fb_size, aperture_base;
@@ -578,19 +575,22 @@ int savage_preinit(drm_device_t *dev, unsigned long chipset)
                  * MTRRs. */
                 dev_priv->mtrr[0].base = fb_base;
                 dev_priv->mtrr[0].size = 0x01000000;
-                dev_priv->mtrr[0].handle = mtrr_add(
-                        dev_priv->mtrr[0].base, dev_priv->mtrr[0].size,
-                        MTRR_TYPE_WRCOMB, 1);
-                dev_priv->mtrr[1].base = fb_base+0x02000000;
+                dev_priv->mtrr[0].handle =
+                    mtrr_add(dev_priv->mtrr[0].base,
+                             dev_priv->mtrr[0].size, MTRR_TYPE_WRCOMB,
+                             1);
+                dev_priv->mtrr[1].base = fb_base + 0x02000000;
                 dev_priv->mtrr[1].size = 0x02000000;
-                dev_priv->mtrr[1].handle = mtrr_add(
-                        dev_priv->mtrr[1].base, dev_priv->mtrr[1].size,
-                        MTRR_TYPE_WRCOMB, 1);
-                dev_priv->mtrr[2].base = fb_base+0x04000000;
+                dev_priv->mtrr[1].handle =
+                    mtrr_add(dev_priv->mtrr[1].base,
+                             dev_priv->mtrr[1].size, MTRR_TYPE_WRCOMB,
+                             1);
+                dev_priv->mtrr[2].base = fb_base + 0x04000000;
                 dev_priv->mtrr[2].size = 0x04000000;
-                dev_priv->mtrr[2].handle = mtrr_add(
-                        dev_priv->mtrr[2].base, dev_priv->mtrr[2].size,
-                        MTRR_TYPE_WRCOMB, 1);
+                dev_priv->mtrr[2].handle =
+                    mtrr_add(dev_priv->mtrr[2].base,
+                             dev_priv->mtrr[2].size, MTRR_TYPE_WRCOMB,
+                             1);
         } else {
                 DRM_ERROR("strange pci_resource_len %08lx\n",
                           drm_get_resource_len(dev, 0));
@@ -608,9 +608,10 @@ int savage_preinit(drm_device_t *dev, unsigned long chipset)
                  * aperture. */
                 dev_priv->mtrr[0].base = fb_base;
                 dev_priv->mtrr[0].size = 0x08000000;
-                dev_priv->mtrr[0].handle = mtrr_add(
-                        dev_priv->mtrr[0].base, dev_priv->mtrr[0].size,
-                        MTRR_TYPE_WRCOMB, 1);
+                dev_priv->mtrr[0].handle =
+                    mtrr_add(dev_priv->mtrr[0].base,
+                             dev_priv->mtrr[0].size, MTRR_TYPE_WRCOMB,
+                             1);
         } else {
                 DRM_ERROR("strange pci_resource_len %08lx\n",
                           drm_get_resource_len(dev, 1));
@@ -647,7 +648,7 @@ int savage_preinit(drm_device_t *dev, unsigned long chipset)
 /*
  * Delete MTRRs and free device-private data.
  */
-int savage_postcleanup(drm_device_t *dev)
+int savage_postcleanup(drm_device_t * dev)
 {
         drm_savage_private_t *dev_priv = dev->dev_private;
         int i;
@@ -663,7 +664,7 @@ int savage_postcleanup(drm_device_t *dev)
         return 0;
 }
 
-static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
+static int savage_do_init_bci(drm_device_t * dev, drm_savage_init_t * init)
 {
         drm_savage_private_t *dev_priv = dev->dev_private;
 
@@ -731,7 +732,7 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
         }
         if (init->agp_textures_offset) {
                 dev_priv->agp_textures =
-                        drm_core_findmap(dev, init->agp_textures_offset);
+                    drm_core_findmap(dev, init->agp_textures_offset);
                 if (!dev_priv->agp_textures) {
                         DRM_ERROR("could not find agp texture region!\n");
                         savage_do_cleanup_bci(dev);
@@ -802,8 +803,8 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
         }
 
         dev_priv->sarea_priv =
-                (drm_savage_sarea_t *)((uint8_t *)dev_priv->sarea->handle +
-                                       init->sarea_priv_offset);
+            (drm_savage_sarea_t *) ((uint8_t *) dev_priv->sarea->handle +
+                                    init->sarea_priv_offset);
 
         /* setup bitmap descriptors */
         {
@@ -812,35 +813,36 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
                 unsigned int front_stride, back_stride, depth_stride;
                 if (dev_priv->chipset <= S3_SAVAGE4) {
                         color_tile_format = dev_priv->fb_bpp == 16 ?
                             SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
                         depth_tile_format = dev_priv->depth_bpp == 16 ?
                             SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
                 } else {
                         color_tile_format = SAVAGE_BD_TILE_DEST;
                         depth_tile_format = SAVAGE_BD_TILE_DEST;
                 }
-                front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp/8);
-                back_stride = dev_priv-> back_pitch / (dev_priv->fb_bpp/8);
-                depth_stride = dev_priv->depth_pitch / (dev_priv->depth_bpp/8);
+                front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8);
+                back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
+                depth_stride =
+                    dev_priv->depth_pitch / (dev_priv->depth_bpp / 8);
 
                 dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
                     (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
                     (color_tile_format << SAVAGE_BD_TILE_SHIFT);
 
-                dev_priv-> back_bd = back_stride | SAVAGE_BD_BW_DISABLE |
+                dev_priv->back_bd = back_stride | SAVAGE_BD_BW_DISABLE |
                     (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
                     (color_tile_format << SAVAGE_BD_TILE_SHIFT);
 
                 dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
                     (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
                     (depth_tile_format << SAVAGE_BD_TILE_SHIFT);
         }
 
         /* setup status and bci ptr */
         dev_priv->event_counter = 0;
         dev_priv->event_wrap = 0;
         dev_priv->bci_ptr = (volatile uint32_t *)
-                ((uint8_t *)dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
+            ((uint8_t *) dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
         if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
                 dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
         } else {
@@ -848,7 +850,7 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
         }
         if (dev_priv->status != NULL) {
                 dev_priv->status_ptr =
-                        (volatile uint32_t *)dev_priv->status->handle;
+                    (volatile uint32_t *)dev_priv->status->handle;
                 dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
                 dev_priv->wait_evnt = savage_bci_wait_event_shadow;
                 dev_priv->status_ptr[1023] = dev_priv->event_counter;
@@ -874,7 +876,7 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
                 return DRM_ERR(ENOMEM);
         }
 
-        if (savage_dma_init(dev_priv) < 0) { 
+        if (savage_dma_init(dev_priv) < 0) {
                 DRM_ERROR("could not initialize command DMA\n");
                 savage_do_cleanup_bci(dev);
                 return DRM_ERR(ENOMEM);
@@ -883,7 +885,7 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
         return 0;
 }
 
-int savage_do_cleanup_bci(drm_device_t *dev)
+int savage_do_cleanup_bci(drm_device_t * dev)
 {
         drm_savage_private_t *dev_priv = dev->dev_private;
 
@@ -907,7 +909,7 @@ int savage_do_cleanup_bci(drm_device_t *dev)
 
         if (dev_priv->dma_pages)
                 drm_free(dev_priv->dma_pages,
-                         sizeof(drm_savage_dma_page_t)*dev_priv->nr_dma_pages,
+                         sizeof(drm_savage_dma_page_t) * dev_priv->nr_dma_pages,
                          DRM_MEM_DRIVER);
 
         return 0;
@@ -920,7 +922,7 @@ static int savage_bci_init(DRM_IOCTL_ARGS)
 
         LOCK_TEST_WITH_RETURN(dev, filp);
 
-        DRM_COPY_FROM_USER_IOCTL(init, (drm_savage_init_t __user *)data,
+        DRM_COPY_FROM_USER_IOCTL(init, (drm_savage_init_t __user *) data,
                                  sizeof(init));
 
         switch (init.func) {
@@ -943,13 +945,13 @@ static int savage_bci_event_emit(DRM_IOCTL_ARGS)
 
         LOCK_TEST_WITH_RETURN(dev, filp);
 
-        DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_emit_t __user *)data,
+        DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_emit_t __user *) data,
                                  sizeof(event));
 
         event.count = savage_bci_emit_event(dev_priv, event.flags);
         event.count |= dev_priv->event_wrap << 16;
-        DRM_COPY_TO_USER_IOCTL(&((drm_savage_event_emit_t __user *)data)->count,
-                               event.count, sizeof(event.count));
+        DRM_COPY_TO_USER_IOCTL(&((drm_savage_event_emit_t __user *) data)->
+                               count, event.count, sizeof(event.count));
         return 0;
 }
 
@@ -963,7 +965,7 @@ static int savage_bci_event_wait(DRM_IOCTL_ARGS)
 
         DRM_DEBUG("\n");
 
-        DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_wait_t __user *)data,
+        DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_wait_t __user *) data,
                                  sizeof(event));
 
         UPDATE_EVENT_COUNTER();
@@ -973,7 +975,7 @@ static int savage_bci_event_wait(DRM_IOCTL_ARGS)
                 hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
         hw_w = dev_priv->event_wrap;
         if (hw_e > dev_priv->event_counter)
-                hw_w--; /* hardware hasn't passed the last wrap yet */
+                hw_w--;        /* hardware hasn't passed the last wrap yet */
 
         event_e = event.count & 0xffff;
         event_w = event.count >> 16;
@@ -982,7 +984,7 @@ static int savage_bci_event_wait(DRM_IOCTL_ARGS)
          * - event counter wrapped since the event was emitted or
          * - the hardware has advanced up to or over the event to wait for.
          */
-        if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e) )
+        if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e))
                 return 0;
         else
                 return dev_priv->wait_evnt(dev_priv, event_e);
@@ -992,7 +994,8 @@ static int savage_bci_event_wait(DRM_IOCTL_ARGS)
  * DMA buffer management
  */
 
-static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, drm_dma_t *d)
+static int savage_bci_get_buffers(DRMFILE filp, drm_device_t * dev,
+                                  drm_dma_t * d)
 {
         drm_buf_t *buf;
         int i;
@@ -1025,7 +1028,7 @@ int savage_bci_buffers(DRM_IOCTL_ARGS)
 
         LOCK_TEST_WITH_RETURN(dev, filp);
 
-        DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t __user *)data, sizeof(d));
+        DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t __user *) data, sizeof(d));
 
         /* Please don't send us buffers.
          */
@@ -1049,12 +1052,13 @@ int savage_bci_buffers(DRM_IOCTL_ARGS)
                 ret = savage_bci_get_buffers(filp, dev, &d);
         }
 
-        DRM_COPY_TO_USER_IOCTL((drm_dma_t __user *)data, d, sizeof(d));
+        DRM_COPY_TO_USER_IOCTL((drm_dma_t __user *) data, d, sizeof(d));
 
         return ret;
 }
 
-void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp) {
+void savage_reclaim_buffers(drm_device_t * dev, DRMFILE filp)
+{
         drm_device_dma_t *dma = dev->dma;
         drm_savage_private_t *dev_priv = dev->dev_private;
         int i;
@@ -1066,7 +1070,7 @@ void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp) {
         if (!dma->buflist)
                 return;
 
-        /*i830_flush_queue(dev);*/
+        /*i830_flush_queue(dev); */
 
         for (i = 0; i < dma->buf_count; i++) {
                 drm_buf_t *buf = dma->buflist[i];
@@ -1085,12 +1089,15 @@ void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp) {
         drm_core_reclaim_buffers(dev, filp);
 }
 
-
 drm_ioctl_desc_t savage_ioctls[] = {
-        [DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, 1, 1},
-        [DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, 1, 0},
-        [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, 1, 0},
-        [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, 1, 0},
+        [DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, 1, 1}
+        ,
+        [DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, 1, 0}
+        ,
+        [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, 1, 0}
+        ,
+        [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, 1, 0}
+        ,
 };
 
 int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);