-rw-r--r--  drivers/gpu/drm/radeon/r300_cmdbuf.c   | 280
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h    |  32
-rw-r--r--  drivers/gpu/drm/radeon/radeon_state.c  | 197
3 files changed, 262 insertions(+), 247 deletions(-)
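
This patch converts the radeon CP command-stream parsers from hand-rolled pointer walking over a flat kernel copy (a char *buf plus an int bufsz that every handler had to advance and shrink itself) to the paged struct drm_buffer helpers, and replaces OUT_RING_TABLE() with a new OUT_RING_DRM_BUFFER() macro that copies dwords from the paged buffer straight into the ring. The contrast below is an illustrative sketch only, with names taken from the hunks that follow and all error reporting elided:

    /* Old pattern: flat kernel copy, manual bookkeeping in every handler */
    if (sz * 4 > cmdbuf->bufsz)
            return -EINVAL;
    OUT_RING_TABLE((int *)cmdbuf->buf, sz);
    cmdbuf->buf += sz * 4;
    cmdbuf->bufsz -= sz * 4;

    /* New pattern: paged drm_buffer with a shared read iterator */
    if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
            return -EINVAL;
    OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);   /* copy + advance in one step */
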
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 34bffa0e4b73..7f59352cd637 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -33,6 +33,7 @@
33 33
34#include "drmP.h" 34#include "drmP.h"
35#include "drm.h" 35#include "drm.h"
36#include "drm_buffer.h"
36#include "radeon_drm.h" 37#include "radeon_drm.h"
37#include "radeon_drv.h" 38#include "radeon_drv.h"
38#include "r300_reg.h" 39#include "r300_reg.h"
@@ -299,46 +300,42 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
299 int reg; 300 int reg;
300 int sz; 301 int sz;
301 int i; 302 int i;
302 int values[64]; 303 u32 *value;
303 RING_LOCALS; 304 RING_LOCALS;
304 305
305 sz = header.packet0.count; 306 sz = header.packet0.count;
306 reg = (header.packet0.reghi << 8) | header.packet0.reglo; 307 reg = (header.packet0.reghi << 8) | header.packet0.reglo;
307 308
308 if ((sz > 64) || (sz < 0)) { 309 if ((sz > 64) || (sz < 0)) {
309 DRM_ERROR 310 DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
310 ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", 311 reg, sz);
311 reg, sz);
312 return -EINVAL; 312 return -EINVAL;
313 } 313 }
314
314 for (i = 0; i < sz; i++) { 315 for (i = 0; i < sz; i++) {
315 values[i] = ((int *)cmdbuf->buf)[i];
316 switch (r300_reg_flags[(reg >> 2) + i]) { 316 switch (r300_reg_flags[(reg >> 2) + i]) {
317 case MARK_SAFE: 317 case MARK_SAFE:
318 break; 318 break;
319 case MARK_CHECK_OFFSET: 319 case MARK_CHECK_OFFSET:
320 if (!radeon_check_offset(dev_priv, (u32) values[i])) { 320 value = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
321 DRM_ERROR 321 if (!radeon_check_offset(dev_priv, *value)) {
322 ("Offset failed range check (reg=%04x sz=%d)\n", 322 DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n",
323 reg, sz); 323 reg, sz);
324 return -EINVAL; 324 return -EINVAL;
325 } 325 }
326 break; 326 break;
327 default: 327 default:
328 DRM_ERROR("Register %04x failed check as flag=%02x\n", 328 DRM_ERROR("Register %04x failed check as flag=%02x\n",
329 reg + i * 4, r300_reg_flags[(reg >> 2) + i]); 329 reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
330 return -EINVAL; 330 return -EINVAL;
331 } 331 }
332 } 332 }
333 333
334 BEGIN_RING(1 + sz); 334 BEGIN_RING(1 + sz);
335 OUT_RING(CP_PACKET0(reg, sz - 1)); 335 OUT_RING(CP_PACKET0(reg, sz - 1));
336 OUT_RING_TABLE(values, sz); 336 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
337 ADVANCE_RING(); 337 ADVANCE_RING();
338 338
339 cmdbuf->buf += sz * 4;
340 cmdbuf->bufsz -= sz * 4;
341
342 return 0; 339 return 0;
343} 340}
344 341
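
With the on-stack values[64] staging array gone, registers flagged MARK_CHECK_OFFSET are now validated in place through drm_buffer_pointer_to_dword() and the whole run of dwords is streamed to the ring afterwards. A condensed sketch of the new loop, using the names from the hunk above with the error messages trimmed:

    for (i = 0; i < sz; i++) {
            switch (r300_reg_flags[(reg >> 2) + i]) {
            case MARK_SAFE:
                    break;
            case MARK_CHECK_OFFSET:
                    /* peek at dword i; the iterator does not move */
                    value = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
                    if (!radeon_check_offset(dev_priv, *value))
                            return -EINVAL;
                    break;
            default:
                    return -EINVAL;
            }
    }

    BEGIN_RING(1 + sz);
    OUT_RING(CP_PACKET0(reg, sz - 1));
    OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);   /* consumes sz dwords */
    ADVANCE_RING();
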
@@ -362,7 +359,7 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
362 if (!sz) 359 if (!sz)
363 return 0; 360 return 0;
364 361
365 if (sz * 4 > cmdbuf->bufsz) 362 if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
366 return -EINVAL; 363 return -EINVAL;
367 364
368 if (reg + sz * 4 >= 0x10000) { 365 if (reg + sz * 4 >= 0x10000) {
@@ -380,12 +377,9 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
380 377
381 BEGIN_RING(1 + sz); 378 BEGIN_RING(1 + sz);
382 OUT_RING(CP_PACKET0(reg, sz - 1)); 379 OUT_RING(CP_PACKET0(reg, sz - 1));
383 OUT_RING_TABLE((int *)cmdbuf->buf, sz); 380 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
384 ADVANCE_RING(); 381 ADVANCE_RING();
385 382
386 cmdbuf->buf += sz * 4;
387 cmdbuf->bufsz -= sz * 4;
388
389 return 0; 383 return 0;
390} 384}
391 385
@@ -407,7 +401,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
407 401
408 if (!sz) 402 if (!sz)
409 return 0; 403 return 0;
410 if (sz * 16 > cmdbuf->bufsz) 404 if (sz * 16 > drm_buffer_unprocessed(cmdbuf->buffer))
411 return -EINVAL; 405 return -EINVAL;
412 406
413 /* VAP is very sensitive so we purge cache before we program it 407 /* VAP is very sensitive so we purge cache before we program it
@@ -426,7 +420,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
426 BEGIN_RING(3 + sz * 4); 420 BEGIN_RING(3 + sz * 4);
427 OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr); 421 OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
428 OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1)); 422 OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
429 OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4); 423 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * 4);
430 ADVANCE_RING(); 424 ADVANCE_RING();
431 425
432 BEGIN_RING(2); 426 BEGIN_RING(2);
@@ -434,9 +428,6 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
434 OUT_RING(0); 428 OUT_RING(0);
435 ADVANCE_RING(); 429 ADVANCE_RING();
436 430
437 cmdbuf->buf += sz * 16;
438 cmdbuf->bufsz -= sz * 16;
439
440 return 0; 431 return 0;
441} 432}
442 433
@@ -449,14 +440,14 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
449{ 440{
450 RING_LOCALS; 441 RING_LOCALS;
451 442
452 if (8 * 4 > cmdbuf->bufsz) 443 if (8 * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
453 return -EINVAL; 444 return -EINVAL;
454 445
455 BEGIN_RING(10); 446 BEGIN_RING(10);
456 OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8)); 447 OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
457 OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING | 448 OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
458 (1 << R300_PRIM_NUM_VERTICES_SHIFT)); 449 (1 << R300_PRIM_NUM_VERTICES_SHIFT));
459 OUT_RING_TABLE((int *)cmdbuf->buf, 8); 450 OUT_RING_DRM_BUFFER(cmdbuf->buffer, 8);
460 ADVANCE_RING(); 451 ADVANCE_RING();
461 452
462 BEGIN_RING(4); 453 BEGIN_RING(4);
@@ -468,9 +459,6 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
468 /* set flush flag */ 459 /* set flush flag */
469 dev_priv->track_flush |= RADEON_FLUSH_EMITED; 460 dev_priv->track_flush |= RADEON_FLUSH_EMITED;
470 461
471 cmdbuf->buf += 8 * 4;
472 cmdbuf->bufsz -= 8 * 4;
473
474 return 0; 462 return 0;
475} 463}
476 464
@@ -480,28 +468,29 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
480{ 468{
481 int count, i, k; 469 int count, i, k;
482#define MAX_ARRAY_PACKET 64 470#define MAX_ARRAY_PACKET 64
483 u32 payload[MAX_ARRAY_PACKET]; 471 u32 *data;
484 u32 narrays; 472 u32 narrays;
485 RING_LOCALS; 473 RING_LOCALS;
486 474
487 count = (header >> 16) & 0x3fff; 475 count = (header & RADEON_CP_PACKET_COUNT_MASK) >> 16;
488 476
489 if ((count + 1) > MAX_ARRAY_PACKET) { 477 if ((count + 1) > MAX_ARRAY_PACKET) {
490 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", 478 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
491 count); 479 count);
492 return -EINVAL; 480 return -EINVAL;
493 } 481 }
494 memset(payload, 0, MAX_ARRAY_PACKET * 4);
495 memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);
496
497 /* carefully check packet contents */ 482 /* carefully check packet contents */
498 483
499 narrays = payload[0]; 484 /* We have already read the header so advance the buffer. */
485 drm_buffer_advance(cmdbuf->buffer, 4);
486
487 narrays = *(u32 *)drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
500 k = 0; 488 k = 0;
501 i = 1; 489 i = 1;
502 while ((k < narrays) && (i < (count + 1))) { 490 while ((k < narrays) && (i < (count + 1))) {
503 i++; /* skip attribute field */ 491 i++; /* skip attribute field */
504 if (!radeon_check_offset(dev_priv, payload[i])) { 492 data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
493 if (!radeon_check_offset(dev_priv, *data)) {
505 DRM_ERROR 494 DRM_ERROR
506 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", 495 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
507 k, i); 496 k, i);
@@ -512,7 +501,8 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
512 if (k == narrays) 501 if (k == narrays)
513 break; 502 break;
514 /* have one more to process, they come in pairs */ 503 /* have one more to process, they come in pairs */
515 if (!radeon_check_offset(dev_priv, payload[i])) { 504 data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
505 if (!radeon_check_offset(dev_priv, *data)) {
516 DRM_ERROR 506 DRM_ERROR
517 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", 507 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
518 k, i); 508 k, i);
@@ -533,30 +523,30 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
533 523
534 BEGIN_RING(count + 2); 524 BEGIN_RING(count + 2);
535 OUT_RING(header); 525 OUT_RING(header);
536 OUT_RING_TABLE(payload, count + 1); 526 OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 1);
537 ADVANCE_RING(); 527 ADVANCE_RING();
538 528
539 cmdbuf->buf += (count + 2) * 4;
540 cmdbuf->bufsz -= (count + 2) * 4;
541
542 return 0; 529 return 0;
543} 530}
544 531
545static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, 532static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
546 drm_radeon_kcmd_buffer_t *cmdbuf) 533 drm_radeon_kcmd_buffer_t *cmdbuf)
547{ 534{
548 u32 *cmd = (u32 *) cmdbuf->buf; 535 u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
549 int count, ret; 536 int count, ret;
550 RING_LOCALS; 537 RING_LOCALS;
551 538
552 count=(cmd[0]>>16) & 0x3fff;
553 539
554 if (cmd[0] & 0x8000) { 540 count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
555 u32 offset;
556 541
557 if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL 542 if (*cmd & 0x8000) {
543 u32 offset;
544 u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
545 if (*cmd1 & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
558 | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { 546 | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
559 offset = cmd[2] << 10; 547
548 u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
549 offset = *cmd2 << 10;
560 ret = !radeon_check_offset(dev_priv, offset); 550 ret = !radeon_check_offset(dev_priv, offset);
561 if (ret) { 551 if (ret) {
562 DRM_ERROR("Invalid bitblt first offset is %08X\n", offset); 552 DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
@@ -564,9 +554,10 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
564 } 554 }
565 } 555 }
566 556
567 if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && 557 if ((*cmd1 & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
568 (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { 558 (*cmd1 & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
569 offset = cmd[3] << 10; 559 u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
560 offset = *cmd3 << 10;
570 ret = !radeon_check_offset(dev_priv, offset); 561 ret = !radeon_check_offset(dev_priv, offset);
571 if (ret) { 562 if (ret) {
572 DRM_ERROR("Invalid bitblt second offset is %08X\n", offset); 563 DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
@@ -577,28 +568,25 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
577 } 568 }
578 569
579 BEGIN_RING(count+2); 570 BEGIN_RING(count+2);
580 OUT_RING(cmd[0]); 571 OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
581 OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
582 ADVANCE_RING(); 572 ADVANCE_RING();
583 573
584 cmdbuf->buf += (count+2)*4;
585 cmdbuf->bufsz -= (count+2)*4;
586
587 return 0; 574 return 0;
588} 575}
589 576
590static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv, 577static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
591 drm_radeon_kcmd_buffer_t *cmdbuf) 578 drm_radeon_kcmd_buffer_t *cmdbuf)
592{ 579{
593 u32 *cmd; 580 u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
581 u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
594 int count; 582 int count;
595 int expected_count; 583 int expected_count;
596 RING_LOCALS; 584 RING_LOCALS;
597 585
598 cmd = (u32 *) cmdbuf->buf; 586 count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
599 count = (cmd[0]>>16) & 0x3fff; 587
600 expected_count = cmd[1] >> 16; 588 expected_count = *cmd1 >> 16;
601 if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit)) 589 if (!(*cmd1 & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
602 expected_count = (expected_count+1)/2; 590 expected_count = (expected_count+1)/2;
603 591
604 if (count && count != expected_count) { 592 if (count && count != expected_count) {
@@ -608,55 +596,53 @@ static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
608 } 596 }
609 597
610 BEGIN_RING(count+2); 598 BEGIN_RING(count+2);
611 OUT_RING(cmd[0]); 599 OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
612 OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
613 ADVANCE_RING(); 600 ADVANCE_RING();
614 601
615 cmdbuf->buf += (count+2)*4;
616 cmdbuf->bufsz -= (count+2)*4;
617
618 if (!count) { 602 if (!count) {
619 drm_r300_cmd_header_t header; 603 drm_r300_cmd_header_t stack_header, *header;
604 u32 *cmd1, *cmd2, *cmd3;
620 605
621 if (cmdbuf->bufsz < 4*4 + sizeof(header)) { 606 if (drm_buffer_unprocessed(cmdbuf->buffer)
607 < 4*4 + sizeof(stack_header)) {
622 DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n"); 608 DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
623 return -EINVAL; 609 return -EINVAL;
624 } 610 }
625 611
626 header.u = *(unsigned int *)cmdbuf->buf; 612 header = drm_buffer_read_object(cmdbuf->buffer,
613 sizeof(stack_header), &stack_header);
627 614
628 cmdbuf->buf += sizeof(header); 615 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
629 cmdbuf->bufsz -= sizeof(header); 616 cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
630 cmd = (u32 *) cmdbuf->buf; 617 cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
618 cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
631 619
632 if (header.header.cmd_type != R300_CMD_PACKET3 || 620 if (header->header.cmd_type != R300_CMD_PACKET3 ||
633 header.packet3.packet != R300_CMD_PACKET3_RAW || 621 header->packet3.packet != R300_CMD_PACKET3_RAW ||
634 cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) { 622 *cmd != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
635 DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n"); 623 DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
636 return -EINVAL; 624 return -EINVAL;
637 } 625 }
638 626
639 if ((cmd[1] & 0x8000ffff) != 0x80000810) { 627 if ((*cmd1 & 0x8000ffff) != 0x80000810) {
640 DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); 628 DRM_ERROR("Invalid indx_buffer reg address %08X\n",
629 *cmd1);
641 return -EINVAL; 630 return -EINVAL;
642 } 631 }
643 if (!radeon_check_offset(dev_priv, cmd[2])) { 632 if (!radeon_check_offset(dev_priv, *cmd2)) {
644 DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); 633 DRM_ERROR("Invalid indx_buffer offset is %08X\n",
634 *cmd2);
645 return -EINVAL; 635 return -EINVAL;
646 } 636 }
647 if (cmd[3] != expected_count) { 637 if (*cmd3 != expected_count) {
648 DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n", 638 DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
649 cmd[3], expected_count); 639 *cmd3, expected_count);
650 return -EINVAL; 640 return -EINVAL;
651 } 641 }
652 642
653 BEGIN_RING(4); 643 BEGIN_RING(4);
654 OUT_RING(cmd[0]); 644 OUT_RING_DRM_BUFFER(cmdbuf->buffer, 4);
655 OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3);
656 ADVANCE_RING(); 645 ADVANCE_RING();
657
658 cmdbuf->buf += 4*4;
659 cmdbuf->bufsz -= 4*4;
660 } 646 }
661 647
662 return 0; 648 return 0;
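
drm_buffer_read_object() pulls a whole object out of the stream: it returns a pointer directly into the paged buffer when the object fits inside the current page, falls back to copying into the caller-supplied stack object when it straddles a page boundary, and advances the iterator past it either way. That is why the INDX_BUFFER dwords above are then addressed as offsets 0..3 from the new iterator position. A rough sketch of those semantics (the authoritative version lives in the generic drm_buffer code, not in this patch):

    void *drm_buffer_read_object(struct drm_buffer *buf,
                                 int objsize, void *stack_obj)
    {
            int idx = drm_buffer_index(buf);
            int page = drm_buffer_page(buf);
            void *obj;

            if (idx + objsize <= PAGE_SIZE) {
                    obj = &buf->data[page][idx];    /* no copy needed */
            } else {
                    /* object split across pages: copy to the stack object */
                    int beginsz = PAGE_SIZE - idx;
                    memcpy(stack_obj, &buf->data[page][idx], beginsz);
                    memcpy(stack_obj + beginsz, &buf->data[page + 1][0],
                           objsize - beginsz);
                    obj = stack_obj;
            }
            drm_buffer_advance(buf, objsize);
            return obj;
    }
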
@@ -665,39 +651,39 @@ static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
665static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, 651static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
666 drm_radeon_kcmd_buffer_t *cmdbuf) 652 drm_radeon_kcmd_buffer_t *cmdbuf)
667{ 653{
668 u32 header; 654 u32 *header;
669 int count; 655 int count;
670 RING_LOCALS; 656 RING_LOCALS;
671 657
672 if (4 > cmdbuf->bufsz) 658 if (4 > drm_buffer_unprocessed(cmdbuf->buffer))
673 return -EINVAL; 659 return -EINVAL;
674 660
675 /* Fixme !! This simply emits a packet without much checking. 661 /* Fixme !! This simply emits a packet without much checking.
676 We need to be smarter. */ 662 We need to be smarter. */
677 663
678 /* obtain first word - actual packet3 header */ 664 /* obtain first word - actual packet3 header */
679 header = *(u32 *) cmdbuf->buf; 665 header = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
680 666
681 /* Is it packet 3 ? */ 667 /* Is it packet 3 ? */
682 if ((header >> 30) != 0x3) { 668 if ((*header >> 30) != 0x3) {
683 DRM_ERROR("Not a packet3 header (0x%08x)\n", header); 669 DRM_ERROR("Not a packet3 header (0x%08x)\n", *header);
684 return -EINVAL; 670 return -EINVAL;
685 } 671 }
686 672
687 count = (header >> 16) & 0x3fff; 673 count = (*header >> 16) & 0x3fff;
688 674
689 /* Check again now that we know how much data to expect */ 675 /* Check again now that we know how much data to expect */
690 if ((count + 2) * 4 > cmdbuf->bufsz) { 676 if ((count + 2) * 4 > drm_buffer_unprocessed(cmdbuf->buffer)) {
691 DRM_ERROR 677 DRM_ERROR
692 ("Expected packet3 of length %d but have only %d bytes left\n", 678 ("Expected packet3 of length %d but have only %d bytes left\n",
693 (count + 2) * 4, cmdbuf->bufsz); 679 (count + 2) * 4, drm_buffer_unprocessed(cmdbuf->buffer));
694 return -EINVAL; 680 return -EINVAL;
695 } 681 }
696 682
697 /* Is it a packet type we know about ? */ 683 /* Is it a packet type we know about ? */
698 switch (header & 0xff00) { 684 switch (*header & 0xff00) {
699 case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */ 685 case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
700 return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header); 686 return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, *header);
701 687
702 case RADEON_CNTL_BITBLT_MULTI: 688 case RADEON_CNTL_BITBLT_MULTI:
703 return r300_emit_bitblt_multi(dev_priv, cmdbuf); 689 return r300_emit_bitblt_multi(dev_priv, cmdbuf);
@@ -723,18 +709,14 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
723 /* these packets are safe */ 709 /* these packets are safe */
724 break; 710 break;
725 default: 711 default:
726 DRM_ERROR("Unknown packet3 header (0x%08x)\n", header); 712 DRM_ERROR("Unknown packet3 header (0x%08x)\n", *header);
727 return -EINVAL; 713 return -EINVAL;
728 } 714 }
729 715
730 BEGIN_RING(count + 2); 716 BEGIN_RING(count + 2);
731 OUT_RING(header); 717 OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
732 OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
733 ADVANCE_RING(); 718 ADVANCE_RING();
734 719
735 cmdbuf->buf += (count + 2) * 4;
736 cmdbuf->bufsz -= (count + 2) * 4;
737
738 return 0; 720 return 0;
739} 721}
740 722
@@ -748,8 +730,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
748{ 730{
749 int n; 731 int n;
750 int ret; 732 int ret;
751 char *orig_buf = cmdbuf->buf; 733 int orig_iter = cmdbuf->buffer->iterator;
752 int orig_bufsz = cmdbuf->bufsz;
753 734
754 /* This is a do-while-loop so that we run the interior at least once, 735 /* This is a do-while-loop so that we run the interior at least once,
755 * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale. 736 * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
@@ -761,8 +742,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
761 if (ret) 742 if (ret)
762 return ret; 743 return ret;
763 744
764 cmdbuf->buf = orig_buf; 745 cmdbuf->buffer->iterator = orig_iter;
765 cmdbuf->bufsz = orig_bufsz;
766 } 746 }
767 747
768 switch (header.packet3.packet) { 748 switch (header.packet3.packet) {
@@ -785,9 +765,9 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
785 break; 765 break;
786 766
787 default: 767 default:
788 DRM_ERROR("bad packet3 type %i at %p\n", 768 DRM_ERROR("bad packet3 type %i at byte %d\n",
789 header.packet3.packet, 769 header.packet3.packet,
790 cmdbuf->buf - sizeof(header)); 770 cmdbuf->buffer->iterator - sizeof(header));
791 return -EINVAL; 771 return -EINVAL;
792 } 772 }
793 773
@@ -923,12 +903,13 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
923 drm_r300_cmd_header_t header) 903 drm_r300_cmd_header_t header)
924{ 904{
925 u32 *ref_age_base; 905 u32 *ref_age_base;
926 u32 i, buf_idx, h_pending; 906 u32 i, *buf_idx, h_pending;
927 u64 ptr_addr; 907 u64 *ptr_addr;
908 u64 stack_ptr_addr;
928 RING_LOCALS; 909 RING_LOCALS;
929 910
930 if (cmdbuf->bufsz < 911 if (drm_buffer_unprocessed(cmdbuf->buffer) <
931 (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) { 912 (sizeof(u64) + header.scratch.n_bufs * sizeof(*buf_idx))) {
932 return -EINVAL; 913 return -EINVAL;
933 } 914 }
934 915
@@ -938,36 +919,35 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
938 919
939 dev_priv->scratch_ages[header.scratch.reg]++; 920 dev_priv->scratch_ages[header.scratch.reg]++;
940 921
941 ptr_addr = get_unaligned((u64 *)cmdbuf->buf); 922 ptr_addr = drm_buffer_read_object(cmdbuf->buffer,
942 ref_age_base = (u32 *)(unsigned long)ptr_addr; 923 sizeof(stack_ptr_addr), &stack_ptr_addr);
943 924 ref_age_base = (u32 *)(unsigned long)*ptr_addr;
944 cmdbuf->buf += sizeof(u64);
945 cmdbuf->bufsz -= sizeof(u64);
946 925
947 for (i=0; i < header.scratch.n_bufs; i++) { 926 for (i=0; i < header.scratch.n_bufs; i++) {
948 buf_idx = *(u32 *)cmdbuf->buf; 927 buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
949 buf_idx *= 2; /* 8 bytes per buf */ 928 *buf_idx *= 2; /* 8 bytes per buf */
950 929
951 if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) { 930 if (DRM_COPY_TO_USER(ref_age_base + *buf_idx,
931 &dev_priv->scratch_ages[header.scratch.reg],
932 sizeof(u32)))
952 return -EINVAL; 933 return -EINVAL;
953 }
954 934
955 if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) { 935 if (DRM_COPY_FROM_USER(&h_pending,
936 ref_age_base + *buf_idx + 1,
937 sizeof(u32)))
956 return -EINVAL; 938 return -EINVAL;
957 }
958 939
959 if (h_pending == 0) { 940 if (h_pending == 0)
960 return -EINVAL; 941 return -EINVAL;
961 }
962 942
963 h_pending--; 943 h_pending--;
964 944
965 if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) { 945 if (DRM_COPY_TO_USER(ref_age_base + *buf_idx + 1,
946 &h_pending,
947 sizeof(u32)))
966 return -EINVAL; 948 return -EINVAL;
967 }
968 949
969 cmdbuf->buf += sizeof(buf_idx); 950 drm_buffer_advance(cmdbuf->buffer, sizeof(*buf_idx));
970 cmdbuf->bufsz -= sizeof(buf_idx);
971 } 951 }
972 952
973 BEGIN_RING(2); 953 BEGIN_RING(2);
@@ -1009,19 +989,16 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
1009 DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type); 989 DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
1010 if (!sz) 990 if (!sz)
1011 return 0; 991 return 0;
1012 if (sz * stride * 4 > cmdbuf->bufsz) 992 if (sz * stride * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
1013 return -EINVAL; 993 return -EINVAL;
1014 994
1015 BEGIN_RING(3 + sz * stride); 995 BEGIN_RING(3 + sz * stride);
1016 OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr); 996 OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
1017 OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1)); 997 OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
1018 OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride); 998 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * stride);
1019 999
1020 ADVANCE_RING(); 1000 ADVANCE_RING();
1021 1001
1022 cmdbuf->buf += sz * stride * 4;
1023 cmdbuf->bufsz -= sz * stride * 4;
1024
1025 return 0; 1002 return 0;
1026} 1003}
1027 1004
@@ -1053,19 +1030,18 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
1053 goto cleanup; 1030 goto cleanup;
1054 } 1031 }
1055 1032
1056 while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) { 1033 while (drm_buffer_unprocessed(cmdbuf->buffer)
1034 >= sizeof(drm_r300_cmd_header_t)) {
1057 int idx; 1035 int idx;
1058 drm_r300_cmd_header_t header; 1036 drm_r300_cmd_header_t *header, stack_header;
1059
1060 header.u = *(unsigned int *)cmdbuf->buf;
1061 1037
1062 cmdbuf->buf += sizeof(header); 1038 header = drm_buffer_read_object(cmdbuf->buffer,
1063 cmdbuf->bufsz -= sizeof(header); 1039 sizeof(stack_header), &stack_header);
1064 1040
1065 switch (header.header.cmd_type) { 1041 switch (header->header.cmd_type) {
1066 case R300_CMD_PACKET0: 1042 case R300_CMD_PACKET0:
1067 DRM_DEBUG("R300_CMD_PACKET0\n"); 1043 DRM_DEBUG("R300_CMD_PACKET0\n");
1068 ret = r300_emit_packet0(dev_priv, cmdbuf, header); 1044 ret = r300_emit_packet0(dev_priv, cmdbuf, *header);
1069 if (ret) { 1045 if (ret) {
1070 DRM_ERROR("r300_emit_packet0 failed\n"); 1046 DRM_ERROR("r300_emit_packet0 failed\n");
1071 goto cleanup; 1047 goto cleanup;
@@ -1074,7 +1050,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
1074 1050
1075 case R300_CMD_VPU: 1051 case R300_CMD_VPU:
1076 DRM_DEBUG("R300_CMD_VPU\n"); 1052 DRM_DEBUG("R300_CMD_VPU\n");
1077 ret = r300_emit_vpu(dev_priv, cmdbuf, header); 1053 ret = r300_emit_vpu(dev_priv, cmdbuf, *header);
1078 if (ret) { 1054 if (ret) {
1079 DRM_ERROR("r300_emit_vpu failed\n"); 1055 DRM_ERROR("r300_emit_vpu failed\n");
1080 goto cleanup; 1056 goto cleanup;
@@ -1083,7 +1059,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
1083 1059
1084 case R300_CMD_PACKET3: 1060 case R300_CMD_PACKET3:
1085 DRM_DEBUG("R300_CMD_PACKET3\n"); 1061 DRM_DEBUG("R300_CMD_PACKET3\n");
1086 ret = r300_emit_packet3(dev_priv, cmdbuf, header); 1062 ret = r300_emit_packet3(dev_priv, cmdbuf, *header);
1087 if (ret) { 1063 if (ret) {
1088 DRM_ERROR("r300_emit_packet3 failed\n"); 1064 DRM_ERROR("r300_emit_packet3 failed\n");
1089 goto cleanup; 1065 goto cleanup;
@@ -1117,8 +1093,8 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
1117 int i; 1093 int i;
1118 RING_LOCALS; 1094 RING_LOCALS;
1119 1095
1120 BEGIN_RING(header.delay.count); 1096 BEGIN_RING(header->delay.count);
1121 for (i = 0; i < header.delay.count; i++) 1097 for (i = 0; i < header->delay.count; i++)
1122 OUT_RING(RADEON_CP_PACKET2); 1098 OUT_RING(RADEON_CP_PACKET2);
1123 ADVANCE_RING(); 1099 ADVANCE_RING();
1124 } 1100 }
@@ -1126,7 +1102,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
1126 1102
1127 case R300_CMD_DMA_DISCARD: 1103 case R300_CMD_DMA_DISCARD:
1128 DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n"); 1104 DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
1129 idx = header.dma.buf_idx; 1105 idx = header->dma.buf_idx;
1130 if (idx < 0 || idx >= dma->buf_count) { 1106 if (idx < 0 || idx >= dma->buf_count) {
1131 DRM_ERROR("buffer index %d (of %d max)\n", 1107 DRM_ERROR("buffer index %d (of %d max)\n",
1132 idx, dma->buf_count - 1); 1108 idx, dma->buf_count - 1);
@@ -1149,12 +1125,12 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
1149 1125
1150 case R300_CMD_WAIT: 1126 case R300_CMD_WAIT:
1151 DRM_DEBUG("R300_CMD_WAIT\n"); 1127 DRM_DEBUG("R300_CMD_WAIT\n");
1152 r300_cmd_wait(dev_priv, header); 1128 r300_cmd_wait(dev_priv, *header);
1153 break; 1129 break;
1154 1130
1155 case R300_CMD_SCRATCH: 1131 case R300_CMD_SCRATCH:
1156 DRM_DEBUG("R300_CMD_SCRATCH\n"); 1132 DRM_DEBUG("R300_CMD_SCRATCH\n");
1157 ret = r300_scratch(dev_priv, cmdbuf, header); 1133 ret = r300_scratch(dev_priv, cmdbuf, *header);
1158 if (ret) { 1134 if (ret) {
1159 DRM_ERROR("r300_scratch failed\n"); 1135 DRM_ERROR("r300_scratch failed\n");
1160 goto cleanup; 1136 goto cleanup;
@@ -1168,16 +1144,16 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
1168 goto cleanup; 1144 goto cleanup;
1169 } 1145 }
1170 DRM_DEBUG("R300_CMD_R500FP\n"); 1146 DRM_DEBUG("R300_CMD_R500FP\n");
1171 ret = r300_emit_r500fp(dev_priv, cmdbuf, header); 1147 ret = r300_emit_r500fp(dev_priv, cmdbuf, *header);
1172 if (ret) { 1148 if (ret) {
1173 DRM_ERROR("r300_emit_r500fp failed\n"); 1149 DRM_ERROR("r300_emit_r500fp failed\n");
1174 goto cleanup; 1150 goto cleanup;
1175 } 1151 }
1176 break; 1152 break;
1177 default: 1153 default:
1178 DRM_ERROR("bad cmd_type %i at %p\n", 1154 DRM_ERROR("bad cmd_type %i at byte %d\n",
1179 header.header.cmd_type, 1155 header->header.cmd_type,
1180 cmdbuf->buf - sizeof(header)); 1156 cmdbuf->buffer->iterator - sizeof(*header));
1181 ret = -EINVAL; 1157 ret = -EINVAL;
1182 goto cleanup; 1158 goto cleanup;
1183 } 1159 }
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index b058316e311f..f6d20cee5705 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -312,9 +312,11 @@ typedef struct drm_radeon_buf_priv {
312 u32 age; 312 u32 age;
313} drm_radeon_buf_priv_t; 313} drm_radeon_buf_priv_t;
314 314
315struct drm_buffer;
316
315typedef struct drm_radeon_kcmd_buffer { 317typedef struct drm_radeon_kcmd_buffer {
316 int bufsz; 318 int bufsz;
317 char *buf; 319 struct drm_buffer *buffer;
318 int nbox; 320 int nbox;
319 struct drm_clip_rect __user *boxes; 321 struct drm_clip_rect __user *boxes;
320} drm_radeon_kcmd_buffer_t; 322} drm_radeon_kcmd_buffer_t;
@@ -2124,4 +2126,32 @@ extern void radeon_commit_ring(drm_radeon_private_t *dev_priv);
2124 write &= mask; \ 2126 write &= mask; \
2125} while (0) 2127} while (0)
2126 2128
2129/**
2130 * Copy given number of dwords from drm buffer to the ring buffer.
2131 */
2132#define OUT_RING_DRM_BUFFER(buf, sz) do { \
2133 int _size = (sz) * 4; \
2134 struct drm_buffer *_buf = (buf); \
2135 int _part_size; \
2136 while (_size > 0) { \
2137 _part_size = _size; \
2138 \
2139 if (write + _part_size/4 > mask) \
2140 _part_size = ((mask + 1) - write)*4; \
2141 \
2142 if (drm_buffer_index(_buf) + _part_size > PAGE_SIZE) \
2143 _part_size = PAGE_SIZE - drm_buffer_index(_buf);\
2144 \
2145 \
2146 \
2147 memcpy(ring + write, &_buf->data[drm_buffer_page(_buf)] \
2148 [drm_buffer_index(_buf)], _part_size); \
2149 \
2150 _size -= _part_size; \
2151 write = (write + _part_size/4) & mask; \
2152 drm_buffer_advance(_buf, _part_size); \
2153 } \
2154} while (0)
2155
2156
2127#endif /* __RADEON_DRV_H__ */ 2157#endif /* __RADEON_DRV_H__ */
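
OUT_RING_DRM_BUFFER() above copies sz dwords from the paged buffer into the ring, splitting each memcpy both where the ring wraps (write would pass mask) and where the source crosses a PAGE_SIZE page of the drm_buffer. It leans on a handful of small accessors; the sketch below shows the layout this macro assumes (the real definitions belong to the generic drm_buffer API in drm_buffer.h, not to this patch):

    struct drm_buffer {
            int iterator;   /* current read position, in bytes         */
            int size;       /* total payload size, in bytes            */
            char *data[];   /* array of PAGE_SIZE-sized backing pages  */
    };

    static inline int drm_buffer_page(struct drm_buffer *buf)
    {
            return buf->iterator / PAGE_SIZE;
    }

    static inline int drm_buffer_index(struct drm_buffer *buf)
    {
            return buf->iterator & (PAGE_SIZE - 1);
    }

    static inline int drm_buffer_unprocessed(struct drm_buffer *buf)
    {
            return buf->size - buf->iterator;
    }

    static inline void drm_buffer_advance(struct drm_buffer *buf, int bytes)
    {
            buf->iterator += bytes;
    }

    /* Address of the dword 'offset' dwords past the iterator; does not advance. */
    static inline void *drm_buffer_pointer_to_dword(struct drm_buffer *buffer,
                                                    int offset)
    {
            int iter = buffer->iterator + offset * 4;
            return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
    }
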
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 38537d971a3e..44b6d66b0ab3 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -29,6 +29,7 @@
29 29
30#include "drmP.h" 30#include "drmP.h"
31#include "drm.h" 31#include "drm.h"
32#include "drm_buffer.h"
32#include "drm_sarea.h" 33#include "drm_sarea.h"
33#include "radeon_drm.h" 34#include "radeon_drm.h"
34#include "radeon_drv.h" 35#include "radeon_drv.h"
@@ -91,21 +92,26 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
91static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * 92static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
92 dev_priv, 93 dev_priv,
93 struct drm_file *file_priv, 94 struct drm_file *file_priv,
94 int id, u32 *data) 95 int id, struct drm_buffer *buf)
95{ 96{
97 u32 *data;
96 switch (id) { 98 switch (id) {
97 99
98 case RADEON_EMIT_PP_MISC: 100 case RADEON_EMIT_PP_MISC:
99 if (radeon_check_and_fixup_offset(dev_priv, file_priv, 101 data = drm_buffer_pointer_to_dword(buf,
100 &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) { 102 (RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4);
103
104 if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
101 DRM_ERROR("Invalid depth buffer offset\n"); 105 DRM_ERROR("Invalid depth buffer offset\n");
102 return -EINVAL; 106 return -EINVAL;
103 } 107 }
104 break; 108 break;
105 109
106 case RADEON_EMIT_PP_CNTL: 110 case RADEON_EMIT_PP_CNTL:
107 if (radeon_check_and_fixup_offset(dev_priv, file_priv, 111 data = drm_buffer_pointer_to_dword(buf,
108 &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) { 112 (RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4);
113
114 if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
109 DRM_ERROR("Invalid colour buffer offset\n"); 115 DRM_ERROR("Invalid colour buffer offset\n");
110 return -EINVAL; 116 return -EINVAL;
111 } 117 }
@@ -117,8 +123,8 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
117 case R200_EMIT_PP_TXOFFSET_3: 123 case R200_EMIT_PP_TXOFFSET_3:
118 case R200_EMIT_PP_TXOFFSET_4: 124 case R200_EMIT_PP_TXOFFSET_4:
119 case R200_EMIT_PP_TXOFFSET_5: 125 case R200_EMIT_PP_TXOFFSET_5:
120 if (radeon_check_and_fixup_offset(dev_priv, file_priv, 126 data = drm_buffer_pointer_to_dword(buf, 0);
121 &data[0])) { 127 if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
122 DRM_ERROR("Invalid R200 texture offset\n"); 128 DRM_ERROR("Invalid R200 texture offset\n");
123 return -EINVAL; 129 return -EINVAL;
124 } 130 }
@@ -127,8 +133,9 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
127 case RADEON_EMIT_PP_TXFILTER_0: 133 case RADEON_EMIT_PP_TXFILTER_0:
128 case RADEON_EMIT_PP_TXFILTER_1: 134 case RADEON_EMIT_PP_TXFILTER_1:
129 case RADEON_EMIT_PP_TXFILTER_2: 135 case RADEON_EMIT_PP_TXFILTER_2:
130 if (radeon_check_and_fixup_offset(dev_priv, file_priv, 136 data = drm_buffer_pointer_to_dword(buf,
131 &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) { 137 (RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4);
138 if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
132 DRM_ERROR("Invalid R100 texture offset\n"); 139 DRM_ERROR("Invalid R100 texture offset\n");
133 return -EINVAL; 140 return -EINVAL;
134 } 141 }
@@ -142,9 +149,10 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
142 case R200_EMIT_PP_CUBIC_OFFSETS_5:{ 149 case R200_EMIT_PP_CUBIC_OFFSETS_5:{
143 int i; 150 int i;
144 for (i = 0; i < 5; i++) { 151 for (i = 0; i < 5; i++) {
152 data = drm_buffer_pointer_to_dword(buf, i);
145 if (radeon_check_and_fixup_offset(dev_priv, 153 if (radeon_check_and_fixup_offset(dev_priv,
146 file_priv, 154 file_priv,
147 &data[i])) { 155 data)) {
148 DRM_ERROR 156 DRM_ERROR
149 ("Invalid R200 cubic texture offset\n"); 157 ("Invalid R200 cubic texture offset\n");
150 return -EINVAL; 158 return -EINVAL;
@@ -158,9 +166,10 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
158 case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{ 166 case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{
159 int i; 167 int i;
160 for (i = 0; i < 5; i++) { 168 for (i = 0; i < 5; i++) {
169 data = drm_buffer_pointer_to_dword(buf, i);
161 if (radeon_check_and_fixup_offset(dev_priv, 170 if (radeon_check_and_fixup_offset(dev_priv,
162 file_priv, 171 file_priv,
163 &data[i])) { 172 data)) {
164 DRM_ERROR 173 DRM_ERROR
165 ("Invalid R100 cubic texture offset\n"); 174 ("Invalid R100 cubic texture offset\n");
166 return -EINVAL; 175 return -EINVAL;
@@ -269,23 +278,24 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
269 cmdbuf, 278 cmdbuf,
270 unsigned int *cmdsz) 279 unsigned int *cmdsz)
271{ 280{
272 u32 *cmd = (u32 *) cmdbuf->buf; 281 u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
273 u32 offset, narrays; 282 u32 offset, narrays;
274 int count, i, k; 283 int count, i, k;
275 284
276 *cmdsz = 2 + ((cmd[0] & RADEON_CP_PACKET_COUNT_MASK) >> 16); 285 count = ((*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16);
286 *cmdsz = 2 + count;
277 287
278 if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) { 288 if ((*cmd & 0xc0000000) != RADEON_CP_PACKET3) {
279 DRM_ERROR("Not a type 3 packet\n"); 289 DRM_ERROR("Not a type 3 packet\n");
280 return -EINVAL; 290 return -EINVAL;
281 } 291 }
282 292
283 if (4 * *cmdsz > cmdbuf->bufsz) { 293 if (4 * *cmdsz > drm_buffer_unprocessed(cmdbuf->buffer)) {
284 DRM_ERROR("Packet size larger than size of data provided\n"); 294 DRM_ERROR("Packet size larger than size of data provided\n");
285 return -EINVAL; 295 return -EINVAL;
286 } 296 }
287 297
288 switch(cmd[0] & 0xff00) { 298 switch (*cmd & 0xff00) {
289 /* XXX Are there old drivers needing other packets? */ 299 /* XXX Are there old drivers needing other packets? */
290 300
291 case RADEON_3D_DRAW_IMMD: 301 case RADEON_3D_DRAW_IMMD:
@@ -312,7 +322,6 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
312 break; 322 break;
313 323
314 case RADEON_3D_LOAD_VBPNTR: 324 case RADEON_3D_LOAD_VBPNTR:
315 count = (cmd[0] >> 16) & 0x3fff;
316 325
317 if (count > 18) { /* 12 arrays max */ 326 if (count > 18) { /* 12 arrays max */
318 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", 327 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
@@ -321,13 +330,16 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
321 } 330 }
322 331
323 /* carefully check packet contents */ 332 /* carefully check packet contents */
324 narrays = cmd[1] & ~0xc000; 333 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
334
335 narrays = *cmd & ~0xc000;
325 k = 0; 336 k = 0;
326 i = 2; 337 i = 2;
327 while ((k < narrays) && (i < (count + 2))) { 338 while ((k < narrays) && (i < (count + 2))) {
328 i++; /* skip attribute field */ 339 i++; /* skip attribute field */
340 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
329 if (radeon_check_and_fixup_offset(dev_priv, file_priv, 341 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
330 &cmd[i])) { 342 cmd)) {
331 DRM_ERROR 343 DRM_ERROR
332 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", 344 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
333 k, i); 345 k, i);
@@ -338,8 +350,10 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
338 if (k == narrays) 350 if (k == narrays)
339 break; 351 break;
340 /* have one more to process, they come in pairs */ 352 /* have one more to process, they come in pairs */
353 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
354
341 if (radeon_check_and_fixup_offset(dev_priv, 355 if (radeon_check_and_fixup_offset(dev_priv,
342 file_priv, &cmd[i])) 356 file_priv, cmd))
343 { 357 {
344 DRM_ERROR 358 DRM_ERROR
345 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", 359 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
@@ -363,7 +377,9 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
363 DRM_ERROR("Invalid 3d packet for r200-class chip\n"); 377 DRM_ERROR("Invalid 3d packet for r200-class chip\n");
364 return -EINVAL; 378 return -EINVAL;
365 } 379 }
366 if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) { 380
381 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
382 if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
367 DRM_ERROR("Invalid rndr_gen_indx offset\n"); 383 DRM_ERROR("Invalid rndr_gen_indx offset\n");
368 return -EINVAL; 384 return -EINVAL;
369 } 385 }
@@ -374,12 +390,15 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
374 DRM_ERROR("Invalid 3d packet for r100-class chip\n"); 390 DRM_ERROR("Invalid 3d packet for r100-class chip\n");
375 return -EINVAL; 391 return -EINVAL;
376 } 392 }
377 if ((cmd[1] & 0x8000ffff) != 0x80000810) { 393
378 DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); 394 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
395 if ((*cmd & 0x8000ffff) != 0x80000810) {
396 DRM_ERROR("Invalid indx_buffer reg address %08X\n", *cmd);
379 return -EINVAL; 397 return -EINVAL;
380 } 398 }
381 if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[2])) { 399 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
382 DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); 400 if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
401 DRM_ERROR("Invalid indx_buffer offset is %08X\n", *cmd);
383 return -EINVAL; 402 return -EINVAL;
384 } 403 }
385 break; 404 break;
@@ -388,31 +407,34 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
388 case RADEON_CNTL_PAINT_MULTI: 407 case RADEON_CNTL_PAINT_MULTI:
389 case RADEON_CNTL_BITBLT_MULTI: 408 case RADEON_CNTL_BITBLT_MULTI:
390 /* MSB of opcode: next DWORD GUI_CNTL */ 409 /* MSB of opcode: next DWORD GUI_CNTL */
391 if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL 410 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
411 if (*cmd & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
392 | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { 412 | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
393 offset = cmd[2] << 10; 413 u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
414 offset = *cmd2 << 10;
394 if (radeon_check_and_fixup_offset 415 if (radeon_check_and_fixup_offset
395 (dev_priv, file_priv, &offset)) { 416 (dev_priv, file_priv, &offset)) {
396 DRM_ERROR("Invalid first packet offset\n"); 417 DRM_ERROR("Invalid first packet offset\n");
397 return -EINVAL; 418 return -EINVAL;
398 } 419 }
399 cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10; 420 *cmd2 = (*cmd2 & 0xffc00000) | offset >> 10;
400 } 421 }
401 422
402 if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && 423 if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
403 (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { 424 (*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
404 offset = cmd[3] << 10; 425 u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
426 offset = *cmd << 10;
405 if (radeon_check_and_fixup_offset 427 if (radeon_check_and_fixup_offset
406 (dev_priv, file_priv, &offset)) { 428 (dev_priv, file_priv, &offset)) {
407 DRM_ERROR("Invalid second packet offset\n"); 429 DRM_ERROR("Invalid second packet offset\n");
408 return -EINVAL; 430 return -EINVAL;
409 } 431 }
410 cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10; 432 *cmd3 = (*cmd3 & 0xffc00000) | offset >> 10;
411 } 433 }
412 break; 434 break;
413 435
414 default: 436 default:
415 DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00); 437 DRM_ERROR("Invalid packet type %x\n", *cmd & 0xff00);
416 return -EINVAL; 438 return -EINVAL;
417 } 439 }
418 440
@@ -2611,7 +2633,6 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
2611{ 2633{
2612 int id = (int)header.packet.packet_id; 2634 int id = (int)header.packet.packet_id;
2613 int sz, reg; 2635 int sz, reg;
2614 int *data = (int *)cmdbuf->buf;
2615 RING_LOCALS; 2636 RING_LOCALS;
2616 2637
2617 if (id >= RADEON_MAX_STATE_PACKETS) 2638 if (id >= RADEON_MAX_STATE_PACKETS)
@@ -2620,23 +2641,22 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
2620 sz = packet[id].len; 2641 sz = packet[id].len;
2621 reg = packet[id].start; 2642 reg = packet[id].start;
2622 2643
2623 if (sz * sizeof(int) > cmdbuf->bufsz) { 2644 if (sz * sizeof(u32) > drm_buffer_unprocessed(cmdbuf->buffer)) {
2624 DRM_ERROR("Packet size provided larger than data provided\n"); 2645 DRM_ERROR("Packet size provided larger than data provided\n");
2625 return -EINVAL; 2646 return -EINVAL;
2626 } 2647 }
2627 2648
2628 if (radeon_check_and_fixup_packets(dev_priv, file_priv, id, data)) { 2649 if (radeon_check_and_fixup_packets(dev_priv, file_priv, id,
2650 cmdbuf->buffer)) {
2629 DRM_ERROR("Packet verification failed\n"); 2651 DRM_ERROR("Packet verification failed\n");
2630 return -EINVAL; 2652 return -EINVAL;
2631 } 2653 }
2632 2654
2633 BEGIN_RING(sz + 1); 2655 BEGIN_RING(sz + 1);
2634 OUT_RING(CP_PACKET0(reg, (sz - 1))); 2656 OUT_RING(CP_PACKET0(reg, (sz - 1)));
2635 OUT_RING_TABLE(data, sz); 2657 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
2636 ADVANCE_RING(); 2658 ADVANCE_RING();
2637 2659
2638 cmdbuf->buf += sz * sizeof(int);
2639 cmdbuf->bufsz -= sz * sizeof(int);
2640 return 0; 2660 return 0;
2641} 2661}
2642 2662
@@ -2653,10 +2673,8 @@ static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
2653 OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0)); 2673 OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
2654 OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT)); 2674 OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
2655 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1)); 2675 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
2656 OUT_RING_TABLE(cmdbuf->buf, sz); 2676 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
2657 ADVANCE_RING(); 2677 ADVANCE_RING();
2658 cmdbuf->buf += sz * sizeof(int);
2659 cmdbuf->bufsz -= sz * sizeof(int);
2660 return 0; 2678 return 0;
2661} 2679}
2662 2680
@@ -2675,10 +2693,8 @@ static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
2675 OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0)); 2693 OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
2676 OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT)); 2694 OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
2677 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1)); 2695 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
2678 OUT_RING_TABLE(cmdbuf->buf, sz); 2696 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
2679 ADVANCE_RING(); 2697 ADVANCE_RING();
2680 cmdbuf->buf += sz * sizeof(int);
2681 cmdbuf->bufsz -= sz * sizeof(int);
2682 return 0; 2698 return 0;
2683} 2699}
2684 2700
@@ -2696,11 +2712,9 @@ static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
2696 OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0)); 2712 OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
2697 OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT)); 2713 OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
2698 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1))); 2714 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
2699 OUT_RING_TABLE(cmdbuf->buf, sz); 2715 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
2700 ADVANCE_RING(); 2716 ADVANCE_RING();
2701 2717
2702 cmdbuf->buf += sz * sizeof(int);
2703 cmdbuf->bufsz -= sz * sizeof(int);
2704 return 0; 2718 return 0;
2705} 2719}
2706 2720
@@ -2714,7 +2728,7 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
2714 2728
2715 if (!sz) 2729 if (!sz)
2716 return 0; 2730 return 0;
2717 if (sz * 4 > cmdbuf->bufsz) 2731 if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
2718 return -EINVAL; 2732 return -EINVAL;
2719 2733
2720 BEGIN_RING(5 + sz); 2734 BEGIN_RING(5 + sz);
@@ -2722,11 +2736,9 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
2722 OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0)); 2736 OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
2723 OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT)); 2737 OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
2724 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1))); 2738 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
2725 OUT_RING_TABLE(cmdbuf->buf, sz); 2739 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
2726 ADVANCE_RING(); 2740 ADVANCE_RING();
2727 2741
2728 cmdbuf->buf += sz * sizeof(int);
2729 cmdbuf->bufsz -= sz * sizeof(int);
2730 return 0; 2742 return 0;
2731} 2743}
2732 2744
@@ -2748,11 +2760,9 @@ static int radeon_emit_packet3(struct drm_device * dev,
2748 } 2760 }
2749 2761
2750 BEGIN_RING(cmdsz); 2762 BEGIN_RING(cmdsz);
2751 OUT_RING_TABLE(cmdbuf->buf, cmdsz); 2763 OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
2752 ADVANCE_RING(); 2764 ADVANCE_RING();
2753 2765
2754 cmdbuf->buf += cmdsz * 4;
2755 cmdbuf->bufsz -= cmdsz * 4;
2756 return 0; 2766 return 0;
2757} 2767}
2758 2768
@@ -2805,16 +2815,16 @@ static int radeon_emit_packet3_cliprect(struct drm_device *dev,
2805 } 2815 }
2806 2816
2807 BEGIN_RING(cmdsz); 2817 BEGIN_RING(cmdsz);
2808 OUT_RING_TABLE(cmdbuf->buf, cmdsz); 2818 OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
2809 ADVANCE_RING(); 2819 ADVANCE_RING();
2810 2820
2811 } while (++i < cmdbuf->nbox); 2821 } while (++i < cmdbuf->nbox);
2812 if (cmdbuf->nbox == 1) 2822 if (cmdbuf->nbox == 1)
2813 cmdbuf->nbox = 0; 2823 cmdbuf->nbox = 0;
2814 2824
2825 return 0;
2815 out: 2826 out:
2816 cmdbuf->buf += cmdsz * 4; 2827 drm_buffer_advance(cmdbuf->buffer, cmdsz * 4);
2817 cmdbuf->bufsz -= cmdsz * 4;
2818 return 0; 2828 return 0;
2819} 2829}
2820 2830
@@ -2847,16 +2857,16 @@ static int radeon_emit_wait(struct drm_device * dev, int flags)
2847 return 0; 2857 return 0;
2848} 2858}
2849 2859
2850static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv) 2860static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
2861 struct drm_file *file_priv)
2851{ 2862{
2852 drm_radeon_private_t *dev_priv = dev->dev_private; 2863 drm_radeon_private_t *dev_priv = dev->dev_private;
2853 struct drm_device_dma *dma = dev->dma; 2864 struct drm_device_dma *dma = dev->dma;
2854 struct drm_buf *buf = NULL; 2865 struct drm_buf *buf = NULL;
2866 drm_radeon_cmd_header_t stack_header;
2855 int idx; 2867 int idx;
2856 drm_radeon_kcmd_buffer_t *cmdbuf = data; 2868 drm_radeon_kcmd_buffer_t *cmdbuf = data;
2857 drm_radeon_cmd_header_t header; 2869 int orig_nbox;
2858 int orig_nbox, orig_bufsz;
2859 char *kbuf = NULL;
2860 2870
2861 LOCK_TEST_WITH_RETURN(dev, file_priv); 2871 LOCK_TEST_WITH_RETURN(dev, file_priv);
2862 2872
@@ -2871,17 +2881,16 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2871 * races between checking values and using those values in other code, 2881 * races between checking values and using those values in other code,
2872 * and simply to avoid a lot of function calls to copy in data. 2882 * and simply to avoid a lot of function calls to copy in data.
2873 */ 2883 */
2874 orig_bufsz = cmdbuf->bufsz; 2884 if (cmdbuf->bufsz != 0) {
2875 if (orig_bufsz != 0) { 2885 int rv;
2876 kbuf = kmalloc(cmdbuf->bufsz, GFP_KERNEL); 2886 void __user *buffer = cmdbuf->buffer;
2877 if (kbuf == NULL) 2887 rv = drm_buffer_alloc(&cmdbuf->buffer, cmdbuf->bufsz);
2878 return -ENOMEM; 2888 if (rv)
2879 if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf, 2889 return rv;
2880 cmdbuf->bufsz)) { 2890 rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer,
2881 kfree(kbuf); 2891 cmdbuf->bufsz);
2882 return -EFAULT; 2892 if (rv)
2883 } 2893 return rv;
2884 cmdbuf->buf = kbuf;
2885 } 2894 }
2886 2895
2887 orig_nbox = cmdbuf->nbox; 2896 orig_nbox = cmdbuf->nbox;
@@ -2890,24 +2899,24 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2890 int temp; 2899 int temp;
2891 temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf); 2900 temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
2892 2901
2893 if (orig_bufsz != 0) 2902 if (cmdbuf->bufsz != 0)
2894 kfree(kbuf); 2903 drm_buffer_free(cmdbuf->buffer);
2895 2904
2896 return temp; 2905 return temp;
2897 } 2906 }
2898 2907
2899 /* microcode_version != r300 */ 2908 /* microcode_version != r300 */
2900 while (cmdbuf->bufsz >= sizeof(header)) { 2909 while (drm_buffer_unprocessed(cmdbuf->buffer) >= sizeof(stack_header)) {
2901 2910
2902 header.i = *(int *)cmdbuf->buf; 2911 drm_radeon_cmd_header_t *header;
2903 cmdbuf->buf += sizeof(header); 2912 header = drm_buffer_read_object(cmdbuf->buffer,
2904 cmdbuf->bufsz -= sizeof(header); 2913 sizeof(stack_header), &stack_header);
2905 2914
2906 switch (header.header.cmd_type) { 2915 switch (header->header.cmd_type) {
2907 case RADEON_CMD_PACKET: 2916 case RADEON_CMD_PACKET:
2908 DRM_DEBUG("RADEON_CMD_PACKET\n"); 2917 DRM_DEBUG("RADEON_CMD_PACKET\n");
2909 if (radeon_emit_packets 2918 if (radeon_emit_packets
2910 (dev_priv, file_priv, header, cmdbuf)) { 2919 (dev_priv, file_priv, *header, cmdbuf)) {
2911 DRM_ERROR("radeon_emit_packets failed\n"); 2920 DRM_ERROR("radeon_emit_packets failed\n");
2912 goto err; 2921 goto err;
2913 } 2922 }
@@ -2915,7 +2924,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2915 2924
2916 case RADEON_CMD_SCALARS: 2925 case RADEON_CMD_SCALARS:
2917 DRM_DEBUG("RADEON_CMD_SCALARS\n"); 2926 DRM_DEBUG("RADEON_CMD_SCALARS\n");
2918 if (radeon_emit_scalars(dev_priv, header, cmdbuf)) { 2927 if (radeon_emit_scalars(dev_priv, *header, cmdbuf)) {
2919 DRM_ERROR("radeon_emit_scalars failed\n"); 2928 DRM_ERROR("radeon_emit_scalars failed\n");
2920 goto err; 2929 goto err;
2921 } 2930 }
@@ -2923,7 +2932,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2923 2932
2924 case RADEON_CMD_VECTORS: 2933 case RADEON_CMD_VECTORS:
2925 DRM_DEBUG("RADEON_CMD_VECTORS\n"); 2934 DRM_DEBUG("RADEON_CMD_VECTORS\n");
2926 if (radeon_emit_vectors(dev_priv, header, cmdbuf)) { 2935 if (radeon_emit_vectors(dev_priv, *header, cmdbuf)) {
2927 DRM_ERROR("radeon_emit_vectors failed\n"); 2936 DRM_ERROR("radeon_emit_vectors failed\n");
2928 goto err; 2937 goto err;
2929 } 2938 }
@@ -2931,7 +2940,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2931 2940
2932 case RADEON_CMD_DMA_DISCARD: 2941 case RADEON_CMD_DMA_DISCARD:
2933 DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n"); 2942 DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
2934 idx = header.dma.buf_idx; 2943 idx = header->dma.buf_idx;
2935 if (idx < 0 || idx >= dma->buf_count) { 2944 if (idx < 0 || idx >= dma->buf_count) {
2936 DRM_ERROR("buffer index %d (of %d max)\n", 2945 DRM_ERROR("buffer index %d (of %d max)\n",
2937 idx, dma->buf_count - 1); 2946 idx, dma->buf_count - 1);
@@ -2968,7 +2977,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2968 2977
2969 case RADEON_CMD_SCALARS2: 2978 case RADEON_CMD_SCALARS2:
2970 DRM_DEBUG("RADEON_CMD_SCALARS2\n"); 2979 DRM_DEBUG("RADEON_CMD_SCALARS2\n");
2971 if (radeon_emit_scalars2(dev_priv, header, cmdbuf)) { 2980 if (radeon_emit_scalars2(dev_priv, *header, cmdbuf)) {
2972 DRM_ERROR("radeon_emit_scalars2 failed\n"); 2981 DRM_ERROR("radeon_emit_scalars2 failed\n");
2973 goto err; 2982 goto err;
2974 } 2983 }
@@ -2976,37 +2985,37 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2976 2985
2977 case RADEON_CMD_WAIT: 2986 case RADEON_CMD_WAIT:
2978 DRM_DEBUG("RADEON_CMD_WAIT\n"); 2987 DRM_DEBUG("RADEON_CMD_WAIT\n");
2979 if (radeon_emit_wait(dev, header.wait.flags)) { 2988 if (radeon_emit_wait(dev, header->wait.flags)) {
2980 DRM_ERROR("radeon_emit_wait failed\n"); 2989 DRM_ERROR("radeon_emit_wait failed\n");
2981 goto err; 2990 goto err;
2982 } 2991 }
2983 break; 2992 break;
2984 case RADEON_CMD_VECLINEAR: 2993 case RADEON_CMD_VECLINEAR:
2985 DRM_DEBUG("RADEON_CMD_VECLINEAR\n"); 2994 DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
2986 if (radeon_emit_veclinear(dev_priv, header, cmdbuf)) { 2995 if (radeon_emit_veclinear(dev_priv, *header, cmdbuf)) {
2987 DRM_ERROR("radeon_emit_veclinear failed\n"); 2996 DRM_ERROR("radeon_emit_veclinear failed\n");
2988 goto err; 2997 goto err;
2989 } 2998 }
2990 break; 2999 break;
2991 3000
2992 default: 3001 default:
2993 DRM_ERROR("bad cmd_type %d at %p\n", 3002 DRM_ERROR("bad cmd_type %d at byte %d\n",
2994 header.header.cmd_type, 3003 header->header.cmd_type,
2995 cmdbuf->buf - sizeof(header)); 3004 cmdbuf->buffer->iterator);
2996 goto err; 3005 goto err;
2997 } 3006 }
2998 } 3007 }
2999 3008
3000 if (orig_bufsz != 0) 3009 if (cmdbuf->bufsz != 0)
3001 kfree(kbuf); 3010 drm_buffer_free(cmdbuf->buffer);
3002 3011
3003 DRM_DEBUG("DONE\n"); 3012 DRM_DEBUG("DONE\n");
3004 COMMIT_RING(); 3013 COMMIT_RING();
3005 return 0; 3014 return 0;
3006 3015
3007 err: 3016 err:
3008 if (orig_bufsz != 0) 3017 if (cmdbuf->bufsz != 0)
3009 kfree(kbuf); 3018 drm_buffer_free(cmdbuf->buffer);
3010 return -EINVAL; 3019 return -EINVAL;
3011} 3020}
3012 3021