author | Pauli Nieminen <suokkos@gmail.com> | 2010-02-01 12:11:16 -0500 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2010-02-22 18:46:20 -0500 |
commit | b4fe945405e477cded91772b4fec854705443dd5 (patch) | |
tree | 4fb175511947cfd9980ca74413692f96561d1512 /drivers/gpu/drm/radeon/r300_cmdbuf.c | |
parent | 7a9f0dd9c49425e2b0e39ada4757bc7a38c84873 (diff) |
drm/radeon: Fix memory allocation failures in the preKMS command stream checking.
Allocation of a single large block of memory may fail under memory
pressure. A drm_buffer object can hold one large block of data in
multiple independent pages, which prevents allocation failures.
This patch converts all command stream access to use the drm_buffer
interface. All direct access to the array has to go through drm_buffer
functions to get the correct pointer.
Outputting the command stream to the ring buffer needs to be aware of
the split nature of drm_buffer. The output operation requires the new
OUT_RING_DRM_BUFFER macro.
Signed-off-by: Pauli Nieminen <suokkos@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
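The conversion pattern repeated throughout the diff below boils down to: drop the flat `cmdbuf->buf`/`cmdbuf->bufsz` pointer arithmetic and go through the drm_buffer accessors instead. The sketch that follows contrasts the two styles using only calls that appear in this patch (`drm_buffer_unprocessed`, `drm_buffer_pointer_to_dword`, `OUT_RING_DRM_BUFFER`, `radeon_check_offset`) plus the existing radeon ring macros; the wrapper names `emit_values_old`/`emit_values_new` are illustrative and not part of the driver.

```c
/* Before: the command stream is one contiguous allocation, accessed with
 * raw pointer arithmetic and manual size bookkeeping. */
static int emit_values_old(drm_radeon_private_t *dev_priv,
			   drm_radeon_kcmd_buffer_t *cmdbuf, int reg, int sz)
{
	RING_LOCALS;

	if (sz * 4 > cmdbuf->bufsz)		/* bytes left in the stream */
		return -EINVAL;

	BEGIN_RING(1 + sz);
	OUT_RING(CP_PACKET0(reg, sz - 1));
	OUT_RING_TABLE((int *)cmdbuf->buf, sz);	/* copy from the flat array */
	ADVANCE_RING();

	cmdbuf->buf += sz * 4;			/* advance by hand */
	cmdbuf->bufsz -= sz * 4;
	return 0;
}

/* After: the stream lives in a drm_buffer that may be split over several
 * pages, so every access goes through the drm_buffer helpers and the new
 * emit macro that understands the page boundaries. */
static int emit_values_new(drm_radeon_private_t *dev_priv,
			   drm_radeon_kcmd_buffer_t *cmdbuf, int reg, int sz)
{
	u32 *value;
	RING_LOCALS;

	if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
		return -EINVAL;

	/* Peek at dword 0 without assuming the data is contiguous (the real
	 * code only range-checks registers flagged MARK_CHECK_OFFSET). */
	value = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
	if (!radeon_check_offset(dev_priv, *value))
		return -EINVAL;

	BEGIN_RING(1 + sz);
	OUT_RING(CP_PACKET0(reg, sz - 1));
	/* Emits sz dwords page by page; the manual buf/bufsz updates above
	 * disappear because the macro consumes what it emits. */
	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
	ADVANCE_RING();
	return 0;
}
```

The same substitution (bufsz checks → drm_buffer_unprocessed(), cmd[i] indexing → drm_buffer_pointer_to_dword(), header copies → drm_buffer_read_object(), manual advances → drm_buffer_advance() or the emit macro) accounts for nearly every hunk in the diff below.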
Diffstat (limited to 'drivers/gpu/drm/radeon/r300_cmdbuf.c')
-rw-r--r-- | drivers/gpu/drm/radeon/r300_cmdbuf.c | 280 |
1 file changed, 128 insertions, 152 deletions
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c index 34bffa0e4b73..7f59352cd637 100644 --- a/drivers/gpu/drm/radeon/r300_cmdbuf.c +++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c | |||
@@ -33,6 +33,7 @@ | |||
33 | 33 | ||
34 | #include "drmP.h" | 34 | #include "drmP.h" |
35 | #include "drm.h" | 35 | #include "drm.h" |
36 | #include "drm_buffer.h" | ||
36 | #include "radeon_drm.h" | 37 | #include "radeon_drm.h" |
37 | #include "radeon_drv.h" | 38 | #include "radeon_drv.h" |
38 | #include "r300_reg.h" | 39 | #include "r300_reg.h" |
@@ -299,46 +300,42 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t * | |||
299 | int reg; | 300 | int reg; |
300 | int sz; | 301 | int sz; |
301 | int i; | 302 | int i; |
302 | int values[64]; | 303 | u32 *value; |
303 | RING_LOCALS; | 304 | RING_LOCALS; |
304 | 305 | ||
305 | sz = header.packet0.count; | 306 | sz = header.packet0.count; |
306 | reg = (header.packet0.reghi << 8) | header.packet0.reglo; | 307 | reg = (header.packet0.reghi << 8) | header.packet0.reglo; |
307 | 308 | ||
308 | if ((sz > 64) || (sz < 0)) { | 309 | if ((sz > 64) || (sz < 0)) { |
309 | DRM_ERROR | 310 | DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", |
310 | ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", | 311 | reg, sz); |
311 | reg, sz); | ||
312 | return -EINVAL; | 312 | return -EINVAL; |
313 | } | 313 | } |
314 | |||
314 | for (i = 0; i < sz; i++) { | 315 | for (i = 0; i < sz; i++) { |
315 | values[i] = ((int *)cmdbuf->buf)[i]; | ||
316 | switch (r300_reg_flags[(reg >> 2) + i]) { | 316 | switch (r300_reg_flags[(reg >> 2) + i]) { |
317 | case MARK_SAFE: | 317 | case MARK_SAFE: |
318 | break; | 318 | break; |
319 | case MARK_CHECK_OFFSET: | 319 | case MARK_CHECK_OFFSET: |
320 | if (!radeon_check_offset(dev_priv, (u32) values[i])) { | 320 | value = drm_buffer_pointer_to_dword(cmdbuf->buffer, i); |
321 | DRM_ERROR | 321 | if (!radeon_check_offset(dev_priv, *value)) { |
322 | ("Offset failed range check (reg=%04x sz=%d)\n", | 322 | DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n", |
323 | reg, sz); | 323 | reg, sz); |
324 | return -EINVAL; | 324 | return -EINVAL; |
325 | } | 325 | } |
326 | break; | 326 | break; |
327 | default: | 327 | default: |
328 | DRM_ERROR("Register %04x failed check as flag=%02x\n", | 328 | DRM_ERROR("Register %04x failed check as flag=%02x\n", |
329 | reg + i * 4, r300_reg_flags[(reg >> 2) + i]); | 329 | reg + i * 4, r300_reg_flags[(reg >> 2) + i]); |
330 | return -EINVAL; | 330 | return -EINVAL; |
331 | } | 331 | } |
332 | } | 332 | } |
333 | 333 | ||
334 | BEGIN_RING(1 + sz); | 334 | BEGIN_RING(1 + sz); |
335 | OUT_RING(CP_PACKET0(reg, sz - 1)); | 335 | OUT_RING(CP_PACKET0(reg, sz - 1)); |
336 | OUT_RING_TABLE(values, sz); | 336 | OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz); |
337 | ADVANCE_RING(); | 337 | ADVANCE_RING(); |
338 | 338 | ||
339 | cmdbuf->buf += sz * 4; | ||
340 | cmdbuf->bufsz -= sz * 4; | ||
341 | |||
342 | return 0; | 339 | return 0; |
343 | } | 340 | } |
344 | 341 | ||
@@ -362,7 +359,7 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv, | |||
362 | if (!sz) | 359 | if (!sz) |
363 | return 0; | 360 | return 0; |
364 | 361 | ||
365 | if (sz * 4 > cmdbuf->bufsz) | 362 | if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer)) |
366 | return -EINVAL; | 363 | return -EINVAL; |
367 | 364 | ||
368 | if (reg + sz * 4 >= 0x10000) { | 365 | if (reg + sz * 4 >= 0x10000) { |
@@ -380,12 +377,9 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv, | |||
380 | 377 | ||
381 | BEGIN_RING(1 + sz); | 378 | BEGIN_RING(1 + sz); |
382 | OUT_RING(CP_PACKET0(reg, sz - 1)); | 379 | OUT_RING(CP_PACKET0(reg, sz - 1)); |
383 | OUT_RING_TABLE((int *)cmdbuf->buf, sz); | 380 | OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz); |
384 | ADVANCE_RING(); | 381 | ADVANCE_RING(); |
385 | 382 | ||
386 | cmdbuf->buf += sz * 4; | ||
387 | cmdbuf->bufsz -= sz * 4; | ||
388 | |||
389 | return 0; | 383 | return 0; |
390 | } | 384 | } |
391 | 385 | ||
@@ -407,7 +401,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv, | |||
407 | 401 | ||
408 | if (!sz) | 402 | if (!sz) |
409 | return 0; | 403 | return 0; |
410 | if (sz * 16 > cmdbuf->bufsz) | 404 | if (sz * 16 > drm_buffer_unprocessed(cmdbuf->buffer)) |
411 | return -EINVAL; | 405 | return -EINVAL; |
412 | 406 | ||
413 | /* VAP is very sensitive so we purge cache before we program it | 407 | /* VAP is very sensitive so we purge cache before we program it |
@@ -426,7 +420,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv, | |||
426 | BEGIN_RING(3 + sz * 4); | 420 | BEGIN_RING(3 + sz * 4); |
427 | OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr); | 421 | OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr); |
428 | OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1)); | 422 | OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1)); |
429 | OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4); | 423 | OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * 4); |
430 | ADVANCE_RING(); | 424 | ADVANCE_RING(); |
431 | 425 | ||
432 | BEGIN_RING(2); | 426 | BEGIN_RING(2); |
@@ -434,9 +428,6 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv, | |||
434 | OUT_RING(0); | 428 | OUT_RING(0); |
435 | ADVANCE_RING(); | 429 | ADVANCE_RING(); |
436 | 430 | ||
437 | cmdbuf->buf += sz * 16; | ||
438 | cmdbuf->bufsz -= sz * 16; | ||
439 | |||
440 | return 0; | 431 | return 0; |
441 | } | 432 | } |
442 | 433 | ||
@@ -449,14 +440,14 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv, | |||
449 | { | 440 | { |
450 | RING_LOCALS; | 441 | RING_LOCALS; |
451 | 442 | ||
452 | if (8 * 4 > cmdbuf->bufsz) | 443 | if (8 * 4 > drm_buffer_unprocessed(cmdbuf->buffer)) |
453 | return -EINVAL; | 444 | return -EINVAL; |
454 | 445 | ||
455 | BEGIN_RING(10); | 446 | BEGIN_RING(10); |
456 | OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8)); | 447 | OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8)); |
457 | OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING | | 448 | OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING | |
458 | (1 << R300_PRIM_NUM_VERTICES_SHIFT)); | 449 | (1 << R300_PRIM_NUM_VERTICES_SHIFT)); |
459 | OUT_RING_TABLE((int *)cmdbuf->buf, 8); | 450 | OUT_RING_DRM_BUFFER(cmdbuf->buffer, 8); |
460 | ADVANCE_RING(); | 451 | ADVANCE_RING(); |
461 | 452 | ||
462 | BEGIN_RING(4); | 453 | BEGIN_RING(4); |
@@ -468,9 +459,6 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv, | |||
468 | /* set flush flag */ | 459 | /* set flush flag */ |
469 | dev_priv->track_flush |= RADEON_FLUSH_EMITED; | 460 | dev_priv->track_flush |= RADEON_FLUSH_EMITED; |
470 | 461 | ||
471 | cmdbuf->buf += 8 * 4; | ||
472 | cmdbuf->bufsz -= 8 * 4; | ||
473 | |||
474 | return 0; | 462 | return 0; |
475 | } | 463 | } |
476 | 464 | ||
@@ -480,28 +468,29 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv, | |||
480 | { | 468 | { |
481 | int count, i, k; | 469 | int count, i, k; |
482 | #define MAX_ARRAY_PACKET 64 | 470 | #define MAX_ARRAY_PACKET 64 |
483 | u32 payload[MAX_ARRAY_PACKET]; | 471 | u32 *data; |
484 | u32 narrays; | 472 | u32 narrays; |
485 | RING_LOCALS; | 473 | RING_LOCALS; |
486 | 474 | ||
487 | count = (header >> 16) & 0x3fff; | 475 | count = (header & RADEON_CP_PACKET_COUNT_MASK) >> 16; |
488 | 476 | ||
489 | if ((count + 1) > MAX_ARRAY_PACKET) { | 477 | if ((count + 1) > MAX_ARRAY_PACKET) { |
490 | DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", | 478 | DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", |
491 | count); | 479 | count); |
492 | return -EINVAL; | 480 | return -EINVAL; |
493 | } | 481 | } |
494 | memset(payload, 0, MAX_ARRAY_PACKET * 4); | ||
495 | memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4); | ||
496 | |||
497 | /* carefully check packet contents */ | 482 | /* carefully check packet contents */ |
498 | 483 | ||
499 | narrays = payload[0]; | 484 | /* We have already read the header so advance the buffer. */ |
485 | drm_buffer_advance(cmdbuf->buffer, 4); | ||
486 | |||
487 | narrays = *(u32 *)drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); | ||
500 | k = 0; | 488 | k = 0; |
501 | i = 1; | 489 | i = 1; |
502 | while ((k < narrays) && (i < (count + 1))) { | 490 | while ((k < narrays) && (i < (count + 1))) { |
503 | i++; /* skip attribute field */ | 491 | i++; /* skip attribute field */ |
504 | if (!radeon_check_offset(dev_priv, payload[i])) { | 492 | data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i); |
493 | if (!radeon_check_offset(dev_priv, *data)) { | ||
505 | DRM_ERROR | 494 | DRM_ERROR |
506 | ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", | 495 | ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", |
507 | k, i); | 496 | k, i); |
@@ -512,7 +501,8 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv, | |||
512 | if (k == narrays) | 501 | if (k == narrays) |
513 | break; | 502 | break; |
514 | /* have one more to process, they come in pairs */ | 503 | /* have one more to process, they come in pairs */ |
515 | if (!radeon_check_offset(dev_priv, payload[i])) { | 504 | data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i); |
505 | if (!radeon_check_offset(dev_priv, *data)) { | ||
516 | DRM_ERROR | 506 | DRM_ERROR |
517 | ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", | 507 | ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", |
518 | k, i); | 508 | k, i); |
@@ -533,30 +523,30 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv, | |||
533 | 523 | ||
534 | BEGIN_RING(count + 2); | 524 | BEGIN_RING(count + 2); |
535 | OUT_RING(header); | 525 | OUT_RING(header); |
536 | OUT_RING_TABLE(payload, count + 1); | 526 | OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 1); |
537 | ADVANCE_RING(); | 527 | ADVANCE_RING(); |
538 | 528 | ||
539 | cmdbuf->buf += (count + 2) * 4; | ||
540 | cmdbuf->bufsz -= (count + 2) * 4; | ||
541 | |||
542 | return 0; | 529 | return 0; |
543 | } | 530 | } |
544 | 531 | ||
545 | static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, | 532 | static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, |
546 | drm_radeon_kcmd_buffer_t *cmdbuf) | 533 | drm_radeon_kcmd_buffer_t *cmdbuf) |
547 | { | 534 | { |
548 | u32 *cmd = (u32 *) cmdbuf->buf; | 535 | u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); |
549 | int count, ret; | 536 | int count, ret; |
550 | RING_LOCALS; | 537 | RING_LOCALS; |
551 | 538 | ||
552 | count=(cmd[0]>>16) & 0x3fff; | ||
553 | 539 | ||
554 | if (cmd[0] & 0x8000) { | 540 | count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16; |
555 | u32 offset; | ||
556 | 541 | ||
557 | if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL | 542 | if (*cmd & 0x8000) { |
543 | u32 offset; | ||
544 | u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1); | ||
545 | if (*cmd1 & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL | ||
558 | | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { | 546 | | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { |
559 | offset = cmd[2] << 10; | 547 | |
548 | u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2); | ||
549 | offset = *cmd2 << 10; | ||
560 | ret = !radeon_check_offset(dev_priv, offset); | 550 | ret = !radeon_check_offset(dev_priv, offset); |
561 | if (ret) { | 551 | if (ret) { |
562 | DRM_ERROR("Invalid bitblt first offset is %08X\n", offset); | 552 | DRM_ERROR("Invalid bitblt first offset is %08X\n", offset); |
@@ -564,9 +554,10 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, | |||
564 | } | 554 | } |
565 | } | 555 | } |
566 | 556 | ||
567 | if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && | 557 | if ((*cmd1 & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && |
568 | (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { | 558 | (*cmd1 & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { |
569 | offset = cmd[3] << 10; | 559 | u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3); |
560 | offset = *cmd3 << 10; | ||
570 | ret = !radeon_check_offset(dev_priv, offset); | 561 | ret = !radeon_check_offset(dev_priv, offset); |
571 | if (ret) { | 562 | if (ret) { |
572 | DRM_ERROR("Invalid bitblt second offset is %08X\n", offset); | 563 | DRM_ERROR("Invalid bitblt second offset is %08X\n", offset); |
@@ -577,28 +568,25 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, | |||
577 | } | 568 | } |
578 | 569 | ||
579 | BEGIN_RING(count+2); | 570 | BEGIN_RING(count+2); |
580 | OUT_RING(cmd[0]); | 571 | OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2); |
581 | OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1); | ||
582 | ADVANCE_RING(); | 572 | ADVANCE_RING(); |
583 | 573 | ||
584 | cmdbuf->buf += (count+2)*4; | ||
585 | cmdbuf->bufsz -= (count+2)*4; | ||
586 | |||
587 | return 0; | 574 | return 0; |
588 | } | 575 | } |
589 | 576 | ||
590 | static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv, | 577 | static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv, |
591 | drm_radeon_kcmd_buffer_t *cmdbuf) | 578 | drm_radeon_kcmd_buffer_t *cmdbuf) |
592 | { | 579 | { |
593 | u32 *cmd; | 580 | u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); |
581 | u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1); | ||
594 | int count; | 582 | int count; |
595 | int expected_count; | 583 | int expected_count; |
596 | RING_LOCALS; | 584 | RING_LOCALS; |
597 | 585 | ||
598 | cmd = (u32 *) cmdbuf->buf; | 586 | count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16; |
599 | count = (cmd[0]>>16) & 0x3fff; | 587 | |
600 | expected_count = cmd[1] >> 16; | 588 | expected_count = *cmd1 >> 16; |
601 | if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit)) | 589 | if (!(*cmd1 & R300_VAP_VF_CNTL__INDEX_SIZE_32bit)) |
602 | expected_count = (expected_count+1)/2; | 590 | expected_count = (expected_count+1)/2; |
603 | 591 | ||
604 | if (count && count != expected_count) { | 592 | if (count && count != expected_count) { |
@@ -608,55 +596,53 @@ static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv, | |||
608 | } | 596 | } |
609 | 597 | ||
610 | BEGIN_RING(count+2); | 598 | BEGIN_RING(count+2); |
611 | OUT_RING(cmd[0]); | 599 | OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2); |
612 | OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1); | ||
613 | ADVANCE_RING(); | 600 | ADVANCE_RING(); |
614 | 601 | ||
615 | cmdbuf->buf += (count+2)*4; | ||
616 | cmdbuf->bufsz -= (count+2)*4; | ||
617 | |||
618 | if (!count) { | 602 | if (!count) { |
619 | drm_r300_cmd_header_t header; | 603 | drm_r300_cmd_header_t stack_header, *header; |
604 | u32 *cmd1, *cmd2, *cmd3; | ||
620 | 605 | ||
621 | if (cmdbuf->bufsz < 4*4 + sizeof(header)) { | 606 | if (drm_buffer_unprocessed(cmdbuf->buffer) |
607 | < 4*4 + sizeof(stack_header)) { | ||
622 | DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n"); | 608 | DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n"); |
623 | return -EINVAL; | 609 | return -EINVAL; |
624 | } | 610 | } |
625 | 611 | ||
626 | header.u = *(unsigned int *)cmdbuf->buf; | 612 | header = drm_buffer_read_object(cmdbuf->buffer, |
613 | sizeof(stack_header), &stack_header); | ||
627 | 614 | ||
628 | cmdbuf->buf += sizeof(header); | 615 | cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); |
629 | cmdbuf->bufsz -= sizeof(header); | 616 | cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1); |
630 | cmd = (u32 *) cmdbuf->buf; | 617 | cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2); |
618 | cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3); | ||
631 | 619 | ||
632 | if (header.header.cmd_type != R300_CMD_PACKET3 || | 620 | if (header->header.cmd_type != R300_CMD_PACKET3 || |
633 | header.packet3.packet != R300_CMD_PACKET3_RAW || | 621 | header->packet3.packet != R300_CMD_PACKET3_RAW || |
634 | cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) { | 622 | *cmd != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) { |
635 | DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n"); | 623 | DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n"); |
636 | return -EINVAL; | 624 | return -EINVAL; |
637 | } | 625 | } |
638 | 626 | ||
639 | if ((cmd[1] & 0x8000ffff) != 0x80000810) { | 627 | if ((*cmd1 & 0x8000ffff) != 0x80000810) { |
640 | DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); | 628 | DRM_ERROR("Invalid indx_buffer reg address %08X\n", |
629 | *cmd1); | ||
641 | return -EINVAL; | 630 | return -EINVAL; |
642 | } | 631 | } |
643 | if (!radeon_check_offset(dev_priv, cmd[2])) { | 632 | if (!radeon_check_offset(dev_priv, *cmd2)) { |
644 | DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); | 633 | DRM_ERROR("Invalid indx_buffer offset is %08X\n", |
634 | *cmd2); | ||
645 | return -EINVAL; | 635 | return -EINVAL; |
646 | } | 636 | } |
647 | if (cmd[3] != expected_count) { | 637 | if (*cmd3 != expected_count) { |
648 | DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n", | 638 | DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n", |
649 | cmd[3], expected_count); | 639 | *cmd3, expected_count); |
650 | return -EINVAL; | 640 | return -EINVAL; |
651 | } | 641 | } |
652 | 642 | ||
653 | BEGIN_RING(4); | 643 | BEGIN_RING(4); |
654 | OUT_RING(cmd[0]); | 644 | OUT_RING_DRM_BUFFER(cmdbuf->buffer, 4); |
655 | OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3); | ||
656 | ADVANCE_RING(); | 645 | ADVANCE_RING(); |
657 | |||
658 | cmdbuf->buf += 4*4; | ||
659 | cmdbuf->bufsz -= 4*4; | ||
660 | } | 646 | } |
661 | 647 | ||
662 | return 0; | 648 | return 0; |
@@ -665,39 +651,39 @@ static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv, | |||
665 | static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, | 651 | static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, |
666 | drm_radeon_kcmd_buffer_t *cmdbuf) | 652 | drm_radeon_kcmd_buffer_t *cmdbuf) |
667 | { | 653 | { |
668 | u32 header; | 654 | u32 *header; |
669 | int count; | 655 | int count; |
670 | RING_LOCALS; | 656 | RING_LOCALS; |
671 | 657 | ||
672 | if (4 > cmdbuf->bufsz) | 658 | if (4 > drm_buffer_unprocessed(cmdbuf->buffer)) |
673 | return -EINVAL; | 659 | return -EINVAL; |
674 | 660 | ||
675 | /* Fixme !! This simply emits a packet without much checking. | 661 | /* Fixme !! This simply emits a packet without much checking. |
676 | We need to be smarter. */ | 662 | We need to be smarter. */ |
677 | 663 | ||
678 | /* obtain first word - actual packet3 header */ | 664 | /* obtain first word - actual packet3 header */ |
679 | header = *(u32 *) cmdbuf->buf; | 665 | header = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); |
680 | 666 | ||
681 | /* Is it packet 3 ? */ | 667 | /* Is it packet 3 ? */ |
682 | if ((header >> 30) != 0x3) { | 668 | if ((*header >> 30) != 0x3) { |
683 | DRM_ERROR("Not a packet3 header (0x%08x)\n", header); | 669 | DRM_ERROR("Not a packet3 header (0x%08x)\n", *header); |
684 | return -EINVAL; | 670 | return -EINVAL; |
685 | } | 671 | } |
686 | 672 | ||
687 | count = (header >> 16) & 0x3fff; | 673 | count = (*header >> 16) & 0x3fff; |
688 | 674 | ||
689 | /* Check again now that we know how much data to expect */ | 675 | /* Check again now that we know how much data to expect */ |
690 | if ((count + 2) * 4 > cmdbuf->bufsz) { | 676 | if ((count + 2) * 4 > drm_buffer_unprocessed(cmdbuf->buffer)) { |
691 | DRM_ERROR | 677 | DRM_ERROR |
692 | ("Expected packet3 of length %d but have only %d bytes left\n", | 678 | ("Expected packet3 of length %d but have only %d bytes left\n", |
693 | (count + 2) * 4, cmdbuf->bufsz); | 679 | (count + 2) * 4, drm_buffer_unprocessed(cmdbuf->buffer)); |
694 | return -EINVAL; | 680 | return -EINVAL; |
695 | } | 681 | } |
696 | 682 | ||
697 | /* Is it a packet type we know about ? */ | 683 | /* Is it a packet type we know about ? */ |
698 | switch (header & 0xff00) { | 684 | switch (*header & 0xff00) { |
699 | case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */ | 685 | case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */ |
700 | return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header); | 686 | return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, *header); |
701 | 687 | ||
702 | case RADEON_CNTL_BITBLT_MULTI: | 688 | case RADEON_CNTL_BITBLT_MULTI: |
703 | return r300_emit_bitblt_multi(dev_priv, cmdbuf); | 689 | return r300_emit_bitblt_multi(dev_priv, cmdbuf); |
@@ -723,18 +709,14 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, | |||
723 | /* these packets are safe */ | 709 | /* these packets are safe */ |
724 | break; | 710 | break; |
725 | default: | 711 | default: |
726 | DRM_ERROR("Unknown packet3 header (0x%08x)\n", header); | 712 | DRM_ERROR("Unknown packet3 header (0x%08x)\n", *header); |
727 | return -EINVAL; | 713 | return -EINVAL; |
728 | } | 714 | } |
729 | 715 | ||
730 | BEGIN_RING(count + 2); | 716 | BEGIN_RING(count + 2); |
731 | OUT_RING(header); | 717 | OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2); |
732 | OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1); | ||
733 | ADVANCE_RING(); | 718 | ADVANCE_RING(); |
734 | 719 | ||
735 | cmdbuf->buf += (count + 2) * 4; | ||
736 | cmdbuf->bufsz -= (count + 2) * 4; | ||
737 | |||
738 | return 0; | 720 | return 0; |
739 | } | 721 | } |
740 | 722 | ||
@@ -748,8 +730,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv, | |||
748 | { | 730 | { |
749 | int n; | 731 | int n; |
750 | int ret; | 732 | int ret; |
751 | char *orig_buf = cmdbuf->buf; | 733 | int orig_iter = cmdbuf->buffer->iterator; |
752 | int orig_bufsz = cmdbuf->bufsz; | ||
753 | 734 | ||
754 | /* This is a do-while-loop so that we run the interior at least once, | 735 | /* This is a do-while-loop so that we run the interior at least once, |
755 | * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale. | 736 | * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale. |
@@ -761,8 +742,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv, | |||
761 | if (ret) | 742 | if (ret) |
762 | return ret; | 743 | return ret; |
763 | 744 | ||
764 | cmdbuf->buf = orig_buf; | 745 | cmdbuf->buffer->iterator = orig_iter; |
765 | cmdbuf->bufsz = orig_bufsz; | ||
766 | } | 746 | } |
767 | 747 | ||
768 | switch (header.packet3.packet) { | 748 | switch (header.packet3.packet) { |
@@ -785,9 +765,9 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv, | |||
785 | break; | 765 | break; |
786 | 766 | ||
787 | default: | 767 | default: |
788 | DRM_ERROR("bad packet3 type %i at %p\n", | 768 | DRM_ERROR("bad packet3 type %i at byte %d\n", |
789 | header.packet3.packet, | 769 | header.packet3.packet, |
790 | cmdbuf->buf - sizeof(header)); | 770 | cmdbuf->buffer->iterator - sizeof(header)); |
791 | return -EINVAL; | 771 | return -EINVAL; |
792 | } | 772 | } |
793 | 773 | ||
@@ -923,12 +903,13 @@ static int r300_scratch(drm_radeon_private_t *dev_priv, | |||
923 | drm_r300_cmd_header_t header) | 903 | drm_r300_cmd_header_t header) |
924 | { | 904 | { |
925 | u32 *ref_age_base; | 905 | u32 *ref_age_base; |
926 | u32 i, buf_idx, h_pending; | 906 | u32 i, *buf_idx, h_pending; |
927 | u64 ptr_addr; | 907 | u64 *ptr_addr; |
908 | u64 stack_ptr_addr; | ||
928 | RING_LOCALS; | 909 | RING_LOCALS; |
929 | 910 | ||
930 | if (cmdbuf->bufsz < | 911 | if (drm_buffer_unprocessed(cmdbuf->buffer) < |
931 | (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) { | 912 | (sizeof(u64) + header.scratch.n_bufs * sizeof(*buf_idx))) { |
932 | return -EINVAL; | 913 | return -EINVAL; |
933 | } | 914 | } |
934 | 915 | ||
@@ -938,36 +919,35 @@ static int r300_scratch(drm_radeon_private_t *dev_priv, | |||
938 | 919 | ||
939 | dev_priv->scratch_ages[header.scratch.reg]++; | 920 | dev_priv->scratch_ages[header.scratch.reg]++; |
940 | 921 | ||
941 | ptr_addr = get_unaligned((u64 *)cmdbuf->buf); | 922 | ptr_addr = drm_buffer_read_object(cmdbuf->buffer, |
942 | ref_age_base = (u32 *)(unsigned long)ptr_addr; | 923 | sizeof(stack_ptr_addr), &stack_ptr_addr); |
943 | 924 | ref_age_base = (u32 *)(unsigned long)*ptr_addr; | |
944 | cmdbuf->buf += sizeof(u64); | ||
945 | cmdbuf->bufsz -= sizeof(u64); | ||
946 | 925 | ||
947 | for (i=0; i < header.scratch.n_bufs; i++) { | 926 | for (i=0; i < header.scratch.n_bufs; i++) { |
948 | buf_idx = *(u32 *)cmdbuf->buf; | 927 | buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); |
949 | buf_idx *= 2; /* 8 bytes per buf */ | 928 | *buf_idx *= 2; /* 8 bytes per buf */ |
950 | 929 | ||
951 | if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) { | 930 | if (DRM_COPY_TO_USER(ref_age_base + *buf_idx, |
931 | &dev_priv->scratch_ages[header.scratch.reg], | ||
932 | sizeof(u32))) | ||
952 | return -EINVAL; | 933 | return -EINVAL; |
953 | } | ||
954 | 934 | ||
955 | if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) { | 935 | if (DRM_COPY_FROM_USER(&h_pending, |
936 | ref_age_base + *buf_idx + 1, | ||
937 | sizeof(u32))) | ||
956 | return -EINVAL; | 938 | return -EINVAL; |
957 | } | ||
958 | 939 | ||
959 | if (h_pending == 0) { | 940 | if (h_pending == 0) |
960 | return -EINVAL; | 941 | return -EINVAL; |
961 | } | ||
962 | 942 | ||
963 | h_pending--; | 943 | h_pending--; |
964 | 944 | ||
965 | if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) { | 945 | if (DRM_COPY_TO_USER(ref_age_base + *buf_idx + 1, |
946 | &h_pending, | ||
947 | sizeof(u32))) | ||
966 | return -EINVAL; | 948 | return -EINVAL; |
967 | } | ||
968 | 949 | ||
969 | cmdbuf->buf += sizeof(buf_idx); | 950 | drm_buffer_advance(cmdbuf->buffer, sizeof(*buf_idx)); |
970 | cmdbuf->bufsz -= sizeof(buf_idx); | ||
971 | } | 951 | } |
972 | 952 | ||
973 | BEGIN_RING(2); | 953 | BEGIN_RING(2); |
@@ -1009,19 +989,16 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv, | |||
1009 | DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type); | 989 | DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type); |
1010 | if (!sz) | 990 | if (!sz) |
1011 | return 0; | 991 | return 0; |
1012 | if (sz * stride * 4 > cmdbuf->bufsz) | 992 | if (sz * stride * 4 > drm_buffer_unprocessed(cmdbuf->buffer)) |
1013 | return -EINVAL; | 993 | return -EINVAL; |
1014 | 994 | ||
1015 | BEGIN_RING(3 + sz * stride); | 995 | BEGIN_RING(3 + sz * stride); |
1016 | OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr); | 996 | OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr); |
1017 | OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1)); | 997 | OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1)); |
1018 | OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride); | 998 | OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * stride); |
1019 | 999 | ||
1020 | ADVANCE_RING(); | 1000 | ADVANCE_RING(); |
1021 | 1001 | ||
1022 | cmdbuf->buf += sz * stride * 4; | ||
1023 | cmdbuf->bufsz -= sz * stride * 4; | ||
1024 | |||
1025 | return 0; | 1002 | return 0; |
1026 | } | 1003 | } |
1027 | 1004 | ||
@@ -1053,19 +1030,18 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, | |||
1053 | goto cleanup; | 1030 | goto cleanup; |
1054 | } | 1031 | } |
1055 | 1032 | ||
1056 | while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) { | 1033 | while (drm_buffer_unprocessed(cmdbuf->buffer) |
1034 | >= sizeof(drm_r300_cmd_header_t)) { | ||
1057 | int idx; | 1035 | int idx; |
1058 | drm_r300_cmd_header_t header; | 1036 | drm_r300_cmd_header_t *header, stack_header; |
1059 | |||
1060 | header.u = *(unsigned int *)cmdbuf->buf; | ||
1061 | 1037 | ||
1062 | cmdbuf->buf += sizeof(header); | 1038 | header = drm_buffer_read_object(cmdbuf->buffer, |
1063 | cmdbuf->bufsz -= sizeof(header); | 1039 | sizeof(stack_header), &stack_header); |
1064 | 1040 | ||
1065 | switch (header.header.cmd_type) { | 1041 | switch (header->header.cmd_type) { |
1066 | case R300_CMD_PACKET0: | 1042 | case R300_CMD_PACKET0: |
1067 | DRM_DEBUG("R300_CMD_PACKET0\n"); | 1043 | DRM_DEBUG("R300_CMD_PACKET0\n"); |
1068 | ret = r300_emit_packet0(dev_priv, cmdbuf, header); | 1044 | ret = r300_emit_packet0(dev_priv, cmdbuf, *header); |
1069 | if (ret) { | 1045 | if (ret) { |
1070 | DRM_ERROR("r300_emit_packet0 failed\n"); | 1046 | DRM_ERROR("r300_emit_packet0 failed\n"); |
1071 | goto cleanup; | 1047 | goto cleanup; |
@@ -1074,7 +1050,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, | |||
1074 | 1050 | ||
1075 | case R300_CMD_VPU: | 1051 | case R300_CMD_VPU: |
1076 | DRM_DEBUG("R300_CMD_VPU\n"); | 1052 | DRM_DEBUG("R300_CMD_VPU\n"); |
1077 | ret = r300_emit_vpu(dev_priv, cmdbuf, header); | 1053 | ret = r300_emit_vpu(dev_priv, cmdbuf, *header); |
1078 | if (ret) { | 1054 | if (ret) { |
1079 | DRM_ERROR("r300_emit_vpu failed\n"); | 1055 | DRM_ERROR("r300_emit_vpu failed\n"); |
1080 | goto cleanup; | 1056 | goto cleanup; |
@@ -1083,7 +1059,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, | |||
1083 | 1059 | ||
1084 | case R300_CMD_PACKET3: | 1060 | case R300_CMD_PACKET3: |
1085 | DRM_DEBUG("R300_CMD_PACKET3\n"); | 1061 | DRM_DEBUG("R300_CMD_PACKET3\n"); |
1086 | ret = r300_emit_packet3(dev_priv, cmdbuf, header); | 1062 | ret = r300_emit_packet3(dev_priv, cmdbuf, *header); |
1087 | if (ret) { | 1063 | if (ret) { |
1088 | DRM_ERROR("r300_emit_packet3 failed\n"); | 1064 | DRM_ERROR("r300_emit_packet3 failed\n"); |
1089 | goto cleanup; | 1065 | goto cleanup; |
@@ -1117,8 +1093,8 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, | |||
1117 | int i; | 1093 | int i; |
1118 | RING_LOCALS; | 1094 | RING_LOCALS; |
1119 | 1095 | ||
1120 | BEGIN_RING(header.delay.count); | 1096 | BEGIN_RING(header->delay.count); |
1121 | for (i = 0; i < header.delay.count; i++) | 1097 | for (i = 0; i < header->delay.count; i++) |
1122 | OUT_RING(RADEON_CP_PACKET2); | 1098 | OUT_RING(RADEON_CP_PACKET2); |
1123 | ADVANCE_RING(); | 1099 | ADVANCE_RING(); |
1124 | } | 1100 | } |
@@ -1126,7 +1102,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, | |||
1126 | 1102 | ||
1127 | case R300_CMD_DMA_DISCARD: | 1103 | case R300_CMD_DMA_DISCARD: |
1128 | DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n"); | 1104 | DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n"); |
1129 | idx = header.dma.buf_idx; | 1105 | idx = header->dma.buf_idx; |
1130 | if (idx < 0 || idx >= dma->buf_count) { | 1106 | if (idx < 0 || idx >= dma->buf_count) { |
1131 | DRM_ERROR("buffer index %d (of %d max)\n", | 1107 | DRM_ERROR("buffer index %d (of %d max)\n", |
1132 | idx, dma->buf_count - 1); | 1108 | idx, dma->buf_count - 1); |
@@ -1149,12 +1125,12 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, | |||
1149 | 1125 | ||
1150 | case R300_CMD_WAIT: | 1126 | case R300_CMD_WAIT: |
1151 | DRM_DEBUG("R300_CMD_WAIT\n"); | 1127 | DRM_DEBUG("R300_CMD_WAIT\n"); |
1152 | r300_cmd_wait(dev_priv, header); | 1128 | r300_cmd_wait(dev_priv, *header); |
1153 | break; | 1129 | break; |
1154 | 1130 | ||
1155 | case R300_CMD_SCRATCH: | 1131 | case R300_CMD_SCRATCH: |
1156 | DRM_DEBUG("R300_CMD_SCRATCH\n"); | 1132 | DRM_DEBUG("R300_CMD_SCRATCH\n"); |
1157 | ret = r300_scratch(dev_priv, cmdbuf, header); | 1133 | ret = r300_scratch(dev_priv, cmdbuf, *header); |
1158 | if (ret) { | 1134 | if (ret) { |
1159 | DRM_ERROR("r300_scratch failed\n"); | 1135 | DRM_ERROR("r300_scratch failed\n"); |
1160 | goto cleanup; | 1136 | goto cleanup; |
@@ -1168,16 +1144,16 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, | |||
1168 | goto cleanup; | 1144 | goto cleanup; |
1169 | } | 1145 | } |
1170 | DRM_DEBUG("R300_CMD_R500FP\n"); | 1146 | DRM_DEBUG("R300_CMD_R500FP\n"); |
1171 | ret = r300_emit_r500fp(dev_priv, cmdbuf, header); | 1147 | ret = r300_emit_r500fp(dev_priv, cmdbuf, *header); |
1172 | if (ret) { | 1148 | if (ret) { |
1173 | DRM_ERROR("r300_emit_r500fp failed\n"); | 1149 | DRM_ERROR("r300_emit_r500fp failed\n"); |
1174 | goto cleanup; | 1150 | goto cleanup; |
1175 | } | 1151 | } |
1176 | break; | 1152 | break; |
1177 | default: | 1153 | default: |
1178 | DRM_ERROR("bad cmd_type %i at %p\n", | 1154 | DRM_ERROR("bad cmd_type %i at byte %d\n", |
1179 | header.header.cmd_type, | 1155 | header->header.cmd_type, |
1180 | cmdbuf->buf - sizeof(header)); | 1156 | cmdbuf->buffer->iterator - sizeof(*header)); |
1181 | ret = -EINVAL; | 1157 | ret = -EINVAL; |
1182 | goto cleanup; | 1158 | goto cleanup; |
1183 | } | 1159 | } |