author | Dave Airlie <airlied@starflyer.(none)> | 2005-07-10 04:20:09 -0400
---|---|---
committer | Dave Airlie <airlied@linux.ie> | 2005-07-10 04:20:09 -0400
commit | 6795c985a648d1e90b367cc1387c18205ecca4b8 | (patch)
tree | c764bbcf801ecd95c8a90fb1c6660a88c8bf4077 | /drivers/char/drm/mga_dma.c
parent | b5d499cfdeebcb71f00f3513045796ccae718140 | (diff)
Add support for PCI MGA cards to MGA DRM.
This patch adds several new ioctls and a new get_param query to support PCI
MGA cards.
Two ioctls were added to implement interrupt-based waiting. With this change,
the client-side driver no longer needs to map the primary DMA region or the
MMIO region. Previously, end-of-frame waiting was done by busy-waiting in the
client-side driver until one of the MMIO registers (the current DMA pointer)
matched a pointer to the end of primary DMA space. Using interrupts removes
both the busy-waiting and the extra mappings.
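As a rough illustration, the client-side wait can now look something like the
sketch below. The ioctl names DRM_IOCTL_MGA_SET_FENCE and
DRM_IOCTL_MGA_WAIT_FENCE and their u32 argument are assumptions here; the real
definitions live in the updated mga_drm.h, not in this file.

```c
/* Hypothetical client-side sketch: replace busy-waiting on the MMIO DMA
 * pointer with the new fence ioctls.  The ioctl names and argument type
 * are assumptions; the actual definitions come from the updated mga_drm.h.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include "mga_drm.h"   /* assumed to provide the new fence ioctl numbers */

static int mga_wait_for_frame(int drm_fd)
{
	uint32_t fence = 0;

	/* Emit a fence behind the commands queued so far. */
	if (ioctl(drm_fd, DRM_IOCTL_MGA_SET_FENCE, &fence) != 0)
		return -1;

	/* Sleep in the kernel until the hardware passes that fence,
	 * instead of polling the primary DMA pointer register. */
	if (ioctl(drm_fd, DRM_IOCTL_MGA_WAIT_FENCE, &fence) != 0)
		return -1;

	return 0;
}
```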
A third ioctl was added to bootstrap DMA. This ioctl, which is used by the
X-server, moves a *LOT* of code from the X-server into the kernel. This allows
the kernel to do whatever needs to be done to set up DMA buffers. The entire
process and the locations of the buffers are hidden from user-mode.
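For illustration, the X-server side of the new bootstrap ioctl might look like
the following sketch. The request name DRM_IOCTL_MGA_DMA_BOOTSTRAP is an
assumption; the drm_mga_dma_bootstrap_t field names match those used by
mga_dma_bootstrap() in this patch.

```c
/* Hypothetical X-server-side sketch of the DMA bootstrap ioctl.  The
 * request name and header are assumptions; the field names follow the
 * drm_mga_dma_bootstrap_t usage visible in mga_dma_bootstrap() below.
 */
#include <string.h>
#include <sys/ioctl.h>
#include "mga_drm.h"   /* assumed to define drm_mga_dma_bootstrap_t */

static int mga_bootstrap_dma(int drm_fd)
{
	drm_mga_dma_bootstrap_t bs;

	memset(&bs, 0, sizeof(bs));
	bs.agp_size = 64;                  /* request 64MB of AGP space   */
	bs.agp_mode = 4;                   /* ask for AGP 4x if available */
	bs.primary_size = 1024 * 1024;     /* primary DMA buffer size     */
	bs.secondary_bin_count = 128;      /* secondary DMA buffers       */
	bs.secondary_bin_size = 64 * 1024;

	if (ioctl(drm_fd, DRM_IOCTL_MGA_DMA_BOOTSTRAP, &bs) != 0)
		return -1;

	/* On return the kernel reports what it actually set up:
	 * bs.agp_mode is 0 if it fell back to PCI DMA, and
	 * bs.texture_handle / bs.texture_size describe the leftover
	 * AGP texture region (0 for PCI). */
	return 0;
}
```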
Additionally, a get_param query was added to differentiate between G4x0 cards
and G550 cards. A gap was left in the numbering sequence so that, if needed,
G450 cards could be distinguished from G400 cards. According to Ville
Syrjälä, the G4x0 cards and the G550 cards handle anisotropic filtering
differently. This seems the most compatible way to let the client-side driver
know which card it's on. Making this very small change now eliminates the
need to bump the DRM minor version twice.
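A client-side query for the card type might look like the sketch below. The
parameter name MGA_PARAM_CARD_TYPE, the MGA_CARD_TYPE_G550 value, and the
drm_mga_getparam_t layout are assumptions based on this description; the real
definitions are in the updated mga_drm.h.

```c
/* Hypothetical client-side sketch of the new get_param query.  The names
 * used here are assumptions; only the general shape of the query is
 * taken from the commit message.
 */
#include <sys/ioctl.h>
#include "mga_drm.h"   /* assumed to define the getparam ioctl and values */

static int mga_is_g550(int drm_fd)
{
	drm_mga_getparam_t gp;
	int card_type = 0;

	gp.param = MGA_PARAM_CARD_TYPE;    /* assumed parameter name */
	gp.value = &card_type;

	if (ioctl(drm_fd, DRM_IOCTL_MGA_GETPARAM, &gp) != 0)
		return -1;

	/* A gap is left between the G400-family values and the G550 value
	 * so that a distinct G450 value can be added later if needed. */
	return card_type == MGA_CARD_TYPE_G550;
}
```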
http://marc.theaimsgroup.com/?l=dri-devel&m=106625815319773&w=2
(airlied - this may not work at this point; I think the follow-on buffer
cleanup patches will be needed.)
From: Ian Romanick <idr@us.ibm.com>
Signed-off-by: Dave Airlie <airlied@linux.ie>
Diffstat (limited to 'drivers/char/drm/mga_dma.c')
-rw-r--r-- | drivers/char/drm/mga_dma.c | 601
1 file changed, 493 insertions, 108 deletions
diff --git a/drivers/char/drm/mga_dma.c b/drivers/char/drm/mga_dma.c
index 832eaf8a5068..7899e281d062 100644
--- a/drivers/char/drm/mga_dma.c
+++ b/drivers/char/drm/mga_dma.c
@@ -23,18 +23,21 @@ | |||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | 23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | 24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
25 | * DEALINGS IN THE SOFTWARE. | 25 | * DEALINGS IN THE SOFTWARE. |
26 | * | 26 | */ |
27 | * Authors: | 27 | |
28 | * Rickard E. (Rik) Faith <faith@valinux.com> | 28 | /** |
29 | * Jeff Hartmann <jhartmann@valinux.com> | 29 | * \file mga_dma.c |
30 | * Keith Whitwell <keith@tungstengraphics.com> | 30 | * DMA support for MGA G200 / G400. |
31 | * | 31 | * |
32 | * Rewritten by: | 32 | * \author Rickard E. (Rik) Faith <faith@valinux.com> |
33 | * Gareth Hughes <gareth@valinux.com> | 33 | * \author Jeff Hartmann <jhartmann@valinux.com> |
34 | * \author Keith Whitwell <keith@tungstengraphics.com> | ||
35 | * \author Gareth Hughes <gareth@valinux.com> | ||
34 | */ | 36 | */ |
35 | 37 | ||
36 | #include "drmP.h" | 38 | #include "drmP.h" |
37 | #include "drm.h" | 39 | #include "drm.h" |
40 | #include "drm_sarea.h" | ||
38 | #include "mga_drm.h" | 41 | #include "mga_drm.h" |
39 | #include "mga_drv.h" | 42 | #include "mga_drv.h" |
40 | 43 | ||
@@ -148,7 +151,7 @@ void mga_do_dma_flush( drm_mga_private_t *dev_priv ) | |||
148 | DRM_DEBUG( " space = 0x%06x\n", primary->space ); | 151 | DRM_DEBUG( " space = 0x%06x\n", primary->space ); |
149 | 152 | ||
150 | mga_flush_write_combine(); | 153 | mga_flush_write_combine(); |
151 | MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER ); | 154 | MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access); |
152 | 155 | ||
153 | DRM_DEBUG( "done.\n" ); | 156 | DRM_DEBUG( "done.\n" ); |
154 | } | 157 | } |
@@ -190,7 +193,7 @@ void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv ) | |||
190 | DRM_DEBUG( " space = 0x%06x\n", primary->space ); | 193 | DRM_DEBUG( " space = 0x%06x\n", primary->space ); |
191 | 194 | ||
192 | mga_flush_write_combine(); | 195 | mga_flush_write_combine(); |
193 | MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER ); | 196 | MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access); |
194 | 197 | ||
195 | set_bit( 0, &primary->wrapped ); | 198 | set_bit( 0, &primary->wrapped ); |
196 | DRM_DEBUG( "done.\n" ); | 199 | DRM_DEBUG( "done.\n" ); |
@@ -396,23 +399,383 @@ int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf ) | |||
396 | * DMA initialization, cleanup | 399 | * DMA initialization, cleanup |
397 | */ | 400 | */ |
398 | 401 | ||
402 | |||
403 | int mga_driver_preinit(drm_device_t *dev, unsigned long flags) | ||
404 | { | ||
405 | drm_mga_private_t * dev_priv; | ||
406 | |||
407 | dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER); | ||
408 | if (!dev_priv) | ||
409 | return DRM_ERR(ENOMEM); | ||
410 | |||
411 | dev->dev_private = (void *)dev_priv; | ||
412 | memset(dev_priv, 0, sizeof(drm_mga_private_t)); | ||
413 | |||
414 | dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT; | ||
415 | dev_priv->chipset = flags; | ||
416 | |||
417 | return 0; | ||
418 | } | ||
419 | |||
420 | /** | ||
421 | * Bootstrap the driver for AGP DMA. | ||
422 | * | ||
423 | * \todo | ||
424 | * Investigate whether there is any benifit to storing the WARP microcode in | ||
425 | * AGP memory. If not, the microcode may as well always be put in PCI | ||
426 | * memory. | ||
427 | * | ||
428 | * \todo | ||
429 | * This routine needs to set dma_bs->agp_mode to the mode actually configured | ||
430 | * in the hardware. Looking just at the Linux AGP driver code, I don't see | ||
431 | * an easy way to determine this. | ||
432 | * | ||
433 | * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap | ||
434 | */ | ||
435 | static int mga_do_agp_dma_bootstrap(drm_device_t * dev, | ||
436 | drm_mga_dma_bootstrap_t * dma_bs) | ||
437 | { | ||
438 | drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private; | ||
439 | const unsigned int warp_size = mga_warp_microcode_size(dev_priv); | ||
440 | int err; | ||
441 | unsigned offset; | ||
442 | const unsigned secondary_size = dma_bs->secondary_bin_count | ||
443 | * dma_bs->secondary_bin_size; | ||
444 | const unsigned agp_size = (dma_bs->agp_size << 20); | ||
445 | drm_buf_desc_t req; | ||
446 | drm_agp_mode_t mode; | ||
447 | drm_agp_info_t info; | ||
448 | |||
449 | |||
450 | /* Acquire AGP. */ | ||
451 | err = drm_agp_acquire(dev); | ||
452 | if (err) { | ||
453 | DRM_ERROR("Unable to acquire AGP\n"); | ||
454 | return err; | ||
455 | } | ||
456 | |||
457 | err = drm_agp_info(dev, &info); | ||
458 | if (err) { | ||
459 | DRM_ERROR("Unable to get AGP info\n"); | ||
460 | return err; | ||
461 | } | ||
462 | |||
463 | mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode; | ||
464 | err = drm_agp_enable(dev, mode); | ||
465 | if (err) { | ||
466 | DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode); | ||
467 | return err; | ||
468 | } | ||
469 | |||
470 | |||
471 | /* In addition to the usual AGP mode configuration, the G200 AGP cards | ||
472 | * need to have the AGP mode "manually" set. | ||
473 | */ | ||
474 | |||
475 | if (dev_priv->chipset == MGA_CARD_TYPE_G200) { | ||
476 | if (mode.mode & 0x02) { | ||
477 | MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE); | ||
478 | } | ||
479 | else { | ||
480 | MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE); | ||
481 | } | ||
482 | } | ||
483 | |||
484 | |||
485 | /* Allocate and bind AGP memory. */ | ||
486 | dev_priv->agp_pages = agp_size / PAGE_SIZE; | ||
487 | dev_priv->agp_mem = drm_alloc_agp( dev, dev_priv->agp_pages, 0 ); | ||
488 | if (dev_priv->agp_mem == NULL) { | ||
489 | dev_priv->agp_pages = 0; | ||
490 | DRM_ERROR("Unable to allocate %uMB AGP memory\n", | ||
491 | dma_bs->agp_size); | ||
492 | return DRM_ERR(ENOMEM); | ||
493 | } | ||
494 | |||
495 | err = drm_bind_agp( dev_priv->agp_mem, 0 ); | ||
496 | if (err) { | ||
497 | DRM_ERROR("Unable to bind AGP memory\n"); | ||
498 | return err; | ||
499 | } | ||
500 | |||
501 | offset = 0; | ||
502 | err = drm_addmap( dev, offset, warp_size, | ||
503 | _DRM_AGP, _DRM_READ_ONLY, & dev_priv->warp ); | ||
504 | if (err) { | ||
505 | DRM_ERROR("Unable to map WARP microcode\n"); | ||
506 | return err; | ||
507 | } | ||
508 | |||
509 | offset += warp_size; | ||
510 | err = drm_addmap( dev, offset, dma_bs->primary_size, | ||
511 | _DRM_AGP, _DRM_READ_ONLY, & dev_priv->primary ); | ||
512 | if (err) { | ||
513 | DRM_ERROR("Unable to map primary DMA region\n"); | ||
514 | return err; | ||
515 | } | ||
516 | |||
517 | offset += dma_bs->primary_size; | ||
518 | err = drm_addmap( dev, offset, secondary_size, | ||
519 | _DRM_AGP, 0, & dev->agp_buffer_map ); | ||
520 | if (err) { | ||
521 | DRM_ERROR("Unable to map secondary DMA region\n"); | ||
522 | return err; | ||
523 | } | ||
524 | |||
525 | (void) memset( &req, 0, sizeof(req) ); | ||
526 | req.count = dma_bs->secondary_bin_count; | ||
527 | req.size = dma_bs->secondary_bin_size; | ||
528 | req.flags = _DRM_AGP_BUFFER; | ||
529 | req.agp_start = offset; | ||
530 | |||
531 | err = drm_addbufs_agp( dev, & req ); | ||
532 | if (err) { | ||
533 | DRM_ERROR("Unable to add secondary DMA buffers\n"); | ||
534 | return err; | ||
535 | } | ||
536 | |||
537 | offset += secondary_size; | ||
538 | err = drm_addmap( dev, offset, agp_size - offset, | ||
539 | _DRM_AGP, 0, & dev_priv->agp_textures ); | ||
540 | if (err) { | ||
541 | DRM_ERROR("Unable to map AGP texture region\n"); | ||
542 | return err; | ||
543 | } | ||
544 | |||
545 | drm_core_ioremap(dev_priv->warp, dev); | ||
546 | drm_core_ioremap(dev_priv->primary, dev); | ||
547 | drm_core_ioremap(dev->agp_buffer_map, dev); | ||
548 | |||
549 | if (!dev_priv->warp->handle || | ||
550 | !dev_priv->primary->handle || !dev->agp_buffer_map->handle) { | ||
551 | DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n", | ||
552 | dev_priv->warp->handle, dev_priv->primary->handle, | ||
553 | dev->agp_buffer_map->handle); | ||
554 | return DRM_ERR(ENOMEM); | ||
555 | } | ||
556 | |||
557 | dev_priv->dma_access = MGA_PAGPXFER; | ||
558 | dev_priv->wagp_enable = MGA_WAGP_ENABLE; | ||
559 | |||
560 | DRM_INFO("Initialized card for AGP DMA.\n"); | ||
561 | return 0; | ||
562 | } | ||
563 | |||
564 | /** | ||
565 | * Bootstrap the driver for PCI DMA. | ||
566 | * | ||
567 | * \todo | ||
568 | * The algorithm for decreasing the size of the primary DMA buffer could be | ||
569 | * better. The size should be rounded up to the nearest page size, then | ||
570 | * decrease the request size by a single page each pass through the loop. | ||
571 | * | ||
572 | * \todo | ||
573 | * Determine whether the maximum address passed to drm_pci_alloc is correct. | ||
574 | * The same goes for drm_addbufs_pci. | ||
575 | * | ||
576 | * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap | ||
577 | */ | ||
578 | static int mga_do_pci_dma_bootstrap(drm_device_t * dev, | ||
579 | drm_mga_dma_bootstrap_t * dma_bs) | ||
580 | { | ||
581 | drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private; | ||
582 | const unsigned int warp_size = mga_warp_microcode_size(dev_priv); | ||
583 | unsigned int primary_size; | ||
584 | unsigned int bin_count; | ||
585 | int err; | ||
586 | drm_buf_desc_t req; | ||
587 | |||
588 | |||
589 | if (dev->dma == NULL) { | ||
590 | DRM_ERROR("dev->dma is NULL\n"); | ||
591 | return DRM_ERR(EFAULT); | ||
592 | } | ||
593 | |||
594 | /* The proper alignment is 0x100 for this mapping */ | ||
595 | err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT, | ||
596 | _DRM_READ_ONLY, &dev_priv->warp); | ||
597 | if (err != 0) { | ||
598 | DRM_ERROR("Unable to create mapping for WARP microcode\n"); | ||
599 | return err; | ||
600 | } | ||
601 | |||
602 | /* Other than the bottom two bits being used to encode other | ||
603 | * information, there don't appear to be any restrictions on the | ||
604 | * alignment of the primary or secondary DMA buffers. | ||
605 | */ | ||
606 | |||
607 | for ( primary_size = dma_bs->primary_size | ||
608 | ; primary_size != 0 | ||
609 | ; primary_size >>= 1 ) { | ||
610 | /* The proper alignment for this mapping is 0x04 */ | ||
611 | err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT, | ||
612 | _DRM_READ_ONLY, &dev_priv->primary); | ||
613 | if (!err) | ||
614 | break; | ||
615 | } | ||
616 | |||
617 | if (err != 0) { | ||
618 | DRM_ERROR("Unable to allocate primary DMA region\n"); | ||
619 | return DRM_ERR(ENOMEM); | ||
620 | } | ||
621 | |||
622 | if (dev_priv->primary->size != dma_bs->primary_size) { | ||
623 | DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n", | ||
624 | dma_bs->primary_size, | ||
625 | (unsigned) dev_priv->primary->size); | ||
626 | dma_bs->primary_size = dev_priv->primary->size; | ||
627 | } | ||
628 | |||
629 | for ( bin_count = dma_bs->secondary_bin_count | ||
630 | ; bin_count > 0 | ||
631 | ; bin_count-- ) { | ||
632 | (void) memset( &req, 0, sizeof(req) ); | ||
633 | req.count = bin_count; | ||
634 | req.size = dma_bs->secondary_bin_size; | ||
635 | |||
636 | err = drm_addbufs_pci( dev, & req ); | ||
637 | if (!err) { | ||
638 | break; | ||
639 | } | ||
640 | } | ||
641 | |||
642 | if (bin_count == 0) { | ||
643 | DRM_ERROR("Unable to add secondary DMA buffers\n"); | ||
644 | return err; | ||
645 | } | ||
646 | |||
647 | if (bin_count != dma_bs->secondary_bin_count) { | ||
648 | DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u " | ||
649 | "to %u.\n", dma_bs->secondary_bin_count, bin_count); | ||
650 | |||
651 | dma_bs->secondary_bin_count = bin_count; | ||
652 | } | ||
653 | |||
654 | dev_priv->dma_access = 0; | ||
655 | dev_priv->wagp_enable = 0; | ||
656 | |||
657 | dma_bs->agp_mode = 0; | ||
658 | |||
659 | DRM_INFO("Initialized card for PCI DMA.\n"); | ||
660 | return 0; | ||
661 | } | ||
662 | |||
663 | |||
664 | static int mga_do_dma_bootstrap(drm_device_t * dev, | ||
665 | drm_mga_dma_bootstrap_t * dma_bs) | ||
666 | { | ||
667 | const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev); | ||
668 | int err; | ||
669 | drm_mga_private_t * const dev_priv = | ||
670 | (drm_mga_private_t *) dev->dev_private; | ||
671 | |||
672 | |||
673 | dev_priv->used_new_dma_init = 1; | ||
674 | |||
675 | /* The first steps are the same for both PCI and AGP based DMA. Map | ||
676 | * the cards MMIO registers and map a status page. | ||
677 | */ | ||
678 | err = drm_addmap( dev, dev_priv->mmio_base, dev_priv->mmio_size, | ||
679 | _DRM_REGISTERS, _DRM_READ_ONLY, & dev_priv->mmio ); | ||
680 | if (err) { | ||
681 | DRM_ERROR("Unable to map MMIO region\n"); | ||
682 | return err; | ||
683 | } | ||
684 | |||
685 | |||
686 | err = drm_addmap( dev, 0, SAREA_MAX, _DRM_SHM, | ||
687 | _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL, | ||
688 | & dev_priv->status ); | ||
689 | if (err) { | ||
690 | DRM_ERROR("Unable to map status region\n"); | ||
691 | return err; | ||
692 | } | ||
693 | |||
694 | |||
695 | /* The DMA initialization procedure is slightly different for PCI and | ||
696 | * AGP cards. AGP cards just allocate a large block of AGP memory and | ||
697 | * carve off portions of it for internal uses. The remaining memory | ||
698 | * is returned to user-mode to be used for AGP textures. | ||
699 | */ | ||
700 | |||
701 | if (is_agp) { | ||
702 | err = mga_do_agp_dma_bootstrap(dev, dma_bs); | ||
703 | } | ||
704 | |||
705 | /* If we attempted to initialize the card for AGP DMA but failed, | ||
706 | * clean-up any mess that may have been created. | ||
707 | */ | ||
708 | |||
709 | if (err) { | ||
710 | mga_do_cleanup_dma(dev); | ||
711 | } | ||
712 | |||
713 | |||
714 | /* Not only do we want to try and initialized PCI cards for PCI DMA, | ||
715 | * but we also try to initialized AGP cards that could not be | ||
716 | * initialized for AGP DMA. This covers the case where we have an AGP | ||
717 | * card in a system with an unsupported AGP chipset. In that case the | ||
718 | * card will be detected as AGP, but we won't be able to allocate any | ||
719 | * AGP memory, etc. | ||
720 | */ | ||
721 | |||
722 | if (!is_agp || err) { | ||
723 | err = mga_do_pci_dma_bootstrap(dev, dma_bs); | ||
724 | } | ||
725 | |||
726 | |||
727 | return err; | ||
728 | } | ||
729 | |||
730 | int mga_dma_bootstrap(DRM_IOCTL_ARGS) | ||
731 | { | ||
732 | DRM_DEVICE; | ||
733 | drm_mga_dma_bootstrap_t bootstrap; | ||
734 | int err; | ||
735 | |||
736 | |||
737 | DRM_COPY_FROM_USER_IOCTL(bootstrap, | ||
738 | (drm_mga_dma_bootstrap_t __user *) data, | ||
739 | sizeof(bootstrap)); | ||
740 | |||
741 | err = mga_do_dma_bootstrap(dev, & bootstrap); | ||
742 | if (! err) { | ||
743 | static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 }; | ||
744 | const drm_mga_private_t * const dev_priv = | ||
745 | (drm_mga_private_t *) dev->dev_private; | ||
746 | |||
747 | if (dev_priv->agp_textures != NULL) { | ||
748 | bootstrap.texture_handle = dev_priv->agp_textures->offset; | ||
749 | bootstrap.texture_size = dev_priv->agp_textures->size; | ||
750 | } | ||
751 | else { | ||
752 | bootstrap.texture_handle = 0; | ||
753 | bootstrap.texture_size = 0; | ||
754 | } | ||
755 | |||
756 | bootstrap.agp_mode = modes[ bootstrap.agp_mode & 0x07 ]; | ||
757 | if (DRM_COPY_TO_USER( (void __user *) data, & bootstrap, | ||
758 | sizeof(bootstrap))) { | ||
759 | err = DRM_ERR(EFAULT); | ||
760 | } | ||
761 | } | ||
762 | else { | ||
763 | mga_do_cleanup_dma(dev); | ||
764 | } | ||
765 | |||
766 | return err; | ||
767 | } | ||
768 | |||
399 | static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init ) | 769 | static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init ) |
400 | { | 770 | { |
401 | drm_mga_private_t *dev_priv; | 771 | drm_mga_private_t *dev_priv; |
402 | int ret; | 772 | int ret; |
403 | DRM_DEBUG( "\n" ); | 773 | DRM_DEBUG( "\n" ); |
404 | 774 | ||
405 | dev_priv = drm_alloc( sizeof(drm_mga_private_t), DRM_MEM_DRIVER ); | ||
406 | if ( !dev_priv ) | ||
407 | return DRM_ERR(ENOMEM); | ||
408 | |||
409 | memset( dev_priv, 0, sizeof(drm_mga_private_t) ); | ||
410 | 775 | ||
411 | dev_priv->chipset = init->chipset; | 776 | dev_priv = dev->dev_private; |
412 | 777 | ||
413 | dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT; | 778 | if (init->sgram) { |
414 | |||
415 | if ( init->sgram ) { | ||
416 | dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK; | 779 | dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK; |
417 | } else { | 780 | } else { |
418 | dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR; | 781 | dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR; |
@@ -436,88 +799,65 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init ) | |||
436 | 799 | ||
437 | DRM_GETSAREA(); | 800 | DRM_GETSAREA(); |
438 | 801 | ||
439 | if(!dev_priv->sarea) { | 802 | if (!dev_priv->sarea) { |
440 | DRM_ERROR( "failed to find sarea!\n" ); | 803 | DRM_ERROR("failed to find sarea!\n"); |
441 | /* Assign dev_private so we can do cleanup. */ | ||
442 | dev->dev_private = (void *)dev_priv; | ||
443 | mga_do_cleanup_dma( dev ); | ||
444 | return DRM_ERR(EINVAL); | 804 | return DRM_ERR(EINVAL); |
445 | } | 805 | } |
446 | 806 | ||
447 | dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); | 807 | if (! dev_priv->used_new_dma_init) { |
448 | if(!dev_priv->mmio) { | 808 | dev_priv->status = drm_core_findmap(dev, init->status_offset); |
449 | DRM_ERROR( "failed to find mmio region!\n" ); | 809 | if (!dev_priv->status) { |
450 | /* Assign dev_private so we can do cleanup. */ | 810 | DRM_ERROR("failed to find status page!\n"); |
451 | dev->dev_private = (void *)dev_priv; | 811 | return DRM_ERR(EINVAL); |
452 | mga_do_cleanup_dma( dev ); | 812 | } |
453 | return DRM_ERR(EINVAL); | 813 | dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); |
454 | } | 814 | if (!dev_priv->mmio) { |
455 | dev_priv->status = drm_core_findmap(dev, init->status_offset); | 815 | DRM_ERROR("failed to find mmio region!\n"); |
456 | if(!dev_priv->status) { | 816 | return DRM_ERR(EINVAL); |
457 | DRM_ERROR( "failed to find status page!\n" ); | 817 | } |
458 | /* Assign dev_private so we can do cleanup. */ | 818 | dev_priv->warp = drm_core_findmap(dev, init->warp_offset); |
459 | dev->dev_private = (void *)dev_priv; | 819 | if (!dev_priv->warp) { |
460 | mga_do_cleanup_dma( dev ); | 820 | DRM_ERROR("failed to find warp microcode region!\n"); |
461 | return DRM_ERR(EINVAL); | 821 | return DRM_ERR(EINVAL); |
462 | } | 822 | } |
463 | dev_priv->warp = drm_core_findmap(dev, init->warp_offset); | 823 | dev_priv->primary = drm_core_findmap(dev, init->primary_offset); |
464 | if(!dev_priv->warp) { | 824 | if (!dev_priv->primary) { |
465 | DRM_ERROR( "failed to find warp microcode region!\n" ); | 825 | DRM_ERROR("failed to find primary dma region!\n"); |
466 | /* Assign dev_private so we can do cleanup. */ | 826 | return DRM_ERR(EINVAL); |
467 | dev->dev_private = (void *)dev_priv; | 827 | } |
468 | mga_do_cleanup_dma( dev ); | 828 | dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); |
469 | return DRM_ERR(EINVAL); | 829 | if (!dev->agp_buffer_map) { |
470 | } | 830 | DRM_ERROR("failed to find dma buffer region!\n"); |
471 | dev_priv->primary = drm_core_findmap(dev, init->primary_offset); | 831 | return DRM_ERR(EINVAL); |
472 | if(!dev_priv->primary) { | 832 | } |
473 | DRM_ERROR( "failed to find primary dma region!\n" ); | 833 | |
474 | /* Assign dev_private so we can do cleanup. */ | 834 | drm_core_ioremap(dev_priv->warp, dev); |
475 | dev->dev_private = (void *)dev_priv; | 835 | drm_core_ioremap(dev_priv->primary, dev); |
476 | mga_do_cleanup_dma( dev ); | 836 | drm_core_ioremap(dev->agp_buffer_map, dev); |
477 | return DRM_ERR(EINVAL); | ||
478 | } | ||
479 | dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); | ||
480 | if(!dev->agp_buffer_map) { | ||
481 | DRM_ERROR( "failed to find dma buffer region!\n" ); | ||
482 | /* Assign dev_private so we can do cleanup. */ | ||
483 | dev->dev_private = (void *)dev_priv; | ||
484 | mga_do_cleanup_dma( dev ); | ||
485 | return DRM_ERR(EINVAL); | ||
486 | } | 837 | } |
487 | 838 | ||
488 | dev_priv->sarea_priv = | 839 | dev_priv->sarea_priv = |
489 | (drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle + | 840 | (drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle + |
490 | init->sarea_priv_offset); | 841 | init->sarea_priv_offset); |
491 | 842 | ||
492 | drm_core_ioremap( dev_priv->warp, dev ); | 843 | if (!dev_priv->warp->handle || |
493 | drm_core_ioremap( dev_priv->primary, dev ); | 844 | !dev_priv->primary->handle || |
494 | drm_core_ioremap( dev->agp_buffer_map, dev ); | 845 | ((dev_priv->dma_access != 0) && |
495 | 846 | ((dev->agp_buffer_map == NULL) || | |
496 | if(!dev_priv->warp->handle || | 847 | (dev->agp_buffer_map->handle == NULL)))) { |
497 | !dev_priv->primary->handle || | 848 | DRM_ERROR("failed to ioremap agp regions!\n"); |
498 | !dev->agp_buffer_map->handle ) { | ||
499 | DRM_ERROR( "failed to ioremap agp regions!\n" ); | ||
500 | /* Assign dev_private so we can do cleanup. */ | ||
501 | dev->dev_private = (void *)dev_priv; | ||
502 | mga_do_cleanup_dma( dev ); | ||
503 | return DRM_ERR(ENOMEM); | 849 | return DRM_ERR(ENOMEM); |
504 | } | 850 | } |
505 | 851 | ||
506 | ret = mga_warp_install_microcode( dev_priv ); | 852 | ret = mga_warp_install_microcode(dev_priv); |
507 | if ( ret < 0 ) { | 853 | if (ret < 0) { |
508 | DRM_ERROR( "failed to install WARP ucode!\n" ); | 854 | DRM_ERROR("failed to install WARP ucode!\n"); |
509 | /* Assign dev_private so we can do cleanup. */ | ||
510 | dev->dev_private = (void *)dev_priv; | ||
511 | mga_do_cleanup_dma( dev ); | ||
512 | return ret; | 855 | return ret; |
513 | } | 856 | } |
514 | 857 | ||
515 | ret = mga_warp_init( dev_priv ); | 858 | ret = mga_warp_init(dev_priv); |
516 | if ( ret < 0 ) { | 859 | if (ret < 0) { |
517 | DRM_ERROR( "failed to init WARP engine!\n" ); | 860 | DRM_ERROR("failed to init WARP engine!\n"); |
518 | /* Assign dev_private so we can do cleanup. */ | ||
519 | dev->dev_private = (void *)dev_priv; | ||
520 | mga_do_cleanup_dma( dev ); | ||
521 | return ret; | 861 | return ret; |
522 | } | 862 | } |
523 | 863 | ||
@@ -557,22 +897,18 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init ) | |||
557 | dev_priv->sarea_priv->last_frame.head = 0; | 897 | dev_priv->sarea_priv->last_frame.head = 0; |
558 | dev_priv->sarea_priv->last_frame.wrap = 0; | 898 | dev_priv->sarea_priv->last_frame.wrap = 0; |
559 | 899 | ||
560 | if ( mga_freelist_init( dev, dev_priv ) < 0 ) { | 900 | if (mga_freelist_init(dev, dev_priv) < 0) { |
561 | DRM_ERROR( "could not initialize freelist\n" ); | 901 | DRM_ERROR("could not initialize freelist\n"); |
562 | /* Assign dev_private so we can do cleanup. */ | ||
563 | dev->dev_private = (void *)dev_priv; | ||
564 | mga_do_cleanup_dma( dev ); | ||
565 | return DRM_ERR(ENOMEM); | 902 | return DRM_ERR(ENOMEM); |
566 | } | 903 | } |
567 | 904 | ||
568 | /* Make dev_private visable to others. */ | ||
569 | dev->dev_private = (void *)dev_priv; | ||
570 | return 0; | 905 | return 0; |
571 | } | 906 | } |
572 | 907 | ||
573 | static int mga_do_cleanup_dma( drm_device_t *dev ) | 908 | static int mga_do_cleanup_dma( drm_device_t *dev ) |
574 | { | 909 | { |
575 | DRM_DEBUG( "\n" ); | 910 | int err = 0; |
911 | DRM_DEBUG("\n"); | ||
576 | 912 | ||
577 | /* Make sure interrupts are disabled here because the uninstall ioctl | 913 | /* Make sure interrupts are disabled here because the uninstall ioctl |
578 | * may not have been called from userspace and after dev_private | 914 | * may not have been called from userspace and after dev_private |
@@ -583,20 +919,49 @@ static int mga_do_cleanup_dma( drm_device_t *dev ) | |||
583 | if ( dev->dev_private ) { | 919 | if ( dev->dev_private ) { |
584 | drm_mga_private_t *dev_priv = dev->dev_private; | 920 | drm_mga_private_t *dev_priv = dev->dev_private; |
585 | 921 | ||
586 | if ( dev_priv->warp != NULL ) | 922 | if ((dev_priv->warp != NULL) |
587 | drm_core_ioremapfree( dev_priv->warp, dev ); | 923 | && (dev_priv->mmio->type != _DRM_CONSISTENT)) |
588 | if ( dev_priv->primary != NULL ) | 924 | drm_core_ioremapfree(dev_priv->warp, dev); |
589 | drm_core_ioremapfree( dev_priv->primary, dev ); | 925 | |
590 | if ( dev->agp_buffer_map != NULL ) | 926 | if ((dev_priv->primary != NULL) |
591 | drm_core_ioremapfree( dev->agp_buffer_map, dev ); | 927 | && (dev_priv->primary->type != _DRM_CONSISTENT)) |
928 | drm_core_ioremapfree(dev_priv->primary, dev); | ||
592 | 929 | ||
593 | if ( dev_priv->head != NULL ) { | 930 | if (dev->agp_buffer_map != NULL) |
594 | mga_freelist_cleanup( dev ); | 931 | drm_core_ioremapfree(dev->agp_buffer_map, dev); |
932 | |||
933 | if (dev_priv->used_new_dma_init) { | ||
934 | if (dev_priv->agp_mem != NULL) { | ||
935 | dev_priv->agp_textures = NULL; | ||
936 | drm_unbind_agp(dev_priv->agp_mem); | ||
937 | |||
938 | drm_free_agp(dev_priv->agp_mem, dev_priv->agp_pages); | ||
939 | dev_priv->agp_pages = 0; | ||
940 | dev_priv->agp_mem = NULL; | ||
941 | } | ||
942 | |||
943 | if ((dev->agp != NULL) && dev->agp->acquired) { | ||
944 | err = drm_agp_release(dev); | ||
945 | } | ||
946 | |||
947 | dev_priv->used_new_dma_init = 0; | ||
595 | } | 948 | } |
596 | 949 | ||
597 | drm_free( dev->dev_private, sizeof(drm_mga_private_t), | 950 | dev_priv->warp = NULL; |
598 | DRM_MEM_DRIVER ); | 951 | dev_priv->primary = NULL; |
599 | dev->dev_private = NULL; | 952 | dev_priv->mmio = NULL; |
953 | dev_priv->status = NULL; | ||
954 | dev_priv->sarea = NULL; | ||
955 | dev_priv->sarea_priv = NULL; | ||
956 | dev->agp_buffer_map = NULL; | ||
957 | |||
958 | memset(&dev_priv->prim, 0, sizeof(dev_priv->prim)); | ||
959 | dev_priv->warp_pipe = 0; | ||
960 | memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys)); | ||
961 | |||
962 | if (dev_priv->head != NULL) { | ||
963 | mga_freelist_cleanup(dev); | ||
964 | } | ||
600 | } | 965 | } |
601 | 966 | ||
602 | return 0; | 967 | return 0; |
@@ -606,14 +971,20 @@ int mga_dma_init( DRM_IOCTL_ARGS ) | |||
606 | { | 971 | { |
607 | DRM_DEVICE; | 972 | DRM_DEVICE; |
608 | drm_mga_init_t init; | 973 | drm_mga_init_t init; |
974 | int err; | ||
609 | 975 | ||
610 | LOCK_TEST_WITH_RETURN( dev, filp ); | 976 | LOCK_TEST_WITH_RETURN( dev, filp ); |
611 | 977 | ||
612 | DRM_COPY_FROM_USER_IOCTL( init, (drm_mga_init_t __user *)data, sizeof(init) ); | 978 | DRM_COPY_FROM_USER_IOCTL(init, (drm_mga_init_t __user *) data, |
979 | sizeof(init)); | ||
613 | 980 | ||
614 | switch ( init.func ) { | 981 | switch ( init.func ) { |
615 | case MGA_INIT_DMA: | 982 | case MGA_INIT_DMA: |
616 | return mga_do_init_dma( dev, &init ); | 983 | err = mga_do_init_dma(dev, &init); |
984 | if (err) { | ||
985 | (void) mga_do_cleanup_dma(dev); | ||
986 | } | ||
987 | return err; | ||
617 | case MGA_CLEANUP_DMA: | 988 | case MGA_CLEANUP_DMA: |
618 | return mga_do_cleanup_dma( dev ); | 989 | return mga_do_cleanup_dma( dev ); |
619 | } | 990 | } |
@@ -742,7 +1113,21 @@ int mga_dma_buffers( DRM_IOCTL_ARGS ) | |||
742 | return ret; | 1113 | return ret; |
743 | } | 1114 | } |
744 | 1115 | ||
745 | void mga_driver_pretakedown(drm_device_t *dev) | 1116 | /** |
1117 | * Called just before the module is unloaded. | ||
1118 | */ | ||
1119 | int mga_driver_postcleanup(drm_device_t * dev) | ||
1120 | { | ||
1121 | drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER); | ||
1122 | dev->dev_private = NULL; | ||
1123 | |||
1124 | return 0; | ||
1125 | } | ||
1126 | |||
1127 | /** | ||
1128 | * Called when the last opener of the device is closed. | ||
1129 | */ | ||
1130 | void mga_driver_pretakedown(drm_device_t * dev) | ||
746 | { | 1131 | { |
747 | mga_do_cleanup_dma( dev ); | 1132 | mga_do_cleanup_dma( dev ); |
748 | } | 1133 | } |