author     Tejun Heo <htejun@gmail.com>     2007-04-17 10:44:07 -0400
committer  Jeff Garzik <jeff@garzik.org>    2007-04-28 14:16:03 -0400
commit     0f834de3ea61aacacf1fac59ba9e82680f83c846 (patch)
tree       3b99ea99281e04ab6dd436f2954981f2cce889bb /drivers/ata/libata-sff.c
parent     f5cda257296fbd3683b1f568f2d94d3caaacf74d (diff)
libata: convert legacy PCI host handling to new init model
Convert legacy PCI host handling to alloc-init-register model.
ata_init_legacy_host(), ata_request_legacy_irqs() and
ata_pci_init_bmdma() are separated out and follow the new init model.
The two legacy handling functions use separate ata_legacy_devres
instead of generic devm_* resources. This reduces devres overhead for
legacy hosts, which was a bit high because they didn't use PCI/iomap
merged resources.
ata_pci_init_one() is rewritten in terms of the above functions, but
native mode handling still uses the old method. Conversion will
be completed when native mode handling is updated.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
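
For orientation only (this note and the sketch below are not part of the
patch): in the rewritten legacy branch of ata_pci_init_one(), the helpers
added by this patch are called in alloc-init-register order roughly as
follows. The wrapper name is made up for illustration; the helpers are the
static functions this patch adds to drivers/ata/libata-sff.c, and error
unwinding is left to devres, so the sketch simply returns on failure.

/* Illustrative sketch, condensed from the new legacy path in this patch. */
static int legacy_path_sketch(struct pci_dev *pdev,
			      const struct ata_port_info **port,
			      struct ata_port_info **port_info)
{
	struct ata_host *host;
	unsigned int legacy_mode = 0;
	int was_busy = 0, rc;

	/* alloc: a two-port host built from the port_info templates */
	host = ata_host_alloc_pinfo(&pdev->dev, port, 2);
	if (!host)
		return -ENOMEM;

	/* init: legacy I/O ports, then DMA masks and the BMDMA BAR */
	rc = ata_init_legacy_host(host, &legacy_mode, &was_busy);
	if (rc)
		return rc;
	ata_pci_init_bmdma(host);
	pci_set_master(pdev);

	/* register: start ports, wire up the two legacy IRQs, register */
	{
		irq_handler_t handler[2] = { host->ops->irq_handler,
					     host->ops->irq_handler };
		unsigned int irq_flags[2] = { IRQF_SHARED, IRQF_SHARED };
		void *dev_id[2] = { host, host };

		rc = ata_host_start(host);
		if (rc)
			return rc;
		rc = ata_request_legacy_irqs(host, handler, irq_flags, dev_id);
		if (rc)
			return rc;
	}
	return ata_host_register(host, port_info[0]->sht);
}
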
Diffstat (limited to 'drivers/ata/libata-sff.c')
-rw-r--r--   drivers/ata/libata-sff.c   404
1 file changed, 302 insertions(+), 102 deletions(-)
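
The devres arrangement described in the commit message (one ata_legacy_devres
object instead of many generic devm_* entries) follows the standard devres
pattern sketched below. This is an editorial illustration, not code from the
patch; the my_* names are hypothetical, while devres_alloc(), devres_add(),
devres_find() and the release callback are the real driver-core API the patch
uses.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/ioport.h>

/* Hypothetical payload: one object carries every resource of the device. */
struct my_devres {
	void __iomem *addr[2];
	unsigned long port[2];
};

/* Called by the driver core when the owning device is detached. */
static void my_release(struct device *dev, void *res)
{
	struct my_devres *dr = res;
	int i;

	for (i = 0; i < 2; i++) {
		if (dr->addr[i])
			ioport_unmap(dr->addr[i]);
		if (dr->port[i])
			release_region(dr->port[i], 8);
	}
}

/* Allocate the payload and tie its lifetime to @dev. */
static int my_init(struct device *dev)
{
	struct my_devres *dr;

	dr = devres_alloc(my_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;
	devres_add(dev, dr);	/* my_release() runs automatically on detach */
	return 0;
}

/* A later stage can look the same payload up again, as the patch does
 * from ata_request_legacy_irqs(). */
static struct my_devres *my_find(struct device *dev)
{
	return devres_find(dev, my_release, NULL, NULL);
}

The full patch follows.
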
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 93cc96782165..d48e1544a0bb 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -627,75 +627,266 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int
 	return probe_ent;
 }
 
-static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
-				struct ata_port_info **port, int port_mask)
+/**
+ * ata_pci_init_bmdma - acquire PCI BMDMA resources and init ATA host
+ * @host: target ATA host
+ *
+ * Acquire PCI BMDMA resources and initialize @host accordingly.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+static int ata_pci_init_bmdma(struct ata_host *host)
 {
-	struct ata_probe_ent *probe_ent;
-	void __iomem *iomap[5] = { }, *bmdma;
+	struct device *gdev = host->dev;
+	struct pci_dev *pdev = to_pci_dev(gdev);
+	int i, rc;
 
-	if (port_mask & ATA_PORT_PRIMARY) {
-		iomap[0] = devm_ioport_map(&pdev->dev, ATA_PRIMARY_CMD, 8);
-		iomap[1] = devm_ioport_map(&pdev->dev, ATA_PRIMARY_CTL, 1);
-		if (!iomap[0] || !iomap[1])
-			return NULL;
+	/* TODO: If we get no DMA mask we should fall back to PIO */
+	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+
+	/* request and iomap DMA region */
+	rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME);
+	if (rc) {
+		dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
+		return -ENOMEM;
 	}
+	host->iomap = pcim_iomap_table(pdev);
 
-	if (port_mask & ATA_PORT_SECONDARY) {
-		iomap[2] = devm_ioport_map(&pdev->dev, ATA_SECONDARY_CMD, 8);
-		iomap[3] = devm_ioport_map(&pdev->dev, ATA_SECONDARY_CTL, 1);
-		if (!iomap[2] || !iomap[3])
-			return NULL;
+	for (i = 0; i < 2; i++) {
+		struct ata_port *ap = host->ports[i];
+		struct ata_ioports *ioaddr = &ap->ioaddr;
+		void __iomem *bmdma = host->iomap[4] + 8 * i;
+
+		if (ata_port_is_dummy(ap))
+			continue;
+
+		ioaddr->bmdma_addr = bmdma;
+		if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
+		    (ioread8(bmdma + 2) & 0x80))
+			host->flags |= ATA_HOST_SIMPLEX;
 	}
 
-	bmdma = pcim_iomap(pdev, 4, 16); /* may fail */
+	return 0;
+}
 
-	/* alloc and init probe_ent */
-	probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
-	if (!probe_ent)
-		return NULL;
+struct ata_legacy_devres {
+	unsigned int mask;
+	unsigned long cmd_port[2];
+	void __iomem * cmd_addr[2];
+	void __iomem * ctl_addr[2];
+	unsigned int irq[2];
+	void * irq_dev_id[2];
+};
 
-	probe_ent->n_ports = 2;
-	probe_ent->irq_flags = IRQF_SHARED;
+static void ata_legacy_free_irqs(struct ata_legacy_devres *legacy_dr)
+{
+	int i;
 
-	if (port_mask & ATA_PORT_PRIMARY) {
-		probe_ent->irq = ATA_PRIMARY_IRQ(pdev);
-		probe_ent->port[0].cmd_addr = iomap[0];
-		probe_ent->port[0].altstatus_addr =
-		probe_ent->port[0].ctl_addr = iomap[1];
-		if (bmdma) {
-			probe_ent->port[0].bmdma_addr = bmdma;
-			if ((!(port[0]->flags & ATA_FLAG_IGN_SIMPLEX)) &&
-			    (ioread8(bmdma + 2) & 0x80))
-				probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
-		}
-		ata_std_ports(&probe_ent->port[0]);
-	} else
-		probe_ent->dummy_port_mask |= ATA_PORT_PRIMARY;
+	for (i = 0; i < 2; i++) {
+		if (!legacy_dr->irq[i])
+			continue;
+
+		free_irq(legacy_dr->irq[i], legacy_dr->irq_dev_id[i]);
+		legacy_dr->irq[i] = 0;
+		legacy_dr->irq_dev_id[i] = NULL;
+	}
+}
+
+static void ata_legacy_release(struct device *gdev, void *res)
+{
+	struct ata_legacy_devres *this = res;
+	int i;
+
+	ata_legacy_free_irqs(this);
+
+	for (i = 0; i < 2; i++) {
+		if (this->cmd_addr[i])
+			ioport_unmap(this->cmd_addr[i]);
+		if (this->ctl_addr[i])
+			ioport_unmap(this->ctl_addr[i]);
+		if (this->cmd_port[i])
+			release_region(this->cmd_port[i], 8);
+	}
+}
+
+static int ata_init_legacy_port(struct ata_port *ap,
+				struct ata_legacy_devres *legacy_dr)
+{
+	struct ata_host *host = ap->host;
+	int port_no = ap->port_no;
+	unsigned long cmd_port, ctl_port;
+
+	if (port_no == 0) {
+		cmd_port = ATA_PRIMARY_CMD;
+		ctl_port = ATA_PRIMARY_CTL;
+	} else {
+		cmd_port = ATA_SECONDARY_CMD;
+		ctl_port = ATA_SECONDARY_CTL;
+	}
+
+	/* request cmd_port */
+	if (request_region(cmd_port, 8, "libata"))
+		legacy_dr->cmd_port[port_no] = cmd_port;
+	else {
+		dev_printk(KERN_WARNING, host->dev,
+			   "0x%0lX IDE port busy\n", cmd_port);
+		return -EBUSY;
+	}
+
+	/* iomap cmd and ctl ports */
+	legacy_dr->cmd_addr[port_no] = ioport_map(cmd_port, 8);
+	legacy_dr->ctl_addr[port_no] = ioport_map(ctl_port, 1);
+	if (!legacy_dr->cmd_addr[port_no] || !legacy_dr->ctl_addr[port_no])
+		return -ENOMEM;
+
+	/* init IO addresses */
+	ap->ioaddr.cmd_addr = legacy_dr->cmd_addr[port_no];
+	ap->ioaddr.altstatus_addr = legacy_dr->ctl_addr[port_no];
+	ap->ioaddr.ctl_addr = legacy_dr->ctl_addr[port_no];
+	ata_std_ports(&ap->ioaddr);
+
+	return 0;
+}
+
+/**
+ * ata_init_legacy_host - acquire legacy ATA resources and init ATA host
+ * @host: target ATA host
+ * @legacy_mask: out parameter, mask indicating ports is in legacy mode
+ * @was_busy: out parameter, indicates whether any port was busy
+ *
+ * Acquire legacy ATA resources for ports.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+static int ata_init_legacy_host(struct ata_host *host,
+				unsigned int *legacy_mask, int *was_busy)
+{
+	struct device *gdev = host->dev;
+	struct ata_legacy_devres *legacy_dr;
+	int i, rc;
+
+	if (!devres_open_group(gdev, NULL, GFP_KERNEL))
+		return -ENOMEM;
+
+	rc = -ENOMEM;
+	legacy_dr = devres_alloc(ata_legacy_release, sizeof(*legacy_dr),
+				 GFP_KERNEL);
+	if (!legacy_dr)
+		goto err_out;
+	devres_add(gdev, legacy_dr);
+
+	for (i = 0; i < 2; i++) {
+		*legacy_mask &= ~(1 << i);
+		rc = ata_init_legacy_port(host->ports[i], legacy_dr);
+		if (rc == 0)
+			legacy_dr->mask |= 1 << i;
+		else if (rc == -EBUSY)
+			(*was_busy)++;
+	}
+
+	if (!legacy_dr->mask)
+		return -EBUSY;
+
+	for (i = 0; i < 2; i++)
+		if (!(legacy_dr->mask & (1 << i)))
+			host->ports[i]->ops = &ata_dummy_port_ops;
 
-	if (port_mask & ATA_PORT_SECONDARY) {
-		if (probe_ent->irq)
-			probe_ent->irq2 = ATA_SECONDARY_IRQ(pdev);
+	*legacy_mask |= legacy_dr->mask;
+
+	devres_remove_group(gdev, NULL);
+	return 0;
+
+ err_out:
+	devres_release_group(gdev, NULL);
+	return rc;
+}
+
+/**
+ * ata_request_legacy_irqs - request legacy ATA IRQs
+ * @host: target ATA host
+ * @handler: array of IRQ handlers
+ * @irq_flags: array of IRQ flags
+ * @dev_id: array of IRQ dev_ids
+ *
+ * Request legacy IRQs for non-dummy legacy ports in @host. All
+ * IRQ parameters are passed as array to allow ports to have
+ * separate IRQ handlers.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+static int ata_request_legacy_irqs(struct ata_host *host,
+				   irq_handler_t const *handler,
+				   const unsigned int *irq_flags,
+				   void * const *dev_id)
+{
+	struct device *gdev = host->dev;
+	struct ata_legacy_devres *legacy_dr;
+	int i, rc;
+
+	legacy_dr = devres_find(host->dev, ata_legacy_release, NULL, NULL);
+	BUG_ON(!legacy_dr);
+
+	for (i = 0; i < 2; i++) {
+		unsigned int irq;
+
+		/* FIXME: ATA_*_IRQ() should take generic device not pci_dev */
+		if (i == 0)
+			irq = ATA_PRIMARY_IRQ(to_pci_dev(gdev));
 		else
-			probe_ent->irq = ATA_SECONDARY_IRQ(pdev);
-		probe_ent->port[1].cmd_addr = iomap[2];
-		probe_ent->port[1].altstatus_addr =
-		probe_ent->port[1].ctl_addr = iomap[3];
-		if (bmdma) {
-			probe_ent->port[1].bmdma_addr = bmdma + 8;
-			if ((!(port[1]->flags & ATA_FLAG_IGN_SIMPLEX)) &&
-			    (ioread8(bmdma + 10) & 0x80))
-				probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
+			irq = ATA_SECONDARY_IRQ(to_pci_dev(gdev));
+
+		if (!(legacy_dr->mask & (1 << i)))
+			continue;
+
+		if (!handler[i]) {
+			dev_printk(KERN_ERR, gdev,
+				   "NULL handler specified for port %d\n", i);
+			rc = -EINVAL;
+			goto err_out;
 		}
-		ata_std_ports(&probe_ent->port[1]);
 
-		/* FIXME: could be pointing to stack area; must copy */
-		probe_ent->pinfo2 = port[1];
-	} else
-		probe_ent->dummy_port_mask |= ATA_PORT_SECONDARY;
+		rc = request_irq(irq, handler[i], irq_flags[i], DRV_NAME,
+				 dev_id[i]);
+		if (rc) {
+			dev_printk(KERN_ERR, gdev,
+				"irq %u request failed (errno=%d)\n", irq, rc);
+			goto err_out;
+		}
 
-	return probe_ent;
-}
+		/* record irq allocation in legacy_dr */
+		legacy_dr->irq[i] = irq;
+		legacy_dr->irq_dev_id[i] = dev_id[i];
+
+		/* only used to print info */
+		if (i == 0)
+			host->irq = irq;
+		else
+			host->irq2 = irq;
+	}
+
+	return 0;
 
+ err_out:
+	ata_legacy_free_irqs(legacy_dr);
+	return rc;
+}
 
 /**
  * ata_pci_init_one - Initialize/register PCI IDE host controller
@@ -727,7 +918,8 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
 {
 	struct device *dev = &pdev->dev;
 	struct ata_probe_ent *probe_ent = NULL;
-	struct ata_port_info *port[2];
+	struct ata_host *host = NULL;
+	const struct ata_port_info *port[2];
 	u8 mask;
 	unsigned int legacy_mode = 0;
 	int rc;
@@ -783,66 +975,74 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
 			pcim_pin_device(pdev);
 			goto err_out;
 		}
+
+		/* TODO: If we get no DMA mask we should fall back to PIO */
+		rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+		if (rc)
+			goto err_out;
+		rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+		if (rc)
+			goto err_out;
+
+		pci_set_master(pdev);
 	} else {
-		/* Deal with combined mode hack. This side of the logic all
-		   goes away once the combined mode hack is killed in 2.6.21 */
-		if (!devm_request_region(dev, ATA_PRIMARY_CMD, 8, "libata")) {
-			pcim_pin_device(pdev);
-			printk(KERN_WARNING "ata: 0x%0X IDE port busy\n",
-			       ATA_PRIMARY_CMD);
-		} else
-			legacy_mode |= ATA_PORT_PRIMARY;
+		int was_busy = 0;
+
+		rc = -ENOMEM;
+		host = ata_host_alloc_pinfo(dev, port, 2);
+		if (!host)
+			goto err_out;
 
-		if (!devm_request_region(dev, ATA_SECONDARY_CMD, 8, "libata")) {
+		rc = ata_init_legacy_host(host, &legacy_mode, &was_busy);
+		if (was_busy)
 			pcim_pin_device(pdev);
-			printk(KERN_WARNING "ata: 0x%X IDE port busy\n",
-			       ATA_SECONDARY_CMD);
-		} else
-			legacy_mode |= ATA_PORT_SECONDARY;
-
-		if (legacy_mode & ATA_PORT_PRIMARY)
-			pci_request_region(pdev, 1, DRV_NAME);
-		if (legacy_mode & ATA_PORT_SECONDARY)
-			pci_request_region(pdev, 3, DRV_NAME);
-		/* If there is a DMA resource, allocate it */
-		pci_request_region(pdev, 4, DRV_NAME);
-	}
+		if (rc)
+			goto err_out;
 
-	/* we have legacy mode, but all ports are unavailable */
-	if (legacy_mode == (1 << 3)) {
-		rc = -EBUSY;
-		goto err_out;
-	}
+		/* request respective PCI regions, may fail */
+		rc = pci_request_region(pdev, 1, DRV_NAME);
+		rc = pci_request_region(pdev, 3, DRV_NAME);
 
-	/* TODO: If we get no DMA mask we should fall back to PIO */
-	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
-	if (rc)
-		goto err_out;
-	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
-	if (rc)
-		goto err_out;
+		/* init bmdma */
+		ata_pci_init_bmdma(host);
+		pci_set_master(pdev);
+	}
 
 	if (legacy_mode) {
-		probe_ent = ata_pci_init_legacy_port(pdev, port, legacy_mode);
+		irq_handler_t handler[2] = { host->ops->irq_handler,
+					     host->ops->irq_handler };
+		unsigned int irq_flags[2] = { IRQF_SHARED, IRQF_SHARED };
+		void *dev_id[2] = { host, host };
+
+		rc = ata_host_start(host);
+		if (rc)
+			goto err_out;
+
+		rc = ata_request_legacy_irqs(host, handler, irq_flags, dev_id);
+		if (rc)
+			goto err_out;
+
+		rc = ata_host_register(host, port_info[0]->sht);
+		if (rc)
+			goto err_out;
 	} else {
 		if (n_ports == 2)
-			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
+			probe_ent = ata_pci_init_native_mode(pdev, (struct ata_port_info **)port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
 		else
-			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
-	}
-	if (!probe_ent) {
-		rc = -ENOMEM;
-		goto err_out;
-	}
+			probe_ent = ata_pci_init_native_mode(pdev, (struct ata_port_info **)port, ATA_PORT_PRIMARY);
 
-	pci_set_master(pdev);
+		if (!probe_ent) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
 
-	if (!ata_device_add(probe_ent)) {
-		rc = -ENODEV;
-		goto err_out;
-	}
+		if (!ata_device_add(probe_ent)) {
+			rc = -ENODEV;
+			goto err_out;
+		}
 
-	devm_kfree(dev, probe_ent);
+		devm_kfree(dev, probe_ent);
+	}
 	devres_remove_group(dev, NULL);
 	return 0;
 