Diffstat (limited to 'drivers/ata/libata-sff.c')
-rw-r--r--	drivers/ata/libata-sff.c	631
1 file changed, 417 insertions(+), 214 deletions(-)
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 2ffcca063d80..8af18ad1ca7f 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -526,168 +526,399 @@ static int ata_resources_present(struct pci_dev *pdev, int port)
 	port = port * 2;
 	for (i = 0; i < 2; i ++) {
 		if (pci_resource_start(pdev, port + i) == 0 ||
 		    pci_resource_len(pdev, port + i) == 0)
 			return 0;
 	}
 	return 1;
 }
 
 /**
- * ata_pci_init_native_mode - Initialize native-mode driver
- * @pdev: pci device to be initialized
- * @port: array[2] of pointers to port info structures.
- * @ports: bitmap of ports present
- *
- * Utility function which allocates and initializes an
- * ata_probe_ent structure for a standard dual-port
- * PIO-based IDE controller. The returned ata_probe_ent
- * structure can be passed to ata_device_add(). The returned
- * ata_probe_ent structure should then be freed with kfree().
- *
- * The caller need only pass the address of the primary port, the
- * secondary will be deduced automatically. If the device has non
- * standard secondary port mappings this function can be called twice,
- * once for each interface.
+ * ata_pci_init_bmdma - acquire PCI BMDMA resources and init ATA host
+ * @host: target ATA host
+ *
+ * Acquire PCI BMDMA resources and initialize @host accordingly.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
  */
+static int ata_pci_init_bmdma(struct ata_host *host)
+{
+	struct device *gdev = host->dev;
+	struct pci_dev *pdev = to_pci_dev(gdev);
+	int i, rc;
+
+	/* TODO: If we get no DMA mask we should fall back to PIO */
+	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+
+	/* request and iomap DMA region */
+	rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME);
+	if (rc) {
+		dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
+		return -ENOMEM;
+	}
+	host->iomap = pcim_iomap_table(pdev);
+
+	for (i = 0; i < 2; i++) {
+		struct ata_port *ap = host->ports[i];
+		void __iomem *bmdma = host->iomap[4] + 8 * i;
+
+		if (ata_port_is_dummy(ap))
+			continue;
+
+		ap->ioaddr.bmdma_addr = bmdma;
+		if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
+		    (ioread8(bmdma + 2) & 0x80))
+			host->flags |= ATA_HOST_SIMPLEX;
+	}
+
+	return 0;
+}
 
-struct ata_probe_ent *
-ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
+/**
+ * ata_pci_init_native_host - acquire native ATA resources and init host
+ * @host: target ATA host
+ * @port_mask: ports to consider
+ *
+ * Acquire native PCI ATA resources for @host and initialize
+ * @host accordingly.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ata_pci_init_native_host(struct ata_host *host, unsigned int port_mask)
 {
-	struct ata_probe_ent *probe_ent;
-	int i, p = 0;
-	void __iomem * const *iomap;
+	struct device *gdev = host->dev;
+	struct pci_dev *pdev = to_pci_dev(gdev);
+	int i, rc;
 
-	/* iomap BARs */
-	for (i = 0; i < 4; i++) {
-		if (pcim_iomap(pdev, i, 0) == NULL) {
-			dev_printk(KERN_ERR, &pdev->dev,
-				   "failed to iomap PCI BAR %d\n", i);
-			return NULL;
+	/* Discard disabled ports. Some controllers show their unused
+	 * channels this way. Disabled ports are made dummy.
+	 */
+	for (i = 0; i < 2; i++) {
+		if ((port_mask & (1 << i)) && !ata_resources_present(pdev, i)) {
+			host->ports[i]->ops = &ata_dummy_port_ops;
+			port_mask &= ~(1 << i);
 		}
 	}
 
-	pcim_iomap(pdev, 4, 0); /* may fail */
-	iomap = pcim_iomap_table(pdev);
-
-	/* alloc and init probe_ent */
-	probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
-	if (!probe_ent)
-		return NULL;
-
-	probe_ent->irq = pdev->irq;
-	probe_ent->irq_flags = IRQF_SHARED;
-
-	/* Discard disabled ports. Some controllers show their
-	   unused channels this way */
-	if (ata_resources_present(pdev, 0) == 0)
-		ports &= ~ATA_PORT_PRIMARY;
-	if (ata_resources_present(pdev, 1) == 0)
-		ports &= ~ATA_PORT_SECONDARY;
-
-	if (ports & ATA_PORT_PRIMARY) {
-		probe_ent->port[p].cmd_addr = iomap[0];
-		probe_ent->port[p].altstatus_addr =
-		probe_ent->port[p].ctl_addr = (void __iomem *)
-			((unsigned long)iomap[1] | ATA_PCI_CTL_OFS);
-		if (iomap[4]) {
-			if ((!(port[p]->flags & ATA_FLAG_IGN_SIMPLEX)) &&
-			    (ioread8(iomap[4] + 2) & 0x80))
-				probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
-			probe_ent->port[p].bmdma_addr = iomap[4];
-		}
-		ata_std_ports(&probe_ent->port[p]);
-		p++;
+	if (!port_mask) {
+		dev_printk(KERN_ERR, gdev, "no available port\n");
+		return -ENODEV;
 	}
 
-	if (ports & ATA_PORT_SECONDARY) {
-		probe_ent->port[p].cmd_addr = iomap[2];
-		probe_ent->port[p].altstatus_addr =
-		probe_ent->port[p].ctl_addr = (void __iomem *)
-			((unsigned long)iomap[3] | ATA_PCI_CTL_OFS);
-		if (iomap[4]) {
-			if ((!(port[p]->flags & ATA_FLAG_IGN_SIMPLEX)) &&
-			    (ioread8(iomap[4] + 10) & 0x80))
-				probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
-			probe_ent->port[p].bmdma_addr = iomap[4] + 8;
+	/* request, iomap BARs and init port addresses accordingly */
+	for (i = 0; i < 2; i++) {
+		struct ata_port *ap = host->ports[i];
+		int base = i * 2;
+		void __iomem * const *iomap;
+
+		if (!(port_mask & (1 << i)))
+			continue;
+
+		rc = pcim_iomap_regions(pdev, 0x3 << base, DRV_NAME);
+		if (rc) {
+			dev_printk(KERN_ERR, gdev, "failed to request/iomap "
+				   "BARs for port %d (errno=%d)\n", i, rc);
+			if (rc == -EBUSY)
+				pcim_pin_device(pdev);
+			return rc;
 		}
-		ata_std_ports(&probe_ent->port[p]);
-		probe_ent->pinfo2 = port[1];
-		p++;
+		host->iomap = iomap = pcim_iomap_table(pdev);
+
+		ap->ioaddr.cmd_addr = iomap[base];
+		ap->ioaddr.altstatus_addr =
+		ap->ioaddr.ctl_addr = (void __iomem *)
+			((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
+		ata_std_ports(&ap->ioaddr);
 	}
 
-	probe_ent->n_ports = p;
-	return probe_ent;
+	return 0;
 }
621 648
622static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, 649/**
623 struct ata_port_info **port, int port_mask) 650 * ata_pci_prepare_native_host - helper to prepare native PCI ATA host
651 * @pdev: target PCI device
652 * @ppi: array of port_info
653 * @n_ports: number of ports to allocate
654 * @r_host: out argument for the initialized ATA host
655 *
656 * Helper to allocate ATA host for @pdev, acquire all native PCI
657 * resources and initialize it accordingly in one go.
658 *
659 * LOCKING:
660 * Inherited from calling layer (may sleep).
661 *
662 * RETURNS:
663 * 0 on success, -errno otherwise.
664 */
665int ata_pci_prepare_native_host(struct pci_dev *pdev,
666 const struct ata_port_info * const * ppi,
667 int n_ports, struct ata_host **r_host)
668{
669 struct ata_host *host;
670 unsigned int port_mask;
671 int rc;
672
673 if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
674 return -ENOMEM;
675
676 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
677 if (!host) {
678 dev_printk(KERN_ERR, &pdev->dev,
679 "failed to allocate ATA host\n");
680 rc = -ENOMEM;
681 goto err_out;
682 }
683
684 port_mask = ATA_PORT_PRIMARY;
685 if (n_ports > 1)
686 port_mask |= ATA_PORT_SECONDARY;
687
688 rc = ata_pci_init_native_host(host, port_mask);
689 if (rc)
690 goto err_out;
691
692 /* init DMA related stuff */
693 rc = ata_pci_init_bmdma(host);
694 if (rc)
695 goto err_bmdma;
696
697 devres_remove_group(&pdev->dev, NULL);
698 *r_host = host;
699 return 0;
700
701 err_bmdma:
702 /* This is necessary because PCI and iomap resources are
703 * merged and releasing the top group won't release the
704 * acquired resources if some of those have been acquired
705 * before entering this function.
706 */
707 pcim_iounmap_regions(pdev, 0xf);
708 err_out:
709 devres_release_group(&pdev->dev, NULL);
710 return rc;
711}
712
713struct ata_legacy_devres {
714 unsigned int mask;
715 unsigned long cmd_port[2];
716 void __iomem * cmd_addr[2];
717 void __iomem * ctl_addr[2];
718 unsigned int irq[2];
719 void * irq_dev_id[2];
720};
721
722static void ata_legacy_free_irqs(struct ata_legacy_devres *legacy_dr)
624{ 723{
-	struct ata_probe_ent *probe_ent;
-	void __iomem *iomap[5] = { }, *bmdma;
+	int i;
 
-	if (port_mask & ATA_PORT_PRIMARY) {
-		iomap[0] = devm_ioport_map(&pdev->dev, ATA_PRIMARY_CMD, 8);
-		iomap[1] = devm_ioport_map(&pdev->dev, ATA_PRIMARY_CTL, 1);
-		if (!iomap[0] || !iomap[1])
-			return NULL;
+	for (i = 0; i < 2; i++) {
+		if (!legacy_dr->irq[i])
+			continue;
+
+		free_irq(legacy_dr->irq[i], legacy_dr->irq_dev_id[i]);
+		legacy_dr->irq[i] = 0;
+		legacy_dr->irq_dev_id[i] = NULL;
 	}
+}
+
+static void ata_legacy_release(struct device *gdev, void *res)
+{
+	struct ata_legacy_devres *this = res;
+	int i;
 
-	if (port_mask & ATA_PORT_SECONDARY) {
-		iomap[2] = devm_ioport_map(&pdev->dev, ATA_SECONDARY_CMD, 8);
-		iomap[3] = devm_ioport_map(&pdev->dev, ATA_SECONDARY_CTL, 1);
-		if (!iomap[2] || !iomap[3])
-			return NULL;
+	ata_legacy_free_irqs(this);
+
+	for (i = 0; i < 2; i++) {
+		if (this->cmd_addr[i])
+			ioport_unmap(this->cmd_addr[i]);
+		if (this->ctl_addr[i])
+			ioport_unmap(this->ctl_addr[i]);
+		if (this->cmd_port[i])
+			release_region(this->cmd_port[i], 8);
 	}
+}
 
-	bmdma = pcim_iomap(pdev, 4, 16); /* may fail */
-
-	/* alloc and init probe_ent */
-	probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
-	if (!probe_ent)
-		return NULL;
-
-	probe_ent->n_ports = 2;
-	probe_ent->irq_flags = IRQF_SHARED;
-
-	if (port_mask & ATA_PORT_PRIMARY) {
-		probe_ent->irq = ATA_PRIMARY_IRQ(pdev);
-		probe_ent->port[0].cmd_addr = iomap[0];
-		probe_ent->port[0].altstatus_addr =
-		probe_ent->port[0].ctl_addr = iomap[1];
-		if (bmdma) {
-			probe_ent->port[0].bmdma_addr = bmdma;
-			if ((!(port[0]->flags & ATA_FLAG_IGN_SIMPLEX)) &&
-			    (ioread8(bmdma + 2) & 0x80))
-				probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
-		}
-		ata_std_ports(&probe_ent->port[0]);
-	} else
-		probe_ent->dummy_port_mask |= ATA_PORT_PRIMARY;
+static int ata_init_legacy_port(struct ata_port *ap,
+				struct ata_legacy_devres *legacy_dr)
+{
+	struct ata_host *host = ap->host;
+	int port_no = ap->port_no;
+	unsigned long cmd_port, ctl_port;
+
+	if (port_no == 0) {
+		cmd_port = ATA_PRIMARY_CMD;
+		ctl_port = ATA_PRIMARY_CTL;
+	} else {
+		cmd_port = ATA_SECONDARY_CMD;
+		ctl_port = ATA_SECONDARY_CTL;
+	}
 
-	if (port_mask & ATA_PORT_SECONDARY) {
-		if (probe_ent->irq)
-			probe_ent->irq2 = ATA_SECONDARY_IRQ(pdev);
+	/* request cmd_port */
+	if (request_region(cmd_port, 8, "libata"))
+		legacy_dr->cmd_port[port_no] = cmd_port;
+	else {
+		dev_printk(KERN_WARNING, host->dev,
+			   "0x%0lX IDE port busy\n", cmd_port);
+		return -EBUSY;
+	}
+
+	/* iomap cmd and ctl ports */
+	legacy_dr->cmd_addr[port_no] = ioport_map(cmd_port, 8);
+	legacy_dr->ctl_addr[port_no] = ioport_map(ctl_port, 1);
+	if (!legacy_dr->cmd_addr[port_no] || !legacy_dr->ctl_addr[port_no])
+		return -ENOMEM;
+
+	/* init IO addresses */
+	ap->ioaddr.cmd_addr = legacy_dr->cmd_addr[port_no];
+	ap->ioaddr.altstatus_addr = legacy_dr->ctl_addr[port_no];
+	ap->ioaddr.ctl_addr = legacy_dr->ctl_addr[port_no];
+	ata_std_ports(&ap->ioaddr);
+
+	return 0;
+}
+
+/**
+ * ata_init_legacy_host - acquire legacy ATA resources and init ATA host
+ * @host: target ATA host
+ * @legacy_mask: out parameter, mask indicating ports in legacy mode
+ * @was_busy: out parameter, indicates whether any port was busy
+ *
+ * Acquire legacy ATA resources for ports.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+static int ata_init_legacy_host(struct ata_host *host,
+				unsigned int *legacy_mask, int *was_busy)
+{
+	struct device *gdev = host->dev;
+	struct ata_legacy_devres *legacy_dr;
+	int i, rc;
+
+	if (!devres_open_group(gdev, NULL, GFP_KERNEL))
+		return -ENOMEM;
+
+	rc = -ENOMEM;
+	legacy_dr = devres_alloc(ata_legacy_release, sizeof(*legacy_dr),
+				 GFP_KERNEL);
+	if (!legacy_dr)
+		goto err_out;
+	devres_add(gdev, legacy_dr);
+
+	for (i = 0; i < 2; i++) {
+		*legacy_mask &= ~(1 << i);
+		rc = ata_init_legacy_port(host->ports[i], legacy_dr);
+		if (rc == 0)
+			legacy_dr->mask |= 1 << i;
+		else if (rc == -EBUSY)
+			(*was_busy)++;
+	}
+
+	if (!legacy_dr->mask)
+		return -EBUSY;
+
+	for (i = 0; i < 2; i++)
+		if (!(legacy_dr->mask & (1 << i)))
+			host->ports[i]->ops = &ata_dummy_port_ops;
+
+	*legacy_mask |= legacy_dr->mask;
+
+	devres_remove_group(gdev, NULL);
+	return 0;
+
+ err_out:
+	devres_release_group(gdev, NULL);
+	return rc;
+}
+
+/**
+ * ata_request_legacy_irqs - request legacy ATA IRQs
+ * @host: target ATA host
+ * @handler: array of IRQ handlers
+ * @irq_flags: array of IRQ flags
+ * @dev_id: array of IRQ dev_ids
+ *
+ * Request legacy IRQs for non-dummy legacy ports in @host. All
+ * IRQ parameters are passed as array to allow ports to have
+ * separate IRQ handlers.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+static int ata_request_legacy_irqs(struct ata_host *host,
+				   irq_handler_t const *handler,
+				   const unsigned int *irq_flags,
+				   void * const *dev_id)
+{
+	struct device *gdev = host->dev;
+	struct ata_legacy_devres *legacy_dr;
+	int i, rc;
+
+	legacy_dr = devres_find(host->dev, ata_legacy_release, NULL, NULL);
+	BUG_ON(!legacy_dr);
+
+	for (i = 0; i < 2; i++) {
+		unsigned int irq;
+
+		/* FIXME: ATA_*_IRQ() should take generic device not pci_dev */
+		if (i == 0)
+			irq = ATA_PRIMARY_IRQ(to_pci_dev(gdev));
 		else
-			probe_ent->irq = ATA_SECONDARY_IRQ(pdev);
-		probe_ent->port[1].cmd_addr = iomap[2];
-		probe_ent->port[1].altstatus_addr =
-		probe_ent->port[1].ctl_addr = iomap[3];
-		if (bmdma) {
-			probe_ent->port[1].bmdma_addr = bmdma + 8;
-			if ((!(port[1]->flags & ATA_FLAG_IGN_SIMPLEX)) &&
-			    (ioread8(bmdma + 10) & 0x80))
-				probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
+			irq = ATA_SECONDARY_IRQ(to_pci_dev(gdev));
+
+		if (!(legacy_dr->mask & (1 << i)))
+			continue;
+
+		if (!handler[i]) {
+			dev_printk(KERN_ERR, gdev,
+				   "NULL handler specified for port %d\n", i);
+			rc = -EINVAL;
+			goto err_out;
+		}
+
+		rc = request_irq(irq, handler[i], irq_flags[i], DRV_NAME,
+				 dev_id[i]);
+		if (rc) {
+			dev_printk(KERN_ERR, gdev,
+				   "irq %u request failed (errno=%d)\n", irq, rc);
+			goto err_out;
 		}
-		ata_std_ports(&probe_ent->port[1]);
 
-		/* FIXME: could be pointing to stack area; must copy */
-		probe_ent->pinfo2 = port[1];
-	} else
-		probe_ent->dummy_port_mask |= ATA_PORT_SECONDARY;
+		/* record irq allocation in legacy_dr */
+		legacy_dr->irq[i] = irq;
+		legacy_dr->irq_dev_id[i] = dev_id[i];
 
-	return probe_ent;
-}
+		/* only used to print info */
+		if (i == 0)
+			host->irq = irq;
+		else
+			host->irq2 = irq;
+	}
 
+	return 0;
+
+ err_out:
+	ata_legacy_free_irqs(legacy_dr);
+	return rc;
+}
 
 /**
  * ata_pci_init_one - Initialize/register PCI IDE host controller
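
The legacy helpers above are built around a devres group: resources acquired after devres_open_group() stay with the device when the group is removed on success, while devres_release_group() on the failure path frees everything acquired since the group was opened. A minimal sketch of that idiom (an editorial illustration, not part of this patch; acquire_managed_resources() is a hypothetical placeholder):

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	/* acquire managed resources; they are recorded in the open group */
	rc = acquire_managed_resources(dev);	/* hypothetical helper */
	if (rc) {
		/* failure: drop everything acquired since the group opened */
		devres_release_group(dev, NULL);
		return rc;
	}

	/* success: keep the resources, discard only the group marker */
	devres_remove_group(dev, NULL);
	return 0;
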
@@ -718,8 +949,8 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
 		      unsigned int n_ports)
 {
 	struct device *dev = &pdev->dev;
-	struct ata_probe_ent *probe_ent = NULL;
-	struct ata_port_info *port[2];
+	struct ata_host *host = NULL;
+	const struct ata_port_info *port[2];
 	u8 mask;
 	unsigned int legacy_mode = 0;
 	int rc;
@@ -743,7 +974,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
 
 	   Checking dev->is_enabled is insufficient as this is not set at
 	   boot for the primary video which is BIOS enabled
-	 */
+	*/
 
 	rc = pcim_enable_device(pdev);
 	if (rc)
@@ -769,96 +1000,68 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
 #endif
 	}
 
+	/* alloc and init host */
+	host = ata_host_alloc_pinfo(dev, port, 2);
+	if (!host) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "failed to allocate ATA host\n");
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
 	if (!legacy_mode) {
-		rc = pci_request_regions(pdev, DRV_NAME);
-		if (rc) {
-			pcim_pin_device(pdev);
+		unsigned int port_mask;
+
+		port_mask = ATA_PORT_PRIMARY;
+		if (n_ports > 1)
+			port_mask |= ATA_PORT_SECONDARY;
+
+		rc = ata_pci_init_native_host(host, port_mask);
+		if (rc)
 			goto err_out;
-		}
 	} else {
-		/* Deal with combined mode hack. This side of the logic all
-		   goes away once the combined mode hack is killed in 2.6.21 */
-		if (!devm_request_region(dev, ATA_PRIMARY_CMD, 8, "libata")) {
-			struct resource *conflict, res;
-			res.start = ATA_PRIMARY_CMD;
-			res.end = ATA_PRIMARY_CMD + 8 - 1;
-			conflict = ____request_resource(&ioport_resource, &res);
-			while (conflict->child)
-				conflict = ____request_resource(conflict, &res);
-			if (!strcmp(conflict->name, "libata"))
-				legacy_mode |= ATA_PORT_PRIMARY;
-			else {
-				pcim_pin_device(pdev);
-				printk(KERN_WARNING "ata: 0x%0X IDE port busy\n" \
-				       "ata: conflict with %s\n",
-				       ATA_PRIMARY_CMD,
-				       conflict->name);
-			}
-		} else
-			legacy_mode |= ATA_PORT_PRIMARY;
-
-		if (!devm_request_region(dev, ATA_SECONDARY_CMD, 8, "libata")) {
-			struct resource *conflict, res;
-			res.start = ATA_SECONDARY_CMD;
-			res.end = ATA_SECONDARY_CMD + 8 - 1;
-			conflict = ____request_resource(&ioport_resource, &res);
-			while (conflict->child)
-				conflict = ____request_resource(conflict, &res);
-			if (!strcmp(conflict->name, "libata"))
-				legacy_mode |= ATA_PORT_SECONDARY;
-			else {
-				pcim_pin_device(pdev);
-				printk(KERN_WARNING "ata: 0x%X IDE port busy\n" \
-				       "ata: conflict with %s\n",
-				       ATA_SECONDARY_CMD,
-				       conflict->name);
-			}
-		} else
-			legacy_mode |= ATA_PORT_SECONDARY;
-
-		if (legacy_mode & ATA_PORT_PRIMARY)
-			pci_request_region(pdev, 1, DRV_NAME);
-		if (legacy_mode & ATA_PORT_SECONDARY)
-			pci_request_region(pdev, 3, DRV_NAME);
-		/* If there is a DMA resource, allocate it */
-		pci_request_region(pdev, 4, DRV_NAME);
-	}
+		int was_busy = 0;
 
-	/* we have legacy mode, but all ports are unavailable */
-	if (legacy_mode == (1 << 3)) {
-		rc = -EBUSY;
-		goto err_out;
+		rc = ata_init_legacy_host(host, &legacy_mode, &was_busy);
+		if (was_busy)
+			pcim_pin_device(pdev);
+		if (rc)
+			goto err_out;
+
+		/* request respective PCI regions, may fail */
+		rc = pci_request_region(pdev, 1, DRV_NAME);
+		rc = pci_request_region(pdev, 3, DRV_NAME);
 	}
 
-	/* TODO: If we get no DMA mask we should fall back to PIO */
-	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
-	if (rc)
-		goto err_out;
-	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	/* init BMDMA, may fail */
+	ata_pci_init_bmdma(host);
+	pci_set_master(pdev);
+
+	/* start host and request IRQ */
+	rc = ata_host_start(host);
 	if (rc)
 		goto err_out;
 
-	if (legacy_mode) {
-		probe_ent = ata_pci_init_legacy_port(pdev, port, legacy_mode);
-	} else {
-		if (n_ports == 2)
-			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
-		else
-			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
+	if (!legacy_mode)
+		rc = devm_request_irq(dev, pdev->irq,
+				      port_info[0]->port_ops->irq_handler,
+				      IRQF_SHARED, DRV_NAME, host);
+	else {
+		irq_handler_t handler[2] = { host->ops->irq_handler,
+					     host->ops->irq_handler };
+		unsigned int irq_flags[2] = { IRQF_SHARED, IRQF_SHARED };
+		void *dev_id[2] = { host, host };
+
+		rc = ata_request_legacy_irqs(host, handler, irq_flags, dev_id);
 	}
-	if (!probe_ent) {
-		rc = -ENOMEM;
+	if (rc)
 		goto err_out;
-	}
-
-	pci_set_master(pdev);
 
-	if (!ata_device_add(probe_ent)) {
-		rc = -ENODEV;
+	/* register */
+	rc = ata_host_register(host, port_info[0]->sht);
+	if (rc)
 		goto err_out;
-	}
 
-	devm_kfree(dev, probe_ent);
 	devres_remove_group(dev, NULL);
 	return 0;
 
@@ -893,12 +1096,12 @@ int ata_pci_clear_simplex(struct pci_dev *pdev)
 	return 0;
 }
 
-unsigned long ata_pci_default_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long xfer_mask)
+unsigned long ata_pci_default_filter(struct ata_device *adev, unsigned long xfer_mask)
 {
 	/* Filter out DMA modes if the device has been configured by
 	   the BIOS as PIO only */
 
-	if (ap->ioaddr.bmdma_addr == 0)
+	if (adev->ap->ioaddr.bmdma_addr == 0)
 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
 	return xfer_mask;
 }
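
For reference, a minimal sketch of how a native-mode low-level driver probe routine could sit on top of the helpers this patch introduces. my_init_one, my_port_info, my_sht and the "my_driver" IRQ name are hypothetical placeholders; the call sequence simply mirrors the non-legacy path of ata_pci_init_one() above:

static int my_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct ata_port_info *ppi[] = { &my_port_info, &my_port_info };
	struct ata_host *host;
	int rc;

	/* managed (devres) device enable */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* allocate the host and grab native BARs plus BMDMA in one go */
	rc = ata_pci_prepare_native_host(pdev, ppi, 2, &host);
	if (rc)
		return rc;

	pci_set_master(pdev);

	/* start ports, wire up the shared interrupt, then register */
	rc = ata_host_start(host);
	if (rc)
		return rc;

	rc = devm_request_irq(&pdev->dev, pdev->irq,
			      my_port_info.port_ops->irq_handler,
			      IRQF_SHARED, "my_driver", host);
	if (rc)
		return rc;

	return ata_host_register(host, &my_sht);
}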