Diffstat (limited to 'drivers/ata/sata_mv.c')
-rw-r--r--  drivers/ata/sata_mv.c  213
1 file changed, 107 insertions, 106 deletions
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index a65ba636aaa8..cb9b9ac12b4c 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -253,10 +253,7 @@ enum {
 #define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
 
 enum {
-	/* Our DMA boundary is determined by an ePRD being unable to handle
-	 * anything larger than 64KB
-	 */
-	MV_DMA_BOUNDARY		= 0xffffU,
+	MV_DMA_BOUNDARY		= 0xffffffffU,
 
 	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,
 
@@ -350,7 +347,6 @@ static void mv_port_stop(struct ata_port *ap);
 static void mv_qc_prep(struct ata_queued_cmd *qc);
 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
-static irqreturn_t mv_interrupt(int irq, void *dev_instance);
 static void mv_eng_timeout(struct ata_port *ap);
 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 
@@ -384,10 +380,10 @@ static struct scsi_host_template mv_sht = {
 	.queuecommand		= ata_scsi_queuecmd,
 	.can_queue		= MV_USE_Q_DEPTH,
 	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= MV_MAX_SG_CT / 2,
+	.sg_tablesize		= MV_MAX_SG_CT,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.use_clustering		= 1,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= MV_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
@@ -405,6 +401,7 @@ static const struct ata_port_operations mv5_ops = {
 	.dev_select		= ata_std_dev_select,
 
 	.phy_reset		= mv_phy_reset,
+	.cable_detect		= ata_cable_sata,
 
 	.qc_prep		= mv_qc_prep,
 	.qc_issue		= mv_qc_issue,
@@ -412,7 +409,6 @@ static const struct ata_port_operations mv5_ops = {
 
 	.eng_timeout		= mv_eng_timeout,
 
-	.irq_handler		= mv_interrupt,
 	.irq_clear		= mv_irq_clear,
 	.irq_on			= ata_irq_on,
 	.irq_ack		= ata_irq_ack,
@@ -434,6 +430,7 @@ static const struct ata_port_operations mv6_ops = {
 	.dev_select		= ata_std_dev_select,
 
 	.phy_reset		= mv_phy_reset,
+	.cable_detect		= ata_cable_sata,
 
 	.qc_prep		= mv_qc_prep,
 	.qc_issue		= mv_qc_issue,
@@ -441,7 +438,6 @@ static const struct ata_port_operations mv6_ops = {
 
 	.eng_timeout		= mv_eng_timeout,
 
-	.irq_handler		= mv_interrupt,
 	.irq_clear		= mv_irq_clear,
 	.irq_on			= ata_irq_on,
 	.irq_ack		= ata_irq_ack,
@@ -463,6 +459,7 @@ static const struct ata_port_operations mv_iie_ops = {
 	.dev_select		= ata_std_dev_select,
 
 	.phy_reset		= mv_phy_reset,
+	.cable_detect		= ata_cable_sata,
 
 	.qc_prep		= mv_qc_prep_iie,
 	.qc_issue		= mv_qc_issue,
@@ -470,7 +467,6 @@ static const struct ata_port_operations mv_iie_ops = {
 
 	.eng_timeout		= mv_eng_timeout,
 
-	.irq_handler		= mv_interrupt,
 	.irq_clear		= mv_irq_clear,
 	.irq_on			= ata_irq_on,
 	.irq_ack		= ata_irq_ack,
@@ -484,35 +480,30 @@ static const struct ata_port_operations mv_iie_ops = {
 
 static const struct ata_port_info mv_port_info[] = {
 	{  /* chip_504x */
-		.sht		= &mv_sht,
 		.flags		= MV_COMMON_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &mv5_ops,
 	},
 	{  /* chip_508x */
-		.sht		= &mv_sht,
 		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &mv5_ops,
 	},
 	{  /* chip_5080 */
-		.sht		= &mv_sht,
 		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &mv5_ops,
 	},
 	{  /* chip_604x */
-		.sht		= &mv_sht,
 		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &mv6_ops,
 	},
 	{  /* chip_608x */
-		.sht		= &mv_sht,
 		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
 				   MV_FLAG_DUAL_HC),
 		.pio_mask	= 0x1f,	/* pio0-4 */
@@ -520,14 +511,12 @@ static const struct ata_port_info mv_port_info[] = {
 		.port_ops	= &mv6_ops,
 	},
 	{  /* chip_6042 */
-		.sht		= &mv_sht,
 		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &mv_iie_ops,
 	},
 	{  /* chip_7042 */
-		.sht		= &mv_sht,
 		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
@@ -551,6 +540,9 @@ static const struct pci_device_id mv_pci_tbl[] = {
 
 	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
 
+	/* add Marvell 7042 support */
+	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
+
 	{ }			/* terminate list */
 };
 
@@ -585,6 +577,39 @@ static const struct mv_hw_ops mv6xxx_ops = {
 static int msi;		/* Use PCI msi; either zero (off, default) or non-zero */
 
 
+/* move to PCI layer or libata core? */
+static int pci_go_64(struct pci_dev *pdev)
+{
+	int rc;
+
+	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+		if (rc) {
+			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+			if (rc) {
+				dev_printk(KERN_ERR, &pdev->dev,
+					   "64-bit DMA enable failed\n");
+				return rc;
+			}
+		}
+	} else {
+		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		if (rc) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				   "32-bit DMA enable failed\n");
+			return rc;
+		}
+		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+		if (rc) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				   "32-bit consistent DMA enable failed\n");
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
 /*
  * Functions
  */
@@ -798,20 +823,18 @@ static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
 {
 	unsigned int ofs = mv_scr_offset(sc_reg_in);
 
-	if (0xffffffffU != ofs) {
+	if (0xffffffffU != ofs)
 		return readl(mv_ap_base(ap) + ofs);
-	} else {
+	else
 		return (u32) ofs;
-	}
 }
 
 static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
 {
 	unsigned int ofs = mv_scr_offset(sc_reg_in);
 
-	if (0xffffffffU != ofs) {
+	if (0xffffffffU != ofs)
 		writelfl(val, mv_ap_base(ap) + ofs);
-	}
 }
 
 static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
@@ -959,38 +982,30 @@ static void mv_port_stop(struct ata_port *ap)
  * LOCKING:
  * Inherited from caller.
  */
-static void mv_fill_sg(struct ata_queued_cmd *qc)
+static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
 {
 	struct mv_port_priv *pp = qc->ap->private_data;
-	unsigned int i = 0;
+	unsigned int n_sg = 0;
 	struct scatterlist *sg;
+	struct mv_sg *mv_sg;
 
+	mv_sg = pp->sg_tbl;
 	ata_for_each_sg(sg, qc) {
-		dma_addr_t addr;
-		u32 sg_len, len, offset;
-
-		addr = sg_dma_address(sg);
-		sg_len = sg_dma_len(sg);
-
-		while (sg_len) {
-			offset = addr & MV_DMA_BOUNDARY;
-			len = sg_len;
-			if ((offset + sg_len) > 0x10000)
-				len = 0x10000 - offset;
+		dma_addr_t addr = sg_dma_address(sg);
+		u32 sg_len = sg_dma_len(sg);
 
-			pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
-			pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
-			pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff);
+		mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
+		mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
+		mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);
 
-			sg_len -= len;
-			addr += len;
+		if (ata_sg_is_last(sg, qc))
+			mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
 
-			if (!sg_len && ata_sg_is_last(sg, qc))
-				pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
-
-			i++;
-		}
+		mv_sg++;
+		n_sg++;
 	}
+
+	return n_sg;
 }
 
 static inline unsigned mv_inc_q_index(unsigned index)
@@ -1320,17 +1335,15 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
 	int shift, port, port0, hard_port, handled;
 	unsigned int err_mask;
 
-	if (hc == 0) {
+	if (hc == 0)
 		port0 = 0;
-	} else {
+	else
 		port0 = MV_PORTS_PER_HC;
-	}
 
 	/* we'll need the HC success int register in most cases */
 	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
-	if (hc_irq_cause) {
+	if (hc_irq_cause)
 		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
-	}
 
 	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
 		hc,relevant,hc_irq_cause);
@@ -1425,9 +1438,8 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
 	/* check the cases where we either have nothing pending or have read
 	 * a bogus register value which can indicate HW removal or PCI fault
 	 */
-	if (!irq_stat || (0xffffffffU == irq_stat)) {
+	if (!irq_stat || (0xffffffffU == irq_stat))
 		return IRQ_NONE;
-	}
 
 	n_hcs = mv_get_hc_count(host->ports[0]->flags);
 	spin_lock(&host->lock);
@@ -1952,7 +1964,6 @@ comreset_retry:
 		ata_port_disable(ap);
 		return;
 	}
-	ap->cbl = ATA_CBL_SATA;
 
 	/* even after SStatus reflects that device is ready,
 	 * it seems to take a while for link to be fully
@@ -2077,9 +2088,10 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
 		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
 }
 
-static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
-		      unsigned int board_idx)
+static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
 {
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	struct mv_host_priv *hpriv = host->private_data;
 	u8 rev_id;
 	u32 hp_flags = hpriv->hp_flags;
 
@@ -2177,8 +2189,8 @@ static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
 
 /**
  * mv_init_host - Perform some early initialization of the host.
- * @pdev: host PCI device
- * @probe_ent: early data struct representing the host
+ * @host: ATA host to initialize
+ * @board_idx: controller index
  *
  * If possible, do an early global reset of the host. Then do
  * our port init and clear/unmask all/relevant host interrupts.
@@ -2186,24 +2198,23 @@ static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
  * LOCKING:
  * Inherited from caller.
  */
-static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
-			unsigned int board_idx)
+static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 {
 	int rc = 0, n_hc, port, hc;
-	void __iomem *mmio = probe_ent->iomap[MV_PRIMARY_BAR];
-	struct mv_host_priv *hpriv = probe_ent->private_data;
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
+	struct mv_host_priv *hpriv = host->private_data;
 
 	/* global interrupt mask */
 	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
 
-	rc = mv_chip_id(pdev, hpriv, board_idx);
+	rc = mv_chip_id(host, board_idx);
 	if (rc)
 		goto done;
 
-	n_hc = mv_get_hc_count(probe_ent->port_flags);
-	probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;
+	n_hc = mv_get_hc_count(host->ports[0]->flags);
 
-	for (port = 0; port < probe_ent->n_ports; port++)
+	for (port = 0; port < host->n_ports; port++)
 		hpriv->ops->read_preamp(hpriv, port, mmio);
 
 	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
@@ -2214,7 +2225,7 @@ static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
 	hpriv->ops->reset_bus(pdev, mmio);
 	hpriv->ops->enable_leds(hpriv, mmio);
 
-	for (port = 0; port < probe_ent->n_ports; port++) {
+	for (port = 0; port < host->n_ports; port++) {
 		if (IS_60XX(hpriv)) {
 			void __iomem *port_mmio = mv_port_base(mmio, port);
 
@@ -2227,9 +2238,9 @@ static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
 		hpriv->ops->phy_errata(hpriv, mmio, port);
 	}
 
-	for (port = 0; port < probe_ent->n_ports; port++) {
+	for (port = 0; port < host->n_ports; port++) {
 		void __iomem *port_mmio = mv_port_base(mmio, port);
-		mv_port_init(&probe_ent->port[port], port_mmio);
+		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
 	}
 
 	for (hc = 0; hc < n_hc; hc++) {
@@ -2268,17 +2279,17 @@ done:
 
 /**
  * mv_print_info - Dump key info to kernel log for perusal.
- * @probe_ent: early data struct representing the host
+ * @host: ATA host to print info about
  *
  * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
-static void mv_print_info(struct ata_probe_ent *probe_ent)
+static void mv_print_info(struct ata_host *host)
 {
-	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
-	struct mv_host_priv *hpriv = probe_ent->private_data;
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	struct mv_host_priv *hpriv = host->private_data;
 	u8 rev_id, scc;
 	const char *scc_s;
 
@@ -2297,7 +2308,7 @@ static void mv_print_info(struct ata_probe_ent *probe_ent)
 
 	dev_printk(KERN_INFO, &pdev->dev,
 	       "%u slots %u ports %s mode IRQ via %s\n",
-	       (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
+	       (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
 	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
 }
 
@@ -2312,50 +2323,42 @@ static void mv_print_info(struct ata_probe_ent *probe_ent)
 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version = 0;
-	struct device *dev = &pdev->dev;
-	struct ata_probe_ent *probe_ent;
-	struct mv_host_priv *hpriv;
 	unsigned int board_idx = (unsigned int)ent->driver_data;
-	int rc;
+	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
+	struct ata_host *host;
+	struct mv_host_priv *hpriv;
+	int n_ports, rc;
 
 	if (!printed_version++)
 		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
 
+	/* allocate host */
+	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
+
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!host || !hpriv)
+		return -ENOMEM;
+	host->private_data = hpriv;
+
+	/* acquire resources */
 	rc = pcim_enable_device(pdev);
 	if (rc)
 		return rc;
-	pci_set_master(pdev);
 
 	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
 	if (rc == -EBUSY)
 		pcim_pin_device(pdev);
 	if (rc)
 		return rc;
+	host->iomap = pcim_iomap_table(pdev);
 
-	probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
-	if (probe_ent == NULL)
-		return -ENOMEM;
-
-	probe_ent->dev = pci_dev_to_dev(pdev);
-	INIT_LIST_HEAD(&probe_ent->node);
-
-	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
-	if (!hpriv)
-		return -ENOMEM;
-
-	probe_ent->sht = mv_port_info[board_idx].sht;
-	probe_ent->port_flags = mv_port_info[board_idx].flags;
-	probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
-	probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
-	probe_ent->port_ops = mv_port_info[board_idx].port_ops;
-
-	probe_ent->irq = pdev->irq;
-	probe_ent->irq_flags = IRQF_SHARED;
-	probe_ent->iomap = pcim_iomap_table(pdev);
-	probe_ent->private_data = hpriv;
+	rc = pci_go_64(pdev);
+	if (rc)
+		return rc;
 
 	/* initialize adapter */
-	rc = mv_init_host(pdev, probe_ent, board_idx);
+	rc = mv_init_host(host, board_idx);
 	if (rc)
 		return rc;
 
@@ -2364,13 +2367,11 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	pci_intx(pdev, 1);
 
 	mv_dump_pci_cfg(pdev, 0x68);
-	mv_print_info(probe_ent);
+	mv_print_info(host);
 
-	if (ata_device_add(probe_ent) == 0)
-		return -ENODEV;
-
-	devm_kfree(dev, probe_ent);
-	return 0;
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
+				 &mv_sht);
 }
 
 static int __init mv_init(void)