diff options
author | Brett Russ <russb@emc.com> | 2005-10-05 17:08:53 -0400 |
---|---|---|
committer | Jeff Garzik <jgarzik@pobox.com> | 2005-10-05 17:16:52 -0400 |
commit | 05b308e1df6d9d673daedb517969241f41278b52 (patch) | |
tree | b7114fdbddb7fef974744a5281896c9e6bf6c1d4 /drivers/scsi/sata_mv.c | |
parent | afb0edd922c7ed6e73678730921dfcccebec17e8 (diff) |
[PATCH] libata: Marvell function headers
adds helpful function header comments.
Signed-off-by: Brett Russ <russb@emc.com>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
Diffstat (limited to 'drivers/scsi/sata_mv.c')
-rw-r--r-- | drivers/scsi/sata_mv.c | 225 |
1 files changed, 219 insertions, 6 deletions
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c index c3084f8b3ee7..84b488f81c75 100644 --- a/drivers/scsi/sata_mv.c +++ b/drivers/scsi/sata_mv.c | |||
@@ -35,7 +35,7 @@ | |||
35 | #include <asm/io.h> | 35 | #include <asm/io.h> |
36 | 36 | ||
37 | #define DRV_NAME "sata_mv" | 37 | #define DRV_NAME "sata_mv" |
38 | #define DRV_VERSION "0.23" | 38 | #define DRV_VERSION "0.24" |
39 | 39 | ||
40 | enum { | 40 | enum { |
41 | /* BAR's are enumerated in terms of pci_resource_start() terms */ | 41 | /* BAR's are enumerated in terms of pci_resource_start() terms */ |
@@ -406,6 +406,17 @@ static void mv_irq_clear(struct ata_port *ap) | |||
406 | { | 406 | { |
407 | } | 407 | } |
408 | 408 | ||
409 | /** | ||
410 | * mv_start_dma - Enable eDMA engine | ||
411 | * @base: port base address | ||
412 | * @pp: port private data | ||
413 | * | ||
414 | * Verify the local cache of the eDMA state is accurate with an | ||
415 | * assert. | ||
416 | * | ||
417 | * LOCKING: | ||
418 | * Inherited from caller. | ||
419 | */ | ||
409 | static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp) | 420 | static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp) |
410 | { | 421 | { |
411 | if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) { | 422 | if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) { |
@@ -415,6 +426,16 @@ static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp) | |||
415 | assert(EDMA_EN & readl(base + EDMA_CMD_OFS)); | 426 | assert(EDMA_EN & readl(base + EDMA_CMD_OFS)); |
416 | } | 427 | } |
417 | 428 | ||
429 | /** | ||
430 | * mv_stop_dma - Disable eDMA engine | ||
431 | * @ap: ATA channel to manipulate | ||
432 | * | ||
433 | * Verify the local cache of the eDMA state is accurate with an | ||
434 | * assert. | ||
435 | * | ||
436 | * LOCKING: | ||
437 | * Inherited from caller. | ||
438 | */ | ||
418 | static void mv_stop_dma(struct ata_port *ap) | 439 | static void mv_stop_dma(struct ata_port *ap) |
419 | { | 440 | { |
420 | void __iomem *port_mmio = mv_ap_base(ap); | 441 | void __iomem *port_mmio = mv_ap_base(ap); |
@@ -561,7 +582,15 @@ static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) | |||
561 | } | 582 | } |
562 | } | 583 | } |
563 | 584 | ||
564 | /* This routine only applies to 6xxx parts */ | 585 | /** |
586 | * mv_global_soft_reset - Perform the 6xxx global soft reset | ||
587 | * @mmio_base: base address of the HBA | ||
588 | * | ||
589 | * This routine only applies to 6xxx parts. | ||
590 | * | ||
591 | * LOCKING: | ||
592 | * Inherited from caller. | ||
593 | */ | ||
565 | static int mv_global_soft_reset(void __iomem *mmio_base) | 594 | static int mv_global_soft_reset(void __iomem *mmio_base) |
566 | { | 595 | { |
567 | void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS; | 596 | void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS; |
@@ -617,6 +646,16 @@ done: | |||
617 | return rc; | 646 | return rc; |
618 | } | 647 | } |
619 | 648 | ||
649 | /** | ||
650 | * mv_host_stop - Host specific cleanup/stop routine. | ||
651 | * @host_set: host data structure | ||
652 | * | ||
653 | * Disable ints, cleanup host memory, call general purpose | ||
654 | * host_stop. | ||
655 | * | ||
656 | * LOCKING: | ||
657 | * Inherited from caller. | ||
658 | */ | ||
620 | static void mv_host_stop(struct ata_host_set *host_set) | 659 | static void mv_host_stop(struct ata_host_set *host_set) |
621 | { | 660 | { |
622 | struct mv_host_priv *hpriv = host_set->private_data; | 661 | struct mv_host_priv *hpriv = host_set->private_data; |
@@ -631,6 +670,16 @@ static void mv_host_stop(struct ata_host_set *host_set) | |||
631 | ata_host_stop(host_set); | 670 | ata_host_stop(host_set); |
632 | } | 671 | } |
633 | 672 | ||
673 | /** | ||
674 | * mv_port_start - Port specific init/start routine. | ||
675 | * @ap: ATA channel to manipulate | ||
676 | * | ||
677 | * Allocate and point to DMA memory, init port private memory, | ||
678 | * zero indices. | ||
679 | * | ||
680 | * LOCKING: | ||
681 | * Inherited from caller. | ||
682 | */ | ||
634 | static int mv_port_start(struct ata_port *ap) | 683 | static int mv_port_start(struct ata_port *ap) |
635 | { | 684 | { |
636 | struct device *dev = ap->host_set->dev; | 685 | struct device *dev = ap->host_set->dev; |
@@ -699,6 +748,15 @@ static int mv_port_start(struct ata_port *ap) | |||
699 | return 0; | 748 | return 0; |
700 | } | 749 | } |
701 | 750 | ||
751 | /** | ||
752 | * mv_port_stop - Port specific cleanup/stop routine. | ||
753 | * @ap: ATA channel to manipulate | ||
754 | * | ||
755 | * Stop DMA, cleanup port memory. | ||
756 | * | ||
757 | * LOCKING: | ||
758 | * This routine uses the host_set lock to protect the DMA stop. | ||
759 | */ | ||
702 | static void mv_port_stop(struct ata_port *ap) | 760 | static void mv_port_stop(struct ata_port *ap) |
703 | { | 761 | { |
704 | struct device *dev = ap->host_set->dev; | 762 | struct device *dev = ap->host_set->dev; |
@@ -714,6 +772,15 @@ static void mv_port_stop(struct ata_port *ap) | |||
714 | kfree(pp); | 772 | kfree(pp); |
715 | } | 773 | } |
716 | 774 | ||
775 | /** | ||
776 | * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries | ||
777 | * @qc: queued command whose SG list to source from | ||
778 | * | ||
779 | * Populate the SG list and mark the last entry. | ||
780 | * | ||
781 | * LOCKING: | ||
782 | * Inherited from caller. | ||
783 | */ | ||
717 | static void mv_fill_sg(struct ata_queued_cmd *qc) | 784 | static void mv_fill_sg(struct ata_queued_cmd *qc) |
718 | { | 785 | { |
719 | struct mv_port_priv *pp = qc->ap->private_data; | 786 | struct mv_port_priv *pp = qc->ap->private_data; |
@@ -748,6 +815,18 @@ static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last) | |||
748 | (last ? CRQB_CMD_LAST : 0); | 815 | (last ? CRQB_CMD_LAST : 0); |
749 | } | 816 | } |
750 | 817 | ||
818 | /** | ||
819 | * mv_qc_prep - Host specific command preparation. | ||
820 | * @qc: queued command to prepare | ||
821 | * | ||
822 | * This routine simply redirects to the general purpose routine | ||
823 | * if command is not DMA. Else, it handles prep of the CRQB | ||
824 | * (command request block), does some sanity checking, and calls | ||
825 | * the SG load routine. | ||
826 | * | ||
827 | * LOCKING: | ||
828 | * Inherited from caller. | ||
829 | */ | ||
751 | static void mv_qc_prep(struct ata_queued_cmd *qc) | 830 | static void mv_qc_prep(struct ata_queued_cmd *qc) |
752 | { | 831 | { |
753 | struct ata_port *ap = qc->ap; | 832 | struct ata_port *ap = qc->ap; |
@@ -830,6 +909,18 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) | |||
830 | mv_fill_sg(qc); | 909 | mv_fill_sg(qc); |
831 | } | 910 | } |
832 | 911 | ||
912 | /** | ||
913 | * mv_qc_issue - Initiate a command to the host | ||
914 | * @qc: queued command to start | ||
915 | * | ||
916 | * This routine simply redirects to the general purpose routine | ||
917 | * if command is not DMA. Else, it sanity checks our local | ||
918 | * caches of the request producer/consumer indices then enables | ||
919 | * DMA and bumps the request producer index. | ||
920 | * | ||
921 | * LOCKING: | ||
922 | * Inherited from caller. | ||
923 | */ | ||
833 | static int mv_qc_issue(struct ata_queued_cmd *qc) | 924 | static int mv_qc_issue(struct ata_queued_cmd *qc) |
834 | { | 925 | { |
835 | void __iomem *port_mmio = mv_ap_base(qc->ap); | 926 | void __iomem *port_mmio = mv_ap_base(qc->ap); |
@@ -867,6 +958,19 @@ static int mv_qc_issue(struct ata_queued_cmd *qc) | |||
867 | return 0; | 958 | return 0; |
868 | } | 959 | } |
869 | 960 | ||
961 | /** | ||
962 | * mv_get_crpb_status - get status from most recently completed cmd | ||
963 | * @ap: ATA channel to manipulate | ||
964 | * | ||
965 | * This routine is for use when the port is in DMA mode, when it | ||
966 | * will be using the CRPB (command response block) method of | ||
967 | * returning command completion information. We assert indices | ||
968 | * are good, grab status, and bump the response consumer index to | ||
969 | * prove that we're up to date. | ||
970 | * | ||
971 | * LOCKING: | ||
972 | * Inherited from caller. | ||
973 | */ | ||
870 | static u8 mv_get_crpb_status(struct ata_port *ap) | 974 | static u8 mv_get_crpb_status(struct ata_port *ap) |
871 | { | 975 | { |
872 | void __iomem *port_mmio = mv_ap_base(ap); | 976 | void __iomem *port_mmio = mv_ap_base(ap); |
@@ -896,6 +1000,19 @@ static u8 mv_get_crpb_status(struct ata_port *ap) | |||
896 | return (pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT); | 1000 | return (pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT); |
897 | } | 1001 | } |
898 | 1002 | ||
1003 | /** | ||
1004 | * mv_err_intr - Handle error interrupts on the port | ||
1005 | * @ap: ATA channel to manipulate | ||
1006 | * | ||
1007 | * In most cases, just clear the interrupt and move on. However, | ||
1008 | * some cases require an eDMA reset, which is done right before | ||
1009 | * the COMRESET in mv_phy_reset(). The SERR case requires a | ||
1010 | * clear of pending errors in the SATA SERROR register. Finally, | ||
1011 | * if the port disabled DMA, update our cached copy to match. | ||
1012 | * | ||
1013 | * LOCKING: | ||
1014 | * Inherited from caller. | ||
1015 | */ | ||
899 | static void mv_err_intr(struct ata_port *ap) | 1016 | static void mv_err_intr(struct ata_port *ap) |
900 | { | 1017 | { |
901 | void __iomem *port_mmio = mv_ap_base(ap); | 1018 | void __iomem *port_mmio = mv_ap_base(ap); |
@@ -923,7 +1040,22 @@ static void mv_err_intr(struct ata_port *ap) | |||
923 | } | 1040 | } |
924 | } | 1041 | } |
925 | 1042 | ||
926 | /* Handle any outstanding interrupts in a single SATAHC */ | 1043 | /** |
1044 | * mv_host_intr - Handle all interrupts on the given host controller | ||
1045 | * @host_set: host specific structure | ||
1046 | * @relevant: port error bits relevant to this host controller | ||
1047 | * @hc: which host controller we're to look at | ||
1048 | * | ||
1049 | * Read then write clear the HC interrupt status then walk each | ||
1050 | * port connected to the HC and see if it needs servicing. Port | ||
1051 | * success ints are reported in the HC interrupt status reg, the | ||
1052 | * port error ints are reported in the higher level main | ||
1053 | * interrupt status register and thus are passed in via the | ||
1054 | * 'relevant' argument. | ||
1055 | * | ||
1056 | * LOCKING: | ||
1057 | * Inherited from caller. | ||
1058 | */ | ||
927 | static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, | 1059 | static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, |
928 | unsigned int hc) | 1060 | unsigned int hc) |
929 | { | 1061 | { |
@@ -993,6 +1125,21 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, | |||
993 | VPRINTK("EXIT\n"); | 1125 | VPRINTK("EXIT\n"); |
994 | } | 1126 | } |
995 | 1127 | ||
1128 | /** | ||
1129 | * mv_interrupt - | ||
1130 | * @irq: unused | ||
1131 | * @dev_instance: private data; in this case the host structure | ||
1132 | * @regs: unused | ||
1133 | * | ||
1134 | * Read the read only register to determine if any host | ||
1135 | * controllers have pending interrupts. If so, call lower level | ||
1136 | * routine to handle. Also check for PCI errors which are only | ||
1137 | * reported here. | ||
1138 | * | ||
1139 | * LOCKING: | ||
1140 | * This routine holds the host_set lock while processing pending | ||
1141 | * interrupts. | ||
1142 | */ | ||
996 | static irqreturn_t mv_interrupt(int irq, void *dev_instance, | 1143 | static irqreturn_t mv_interrupt(int irq, void *dev_instance, |
997 | struct pt_regs *regs) | 1144 | struct pt_regs *regs) |
998 | { | 1145 | { |
@@ -1035,14 +1182,32 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance, | |||
1035 | return IRQ_RETVAL(handled); | 1182 | return IRQ_RETVAL(handled); |
1036 | } | 1183 | } |
1037 | 1184 | ||
1185 | /** | ||
1186 | * mv_check_err - Return the error shadow register to caller. | ||
1187 | * @ap: ATA channel to manipulate | ||
1188 | * | ||
1189 | * Marvell requires DMA to be stopped before accessing shadow | ||
1190 | * registers. So we do that, then return the needed register. | ||
1191 | * | ||
1192 | * LOCKING: | ||
1193 | * Inherited from caller. FIXME: protect mv_stop_dma with lock? | ||
1194 | */ | ||
1038 | static u8 mv_check_err(struct ata_port *ap) | 1195 | static u8 mv_check_err(struct ata_port *ap) |
1039 | { | 1196 | { |
1040 | mv_stop_dma(ap); /* can't read shadow regs if DMA on */ | 1197 | mv_stop_dma(ap); /* can't read shadow regs if DMA on */ |
1041 | return readb((void __iomem *) ap->ioaddr.error_addr); | 1198 | return readb((void __iomem *) ap->ioaddr.error_addr); |
1042 | } | 1199 | } |
1043 | 1200 | ||
1044 | /* Part of this is taken from __sata_phy_reset and modified to not sleep | 1201 | /** |
1045 | * since this routine gets called from interrupt level. | 1202 | * mv_phy_reset - Perform eDMA reset followed by COMRESET |
1203 | * @ap: ATA channel to manipulate | ||
1204 | * | ||
1205 | * Part of this is taken from __sata_phy_reset and modified to | ||
1206 | * not sleep since this routine gets called from interrupt level. | ||
1207 | * | ||
1208 | * LOCKING: | ||
1209 | * Inherited from caller. This is coded to safe to call at | ||
1210 | * interrupt level, i.e. it does not sleep. | ||
1046 | */ | 1211 | */ |
1047 | static void mv_phy_reset(struct ata_port *ap) | 1212 | static void mv_phy_reset(struct ata_port *ap) |
1048 | { | 1213 | { |
@@ -1105,6 +1270,16 @@ static void mv_phy_reset(struct ata_port *ap) | |||
1105 | VPRINTK("EXIT\n"); | 1270 | VPRINTK("EXIT\n"); |
1106 | } | 1271 | } |
1107 | 1272 | ||
1273 | /** | ||
1274 | * mv_eng_timeout - Routine called by libata when SCSI times out I/O | ||
1275 | * @ap: ATA channel to manipulate | ||
1276 | * | ||
1277 | * Intent is to clear all pending error conditions, reset the | ||
1278 | * chip/bus, fail the command, and move on. | ||
1279 | * | ||
1280 | * LOCKING: | ||
1281 | * This routine holds the host_set lock while failing the command. | ||
1282 | */ | ||
1108 | static void mv_eng_timeout(struct ata_port *ap) | 1283 | static void mv_eng_timeout(struct ata_port *ap) |
1109 | { | 1284 | { |
1110 | struct ata_queued_cmd *qc; | 1285 | struct ata_queued_cmd *qc; |
@@ -1140,6 +1315,18 @@ static void mv_eng_timeout(struct ata_port *ap) | |||
1140 | } | 1315 | } |
1141 | } | 1316 | } |
1142 | 1317 | ||
1318 | /** | ||
1319 | * mv_port_init - Perform some early initialization on a single port. | ||
1320 | * @port: libata data structure storing shadow register addresses | ||
1321 | * @port_mmio: base address of the port | ||
1322 | * | ||
1323 | * Initialize shadow register mmio addresses, clear outstanding | ||
1324 | * interrupts on the port, and unmask interrupts for the future | ||
1325 | * start of the port. | ||
1326 | * | ||
1327 | * LOCKING: | ||
1328 | * Inherited from caller. | ||
1329 | */ | ||
1143 | static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) | 1330 | static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) |
1144 | { | 1331 | { |
1145 | unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS; | 1332 | unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS; |
@@ -1177,6 +1364,16 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) | |||
1177 | readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS)); | 1364 | readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS)); |
1178 | } | 1365 | } |
1179 | 1366 | ||
1367 | /** | ||
1368 | * mv_host_init - Perform some early initialization of the host. | ||
1369 | * @probe_ent: early data struct representing the host | ||
1370 | * | ||
1371 | * If possible, do an early global reset of the host. Then do | ||
1372 | * our port init and clear/unmask all/relevant host interrupts. | ||
1373 | * | ||
1374 | * LOCKING: | ||
1375 | * Inherited from caller. | ||
1376 | */ | ||
1180 | static int mv_host_init(struct ata_probe_ent *probe_ent) | 1377 | static int mv_host_init(struct ata_probe_ent *probe_ent) |
1181 | { | 1378 | { |
1182 | int rc = 0, n_hc, port, hc; | 1379 | int rc = 0, n_hc, port, hc; |
@@ -1226,7 +1423,15 @@ done: | |||
1226 | return rc; | 1423 | return rc; |
1227 | } | 1424 | } |
1228 | 1425 | ||
1229 | /* FIXME: complete this */ | 1426 | /** |
1427 | * mv_print_info - Dump key info to kernel log for perusal. | ||
1428 | * @probe_ent: early data struct representing the host | ||
1429 | * | ||
1430 | * FIXME: complete this. | ||
1431 | * | ||
1432 | * LOCKING: | ||
1433 | * Inherited from caller. | ||
1434 | */ | ||
1230 | static void mv_print_info(struct ata_probe_ent *probe_ent) | 1435 | static void mv_print_info(struct ata_probe_ent *probe_ent) |
1231 | { | 1436 | { |
1232 | struct pci_dev *pdev = to_pci_dev(probe_ent->dev); | 1437 | struct pci_dev *pdev = to_pci_dev(probe_ent->dev); |
@@ -1253,6 +1458,14 @@ static void mv_print_info(struct ata_probe_ent *probe_ent) | |||
1253 | scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx"); | 1458 | scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx"); |
1254 | } | 1459 | } |
1255 | 1460 | ||
1461 | /** | ||
1462 | * mv_init_one - handle a positive probe of a Marvell host | ||
1463 | * @pdev: PCI device found | ||
1464 | * @ent: PCI device ID entry for the matched host | ||
1465 | * | ||
1466 | * LOCKING: | ||
1467 | * Inherited from caller. | ||
1468 | */ | ||
1256 | static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 1469 | static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
1257 | { | 1470 | { |
1258 | static int printed_version = 0; | 1471 | static int printed_version = 0; |