Diffstat (limited to 'drivers/ata/sata_mv.c')
-rw-r--r-- drivers/ata/sata_mv.c | 80
1 file changed, 37 insertions(+), 43 deletions(-)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 86918634a4c5..7007edd2d451 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -33,10 +33,6 @@
  *
  * --> ATAPI support (Marvell claims the 60xx/70xx chips can do it).
  *
- * --> Investigate problems with PCI Message Signalled Interrupts (MSI).
- *
- * --> Cache frequently-accessed registers in mv_port_priv to reduce overhead.
- *
  * --> Develop a low-power-consumption strategy, and implement it.
  *
  * --> [Experiment, low priority] Investigate interrupt coalescing.
@@ -72,7 +68,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"sata_mv"
-#define DRV_VERSION	"1.24"
+#define DRV_VERSION	"1.25"
 
 enum {
 	/* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -351,8 +347,6 @@ enum {
 
 	EDMA_HALTCOND_OFS	= 0x60,		/* GenIIe halt conditions */
 
-	GEN_II_NCQ_MAX_SECTORS	= 256,		/* max sects/io on Gen2 w/NCQ */
-
 	/* Host private flags (hp_flags) */
 	MV_HP_FLAG_MSI		= (1 << 0),
 	MV_HP_ERRATA_50XXB0	= (1 << 1),
@@ -669,8 +663,8 @@ static const struct pci_device_id mv_pci_tbl[] = {
 	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
 	/* RocketRAID 1720/174x have different identifiers */
 	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
-	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
-	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },
+	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
+	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },
 
 	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
 	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
@@ -883,19 +877,15 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
 	struct mv_host_priv *hpriv = ap->host->private_data;
 	int hardport = mv_hardport_from_port(ap->port_no);
 	void __iomem *hc_mmio = mv_hc_base_from_port(
-					mv_host_base(ap->host), hardport);
-	u32 hc_irq_cause, ipending;
+					mv_host_base(ap->host), ap->port_no);
+	u32 hc_irq_cause;
 
 	/* clear EDMA event indicators, if any */
 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 
-	/* clear EDMA interrupt indicator, if any */
-	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
-	ipending = (DEV_IRQ | DMA_IRQ) << hardport;
-	if (hc_irq_cause & ipending) {
-		writelfl(hc_irq_cause & ~ipending,
-			 hc_mmio + HC_IRQ_CAUSE_OFS);
-	}
+	/* clear pending irq events */
+	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
+	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
 
 	mv_edma_cfg(ap, want_ncq);
 
@@ -1099,20 +1089,12 @@ static void mv6_dev_config(struct ata_device *adev)
 	 *
 	 * Gen-II does not support NCQ over a port multiplier
 	 * (no FIS-based switching).
-	 *
-	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
-	 * See mv_qc_prep() for more info.
 	 */
 	if (adev->flags & ATA_DFLAG_NCQ) {
 		if (sata_pmp_attached(adev->link->ap)) {
 			adev->flags &= ~ATA_DFLAG_NCQ;
 			ata_dev_printk(adev, KERN_INFO,
 				"NCQ disabled for command-based switching\n");
-		} else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) {
-			adev->max_sectors = GEN_II_NCQ_MAX_SECTORS;
-			ata_dev_printk(adev, KERN_INFO,
-				"max_sectors limited to %u for NCQ\n",
-				adev->max_sectors);
 		}
 	}
 }
@@ -1450,7 +1432,8 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	 * only 11 bytes...so we must pick and choose required
 	 * registers based on the command. So, we drop feature and
 	 * hob_feature for [RW] DMA commands, but they are needed for
-	 * NCQ. NCQ will drop hob_nsect.
+	 * NCQ. NCQ will drop hob_nsect, which is not needed there
+	 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
 	 */
 	switch (tf->command) {
 	case ATA_CMD_READ:
@@ -2214,9 +2197,15 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
 	struct ata_host *host = dev_instance;
 	struct mv_host_priv *hpriv = host->private_data;
 	unsigned int handled = 0;
+	int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
 	u32 main_irq_cause, pending_irqs;
 
 	spin_lock(&host->lock);
+
+	/* for MSI: block new interrupts while in here */
+	if (using_msi)
+		writel(0, hpriv->main_irq_mask_addr);
+
 	main_irq_cause = readl(hpriv->main_irq_cause_addr);
 	pending_irqs = main_irq_cause & hpriv->main_irq_mask;
 	/*
@@ -2230,6 +2219,11 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
 		handled = mv_host_intr(host, pending_irqs);
 	}
 	spin_unlock(&host->lock);
+
+	/* for MSI: unmask; interrupt cause bits will retrigger now */
+	if (using_msi)
+		writel(hpriv->main_irq_mask, hpriv->main_irq_mask_addr);
+
 	return IRQ_RETVAL(handled);
 }
 
@@ -2821,8 +2815,7 @@ static void mv_eh_thaw(struct ata_port *ap)
 	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 
 	/* clear pending irq events */
-	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
-	hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport);
+	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
 	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
 
 	mv_enable_port_irqs(ap, ERR_IRQ);
@@ -3075,6 +3068,9 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 		hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
 	}
 
+	/* initialize shadow irq mask with register's value */
+	hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
+
 	/* global interrupt mask: 0 == mask everything */
 	mv_set_main_irq_mask(host, ~0, 0);
 
@@ -3118,19 +3114,17 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
 	}
 
-	if (!IS_SOC(hpriv)) {
-		/* Clear any currently outstanding host interrupt conditions */
-		writelfl(0, mmio + hpriv->irq_cause_ofs);
+	/* Clear any currently outstanding host interrupt conditions */
+	writelfl(0, mmio + hpriv->irq_cause_ofs);
 
-		/* and unmask interrupt generation for host regs */
-		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
+	/* and unmask interrupt generation for host regs */
+	writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
 
-		/*
-		 * enable only global host interrupts for now.
-		 * The per-port interrupts get done later as ports are set up.
-		 */
-		mv_set_main_irq_mask(host, 0, PCI_ERR);
-	}
+	/*
+	 * enable only global host interrupts for now.
+	 * The per-port interrupts get done later as ports are set up.
+	 */
+	mv_set_main_irq_mask(host, 0, PCI_ERR);
 done:
 	return rc;
 }
@@ -3430,9 +3424,9 @@ static int mv_pci_init_one(struct pci_dev *pdev,
 	if (rc)
 		return rc;
 
-	/* Enable interrupts */
-	if (msi && pci_enable_msi(pdev))
-		pci_intx(pdev, 1);
+	/* Enable message-switched interrupts, if requested */
+	if (msi && pci_enable_msi(pdev) == 0)
+		hpriv->hp_flags |= MV_HP_FLAG_MSI;
 
 	mv_dump_pci_cfg(pdev, 0x68);
 	mv_print_info(host);
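
Editor's note (not part of the patch): the mv_interrupt() hunks above follow a common pattern for
edge-triggered MSI: mask the chip's main interrupt sources on entry to the handler, service the
pending cause bits, then restore the shadow mask so anything that latched while servicing retriggers
a new message instead of being lost. A minimal, self-contained userspace C sketch of that sequence
is below; the register struct, handle_ports(), and isr() are hypothetical stand-ins, not sata_mv
symbols.

/*
 * msi_mask_demo.c -- illustrative sketch only.  Plain variables stand in
 * for the controller's main IRQ mask/cause registers.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_regs {
	uint32_t mask;		/* stands in for the main IRQ mask register  */
	uint32_t cause;		/* stands in for the main IRQ cause register */
};

/* pretend per-port service routine */
static unsigned int handle_ports(uint32_t pending)
{
	printf("servicing irq bits 0x%x\n", pending);
	return 1;
}

static unsigned int isr(struct fake_regs *regs, uint32_t shadow_mask, int using_msi)
{
	uint32_t pending;
	unsigned int handled = 0;

	/*
	 * MSI is edge-triggered: mask the chip while in the handler so a
	 * cause bit that latches during servicing is not lost -- it will
	 * retrigger once the mask is restored below.
	 */
	if (using_msi)
		regs->mask = 0;

	pending = regs->cause & shadow_mask;
	if (pending)
		handled = handle_ports(pending);

	if (using_msi)
		regs->mask = shadow_mask;	/* unmask: latched bits fire again */

	return handled;
}

int main(void)
{
	struct fake_regs regs = { .mask = 0x3, .cause = 0x1 };

	return isr(&regs, regs.mask, 1) ? 0 : 1;
}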