 drivers/ata/sata_mv.c | 102 ++++++++++++++++++++++++++++++++++--------------
 1 files changed, 70 insertions, 32 deletions

diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index d15caf32045..1c53c8a7d21 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -107,14 +107,12 @@ enum {
 
 	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
 	 * CRPB needs alignment on a 256B boundary. Size == 256B
-	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
 	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
 	 */
 	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
 	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
-	MV_MAX_SG_CT		= 176,
+	MV_MAX_SG_CT		= 256,
 	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
-	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
 
 	MV_PORTS_PER_HC		= 4,
 	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
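
Worked through with MV_MAX_Q_DEPTH == 32 (defined elsewhere in this driver, not part of this hunk), the table sizes come out as below; raising MV_MAX_SG_CT to 256 grows the per-port ePRD table to a full 4KB. A minimal sketch of the arithmetic, using illustrative EXAMPLE_ names rather than the driver's own:

	/* Size arithmetic sketch; MV_MAX_Q_DEPTH == 32 is assumed here. */
	enum {
		EXAMPLE_MAX_Q_DEPTH = 32,
		EXAMPLE_CRQB_Q_SZ   = 32 * EXAMPLE_MAX_Q_DEPTH, /* 1024 B: 1KB-aligned CRQB queue   */
		EXAMPLE_CRPB_Q_SZ   =  8 * EXAMPLE_MAX_Q_DEPTH, /*  256 B: 256B-aligned CRPB queue  */
		EXAMPLE_MAX_SG_CT   = 256,
		EXAMPLE_SG_TBL_SZ   = 16 * EXAMPLE_MAX_SG_CT,   /* 4096 B of 16-byte ePRD entries   */
	};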
@@ -421,6 +419,14 @@ struct mv_host_priv {
 	u32			irq_cause_ofs;
 	u32			irq_mask_ofs;
 	u32			unmask_all_irqs;
+	/*
+	 * These consistent DMA memory pools give us guaranteed
+	 * alignment for hardware-accessed data structures,
+	 * and less memory waste in accomplishing the alignment.
+	 */
+	struct dma_pool		*crqb_pool;
+	struct dma_pool		*crpb_pool;
+	struct dma_pool		*sg_tbl_pool;
 };
 
 struct mv_hw_ops {
@@ -1097,6 +1103,25 @@ static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
 	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
 }
 
+static void mv_port_free_dma_mem(struct ata_port *ap)
+{
+	struct mv_host_priv *hpriv = ap->host->private_data;
+	struct mv_port_priv *pp = ap->private_data;
+
+	if (pp->crqb) {
+		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
+		pp->crqb = NULL;
+	}
+	if (pp->crpb) {
+		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
+		pp->crpb = NULL;
+	}
+	if (pp->sg_tbl) {
+		dma_pool_free(hpriv->sg_tbl_pool, pp->sg_tbl, pp->sg_tbl_dma);
+		pp->sg_tbl = NULL;
+	}
+}
+
 /**
  * mv_port_start - Port specific init/start routine.
  * @ap: ATA channel to manipulate
@@ -1113,51 +1138,36 @@ static int mv_port_start(struct ata_port *ap)
 	struct mv_host_priv *hpriv = ap->host->private_data;
 	struct mv_port_priv *pp;
 	void __iomem *port_mmio = mv_ap_base(ap);
-	void *mem;
-	dma_addr_t mem_dma;
 	unsigned long flags;
 	int rc;
 
 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
-
-	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
-				  GFP_KERNEL);
-	if (!mem)
-		return -ENOMEM;
-	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
+	ap->private_data = pp;
 
 	rc = ata_pad_alloc(ap, dev);
 	if (rc)
 		return rc;
 
-	/* First item in chunk of DMA memory:
-	 * 32-slot command request table (CRQB), 32 bytes each in size
-	 */
-	pp->crqb = mem;
-	pp->crqb_dma = mem_dma;
-	mem += MV_CRQB_Q_SZ;
-	mem_dma += MV_CRQB_Q_SZ;
+	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
+	if (!pp->crqb)
+		return -ENOMEM;
+	memset(pp->crqb, 0, MV_CRQB_Q_SZ);
 
-	/* Second item:
-	 * 32-slot command response table (CRPB), 8 bytes each in size
-	 */
-	pp->crpb = mem;
-	pp->crpb_dma = mem_dma;
-	mem += MV_CRPB_Q_SZ;
-	mem_dma += MV_CRPB_Q_SZ;
+	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
+	if (!pp->crpb)
+		goto out_port_free_dma_mem;
+	memset(pp->crpb, 0, MV_CRPB_Q_SZ);
 
-	/* Third item:
-	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
-	 */
-	pp->sg_tbl = mem;
-	pp->sg_tbl_dma = mem_dma;
+	pp->sg_tbl = dma_pool_alloc(hpriv->sg_tbl_pool, GFP_KERNEL,
+				    &pp->sg_tbl_dma);
+	if (!pp->sg_tbl)
+		goto out_port_free_dma_mem;
 
 	spin_lock_irqsave(&ap->host->lock, flags);
 
 	mv_edma_cfg(pp, hpriv, port_mmio, 0);
-
 	mv_set_edma_ptrs(port_mmio, hpriv, pp);
 
 	spin_unlock_irqrestore(&ap->host->lock, flags);
@@ -1166,8 +1176,11 @@ static int mv_port_start(struct ata_port *ap)
 	 * we'll be unable to send non-data, PIO, etc due to restricted access
 	 * to shadow regs.
 	 */
-	ap->private_data = pp;
 	return 0;
+
+out_port_free_dma_mem:
+	mv_port_free_dma_mem(ap);
+	return -ENOMEM;
 }
 
 /**
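
A note on the error path above: a single out_port_free_dma_mem label suffices because mv_port_free_dma_mem() only frees pointers that are non-NULL and clears them afterwards, so it is safe to call no matter how many of the three allocations succeeded. A rough sketch of that pattern with made-up names (struct foo and its helpers are illustrative, not from the driver):

	#include <linux/dmapool.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>

	struct foo {
		struct dma_pool	*pool;
		void		*a, *b;
		dma_addr_t	a_dma, b_dma;
	};

	static void foo_free(struct foo *f)
	{
		/* dma_pool_free() wants the exact (vaddr, dma handle) pair that
		 * dma_pool_alloc() returned, which is why both are kept around.
		 */
		if (f->a) {
			dma_pool_free(f->pool, f->a, f->a_dma);
			f->a = NULL;		/* make repeated calls harmless */
		}
		if (f->b) {
			dma_pool_free(f->pool, f->b, f->b_dma);
			f->b = NULL;
		}
	}

	static int foo_start(struct foo *f)
	{
		f->a = dma_pool_alloc(f->pool, GFP_KERNEL, &f->a_dma);
		if (!f->a)
			return -ENOMEM;		/* nothing to unwind yet */
		f->b = dma_pool_alloc(f->pool, GFP_KERNEL, &f->b_dma);
		if (!f->b)
			goto out_free;		/* one label handles partial state */
		return 0;
	out_free:
		foo_free(f);
		return -ENOMEM;
	}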
@@ -1182,6 +1195,7 @@ static int mv_port_start(struct ata_port *ap)
 static void mv_port_stop(struct ata_port *ap)
 {
 	mv_stop_dma(ap);
+	mv_port_free_dma_mem(ap);
 }
 
 /**
@@ -2765,6 +2779,26 @@ static void mv_print_info(struct ata_host *host)
 	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
 }
 
+static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
+{
+	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
+					    MV_CRQB_Q_SZ, 0);
+	if (!hpriv->crqb_pool)
+		return -ENOMEM;
+
+	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
+					    MV_CRPB_Q_SZ, 0);
+	if (!hpriv->crpb_pool)
+		return -ENOMEM;
+
+	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
+					      MV_SG_TBL_SZ, 0);
+	if (!hpriv->sg_tbl_pool)
+		return -ENOMEM;
+
+	return 0;
+}
+
 /**
  * mv_init_one - handle a positive probe of a Marvell host
  * @pdev: PCI device found
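
On the pool parameters: dmam_pool_create() is the devres-managed form of dma_pool_create(), so the pools are torn down automatically when the device is unbound and no explicit dma_pool_destroy() call is needed. Passing the element size as the alignment argument is what provides the 1KB/256B alignment guarantees called out in the header comment. A minimal sketch of one such call, with an illustrative pool name and size:

	#include <linux/device.h>
	#include <linux/dmapool.h>

	#define EXAMPLE_BLOCK_SZ 1024	/* e.g. MV_CRQB_Q_SZ; illustrative value */

	/* Sketch only: example_create_pool()/"example_pool" are made-up names.
	 * dmam_pool_create(name, dev, size, align, boundary): using the block
	 * size as the alignment means every block returned by dma_pool_alloc()
	 * starts on a size-aligned boundary (1KB here); a boundary of 0 adds
	 * no extra "must not cross" restriction.
	 */
	static struct dma_pool *example_create_pool(struct device *dev)
	{
		return dmam_pool_create("example_pool", dev,
					EXAMPLE_BLOCK_SZ,	/* block size */
					EXAMPLE_BLOCK_SZ,	/* alignment  */
					0);			/* boundary   */
	}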
@@ -2810,6 +2844,10 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		return rc;
 
+	rc = mv_create_dma_pools(hpriv, &pdev->dev);
+	if (rc)
+		return rc;
+
 	/* initialize adapter */
 	rc = mv_init_host(host, board_idx);
 	if (rc)