author     Mark Lord <liml@rtr.ca>          2008-01-29 13:24:00 -0500
committer  Jeff Garzik <jeff@garzik.org>    2008-02-01 11:29:49 -0500
commit     eb73d558d1c1c931de0b3a86af962c77d74ef688
tree       3f36725438e95362b83f0c6c59f85cd0d01d17b9  /drivers/ata
parent     bf7f22b9cac74a1e3d8b8e77350db2baca2c35be
sata_mv ncq Introduce per-tag SG tables
In preparation for supporting NCQ, we must allocate separate SG tables
for each command tag, rather than just a single table per port as before.
Gen-I hardware cannot do NCQ, though, so we still allocate just a single
table for it, but point all 32 per-tag slots at that one table to avoid
special cases elsewhere in the hotter paths of the code.
Signed-off-by: Mark Lord <mlord@pobox.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
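
For readers skimming the patch, the allocation scheme boils down to the sketch
below. It is a minimal user-space illustration, not driver code: malloc()/free()
stand in for dma_pool_alloc()/dma_pool_free(), the gen_i flag stands in for
IS_GEN_I(hpriv), SG_TBL_SZ is a placeholder size, and the real driver also keeps
the DMA bus addresses in a parallel sg_tbl_dma[] array.

#include <stdio.h>
#include <stdlib.h>

#define MAX_Q_DEPTH	32	/* MV_MAX_Q_DEPTH in sata_mv.c */
#define SG_TBL_SZ	4096	/* placeholder for one SG table's size */

struct port {
	void *sg_tbl[MAX_Q_DEPTH];	/* one entry per command tag */
	int gen_i;			/* Gen-I chips cannot do NCQ */
};

/* Free the tables; the aliased Gen-I table is released only once, via tag 0. */
static void port_free(struct port *pp)
{
	int tag;

	for (tag = 0; tag < MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !pp->gen_i)
				free(pp->sg_tbl[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/* Allocate one table per tag, or a single shared table on Gen-I. */
static int port_start(struct port *pp)
{
	int tag;

	for (tag = 0; tag < MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !pp->gen_i) {
			pp->sg_tbl[tag] = malloc(SG_TBL_SZ);
			if (!pp->sg_tbl[tag]) {
				port_free(pp);
				return -1;
			}
		} else {
			/* Gen-I: every slot aliases the single table. */
			pp->sg_tbl[tag] = pp->sg_tbl[0];
		}
	}
	return 0;
}

int main(void)
{
	struct port pp = { .gen_i = 1 };

	if (port_start(&pp) == 0) {
		printf("tag 5 %s tag 0\n",
		       pp.sg_tbl[5] == pp.sg_tbl[0] ? "shares" : "does not share");
		port_free(&pp);
	}
	return 0;
}

Pointing every slot at the single Gen-I table keeps the per-command path
(pp->sg_tbl[qc->tag]) free of Gen-I special cases; only the free path has to
remember that tags 1-31 do not own their table.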
Diffstat (limited to 'drivers/ata')
-rw-r--r--  drivers/ata/sata_mv.c | 55 ++++++++++++++++++++++++++++++++++++++++---------------
1 file changed, 40 insertions(+), 15 deletions(-)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 1c53c8a7d21f..ea7af1f16844 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -398,8 +398,8 @@ struct mv_port_priv {
 	dma_addr_t		crqb_dma;
 	struct mv_crpb		*crpb;
 	dma_addr_t		crpb_dma;
-	struct mv_sg		*sg_tbl;
-	dma_addr_t		sg_tbl_dma;
+	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
+	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];
 
 	unsigned int		req_idx;
 	unsigned int		resp_idx;
@@ -483,6 +483,10 @@ static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
 			void __iomem *port_mmio, int want_ncq);
 static int __mv_stop_dma(struct ata_port *ap);
 
+/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
+ * because we have to allow room for worst case splitting of
+ * PRDs for 64K boundaries in mv_fill_sg().
+ */
 static struct scsi_host_template mv5_sht = {
 	.module			= THIS_MODULE,
 	.name			= DRV_NAME,
@@ -1107,6 +1111,7 @@ static void mv_port_free_dma_mem(struct ata_port *ap)
 {
 	struct mv_host_priv *hpriv = ap->host->private_data;
 	struct mv_port_priv *pp = ap->private_data;
+	int tag;
 
 	if (pp->crqb) {
 		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
@@ -1116,9 +1121,18 @@ static void mv_port_free_dma_mem(struct ata_port *ap)
 		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
 		pp->crpb = NULL;
 	}
-	if (pp->sg_tbl) {
-		dma_pool_free(hpriv->sg_tbl_pool, pp->sg_tbl, pp->sg_tbl_dma);
-		pp->sg_tbl = NULL;
+	/*
+	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
+	 * For later hardware, we have one unique sg_tbl per NCQ tag.
+	 */
+	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
+		if (pp->sg_tbl[tag]) {
+			if (tag == 0 || !IS_GEN_I(hpriv))
+				dma_pool_free(hpriv->sg_tbl_pool,
+					      pp->sg_tbl[tag],
+					      pp->sg_tbl_dma[tag]);
+			pp->sg_tbl[tag] = NULL;
+		}
 	}
 }
 
@@ -1139,7 +1153,7 @@ static int mv_port_start(struct ata_port *ap)
 	struct mv_port_priv *pp;
 	void __iomem *port_mmio = mv_ap_base(ap);
 	unsigned long flags;
-	int rc;
+	int tag, rc;
 
 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
 	if (!pp)
@@ -1160,10 +1174,21 @@ static int mv_port_start(struct ata_port *ap)
 		goto out_port_free_dma_mem;
 	memset(pp->crpb, 0, MV_CRPB_Q_SZ);
 
-	pp->sg_tbl = dma_pool_alloc(hpriv->sg_tbl_pool, GFP_KERNEL,
-				    &pp->sg_tbl_dma);
-	if (!pp->sg_tbl)
-		goto out_port_free_dma_mem;
+	/*
+	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
+	 * For later hardware, we need one unique sg_tbl per NCQ tag.
+	 */
+	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
+		if (tag == 0 || !IS_GEN_I(hpriv)) {
+			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
+					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
+			if (!pp->sg_tbl[tag])
+				goto out_port_free_dma_mem;
+		} else {
+			pp->sg_tbl[tag] = pp->sg_tbl[0];
+			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
+		}
+	}
 
 	spin_lock_irqsave(&ap->host->lock, flags);
 
@@ -1214,7 +1239,7 @@ static void mv_fill_sg(struct ata_queued_cmd *qc)
 	struct mv_sg *mv_sg, *last_sg = NULL;
 	unsigned int si;
 
-	mv_sg = pp->sg_tbl;
+	mv_sg = pp->sg_tbl[qc->tag];
 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 		dma_addr_t addr = sg_dma_address(sg);
 		u32 sg_len = sg_dma_len(sg);
@@ -1284,9 +1309,9 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
 
 	pp->crqb[in_index].sg_addr =
-		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
+		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
 	pp->crqb[in_index].sg_addr_hi =
-		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
+		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
 	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
 
 	cw = &pp->crqb[in_index].ata_cmd[0];
@@ -1377,8 +1402,8 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
 	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
 
 	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
-	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
-	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
+	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
+	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
 	crqb->flags = cpu_to_le32(flags);
 
 	tf = &qc->tf;