about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/scsi
diff options
context:
space:
mode:
author: Jeff Garzik <jgarzik@pobox.com> 2005-10-05 07:13:30 -0400
committer: Jeff Garzik <jgarzik@pobox.com> 2005-10-05 07:13:30 -0400
commit: cedc9a478d8c6265879dc3839ef3d4849a709184 (patch)
tree: 0c8e0fbffdb6081381c01b8cfd93c95b168acb44 /drivers/scsi
parent: ed39f731ab2e77e58122232f6e27333331d7793d (diff)
libata: fix ATAPI DMA alignment issues
ATAPI needs to be padded to next 4 byte boundary, if misaligned. Original work by me, many fixes from Tejun Heo.
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/ahci.c        | 30
-rw-r--r--  drivers/scsi/libata-core.c | 123
-rw-r--r--  drivers/scsi/libata-scsi.c | 14
-rw-r--r--  drivers/scsi/sata_qstor.c  | 8
-rw-r--r--  drivers/scsi/sata_sx4.c    | 13
5 files changed, 148 insertions, 40 deletions
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index c2c8fa828e24..6e4bb36f8d7c 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -314,8 +314,15 @@ static int ahci_port_start(struct ata_port *ap)
314 return -ENOMEM; 314 return -ENOMEM;
315 memset(pp, 0, sizeof(*pp)); 315 memset(pp, 0, sizeof(*pp));
316 316
317 ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ, &ap->pad_dma, GFP_KERNEL);
318 if (!ap->pad) {
319 kfree(pp);
320 return -ENOMEM;
321 }
322
317 mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL); 323 mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL);
318 if (!mem) { 324 if (!mem) {
325 dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
319 kfree(pp); 326 kfree(pp);
320 return -ENOMEM; 327 return -ENOMEM;
321 } 328 }
@@ -391,6 +398,7 @@ static void ahci_port_stop(struct ata_port *ap)
391 ap->private_data = NULL; 398 ap->private_data = NULL;
392 dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, 399 dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
393 pp->cmd_slot, pp->cmd_slot_dma); 400 pp->cmd_slot, pp->cmd_slot_dma);
401 dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
394 kfree(pp); 402 kfree(pp);
395} 403}
396 404
@@ -476,23 +484,23 @@ static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
476static void ahci_fill_sg(struct ata_queued_cmd *qc) 484static void ahci_fill_sg(struct ata_queued_cmd *qc)
477{ 485{
478 struct ahci_port_priv *pp = qc->ap->private_data; 486 struct ahci_port_priv *pp = qc->ap->private_data;
479 unsigned int i; 487 struct scatterlist *sg;
488 struct ahci_sg *ahci_sg;
480 489
481 VPRINTK("ENTER\n"); 490 VPRINTK("ENTER\n");
482 491
483 /* 492 /*
484 * Next, the S/G list. 493 * Next, the S/G list.
485 */ 494 */
486 for (i = 0; i < qc->n_elem; i++) { 495 ahci_sg = pp->cmd_tbl_sg;
487 u32 sg_len; 496 ata_for_each_sg(sg, qc) {
488 dma_addr_t addr; 497 dma_addr_t addr = sg_dma_address(sg);
489 498 u32 sg_len = sg_dma_len(sg);
490 addr = sg_dma_address(&qc->sg[i]); 499
491 sg_len = sg_dma_len(&qc->sg[i]); 500 ahci_sg->addr = cpu_to_le32(addr & 0xffffffff);
492 501 ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
493 pp->cmd_tbl_sg[i].addr = cpu_to_le32(addr & 0xffffffff); 502 ahci_sg->flags_size = cpu_to_le32(sg_len - 1);
494 pp->cmd_tbl_sg[i].addr_hi = cpu_to_le32((addr >> 16) >> 16); 503 ahci_sg++;
495 pp->cmd_tbl_sg[i].flags_size = cpu_to_le32(sg_len - 1);
496 } 504 }
497} 505}
498 506
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index e5b01997117a..943b44c3c16f 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -2156,8 +2156,9 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2156static void ata_sg_clean(struct ata_queued_cmd *qc) 2156static void ata_sg_clean(struct ata_queued_cmd *qc)
2157{ 2157{
2158 struct ata_port *ap = qc->ap; 2158 struct ata_port *ap = qc->ap;
2159 struct scatterlist *sg = qc->sg; 2159 struct scatterlist *sg = qc->__sg;
2160 int dir = qc->dma_dir; 2160 int dir = qc->dma_dir;
2161 void *pad_buf = NULL;
2161 2162
2162 assert(qc->flags & ATA_QCFLAG_DMAMAP); 2163 assert(qc->flags & ATA_QCFLAG_DMAMAP);
2163 assert(sg != NULL); 2164 assert(sg != NULL);
@@ -2167,14 +2168,35 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
2167 2168
2168 DPRINTK("unmapping %u sg elements\n", qc->n_elem); 2169 DPRINTK("unmapping %u sg elements\n", qc->n_elem);
2169 2170
2170 if (qc->flags & ATA_QCFLAG_SG) 2171 /* if we padded the buffer out to 32-bit bound, and data
2172 * xfer direction is from-device, we must copy from the
2173 * pad buffer back into the supplied buffer
2174 */
2175 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
2176 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2177
2178 if (qc->flags & ATA_QCFLAG_SG) {
2171 dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir); 2179 dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2172 else 2180 /* restore last sg */
2181 sg[qc->orig_n_elem - 1].length += qc->pad_len;
2182 if (pad_buf) {
2183 struct scatterlist *psg = &qc->pad_sgent;
2184 void *addr = kmap_atomic(psg->page, KM_IRQ0);
2185 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
2186 kunmap_atomic(psg->page, KM_IRQ0);
2187 }
2188 } else {
2173 dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]), 2189 dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
2174 sg_dma_len(&sg[0]), dir); 2190 sg_dma_len(&sg[0]), dir);
2191 /* restore sg */
2192 sg->length += qc->pad_len;
2193 if (pad_buf)
2194 memcpy(qc->buf_virt + sg->length - qc->pad_len,
2195 pad_buf, qc->pad_len);
2196 }
2175 2197
2176 qc->flags &= ~ATA_QCFLAG_DMAMAP; 2198 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2177 qc->sg = NULL; 2199 qc->__sg = NULL;
2178} 2200}
2179 2201
2180/** 2202/**
@@ -2190,15 +2212,15 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
2190 */ 2212 */
2191static void ata_fill_sg(struct ata_queued_cmd *qc) 2213static void ata_fill_sg(struct ata_queued_cmd *qc)
2192{ 2214{
2193 struct scatterlist *sg = qc->sg;
2194 struct ata_port *ap = qc->ap; 2215 struct ata_port *ap = qc->ap;
2195 unsigned int idx, nelem; 2216 struct scatterlist *sg;
2217 unsigned int idx;
2196 2218
2197 assert(sg != NULL); 2219 assert(qc->__sg != NULL);
2198 assert(qc->n_elem > 0); 2220 assert(qc->n_elem > 0);
2199 2221
2200 idx = 0; 2222 idx = 0;
2201 for (nelem = qc->n_elem; nelem; nelem--,sg++) { 2223 ata_for_each_sg(sg, qc) {
2202 u32 addr, offset; 2224 u32 addr, offset;
2203 u32 sg_len, len; 2225 u32 sg_len, len;
2204 2226
@@ -2289,11 +2311,12 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2289 qc->flags |= ATA_QCFLAG_SINGLE; 2311 qc->flags |= ATA_QCFLAG_SINGLE;
2290 2312
2291 memset(&qc->sgent, 0, sizeof(qc->sgent)); 2313 memset(&qc->sgent, 0, sizeof(qc->sgent));
2292 qc->sg = &qc->sgent; 2314 qc->__sg = &qc->sgent;
2293 qc->n_elem = 1; 2315 qc->n_elem = 1;
2316 qc->orig_n_elem = 1;
2294 qc->buf_virt = buf; 2317 qc->buf_virt = buf;
2295 2318
2296 sg = qc->sg; 2319 sg = qc->__sg;
2297 sg->page = virt_to_page(buf); 2320 sg->page = virt_to_page(buf);
2298 sg->offset = (unsigned long) buf & ~PAGE_MASK; 2321 sg->offset = (unsigned long) buf & ~PAGE_MASK;
2299 sg->length = buflen; 2322 sg->length = buflen;
@@ -2317,8 +2340,9 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2317 unsigned int n_elem) 2340 unsigned int n_elem)
2318{ 2341{
2319 qc->flags |= ATA_QCFLAG_SG; 2342 qc->flags |= ATA_QCFLAG_SG;
2320 qc->sg = sg; 2343 qc->__sg = sg;
2321 qc->n_elem = n_elem; 2344 qc->n_elem = n_elem;
2345 qc->orig_n_elem = n_elem;
2322} 2346}
2323 2347
2324/** 2348/**
@@ -2338,9 +2362,32 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2338{ 2362{
2339 struct ata_port *ap = qc->ap; 2363 struct ata_port *ap = qc->ap;
2340 int dir = qc->dma_dir; 2364 int dir = qc->dma_dir;
2341 struct scatterlist *sg = qc->sg; 2365 struct scatterlist *sg = qc->__sg;
2342 dma_addr_t dma_address; 2366 dma_addr_t dma_address;
2343 2367
2368 /* we must lengthen transfers to end on a 32-bit boundary */
2369 qc->pad_len = sg->length & 3;
2370 if (qc->pad_len) {
2371 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2372 struct scatterlist *psg = &qc->pad_sgent;
2373
2374 assert(qc->dev->class == ATA_DEV_ATAPI);
2375
2376 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2377
2378 if (qc->tf.flags & ATA_TFLAG_WRITE)
2379 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
2380 qc->pad_len);
2381
2382 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
2383 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
2384 /* trim sg */
2385 sg->length -= qc->pad_len;
2386
2387 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
2388 sg->length, qc->pad_len);
2389 }
2390
2344 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt, 2391 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
2345 sg->length, dir); 2392 sg->length, dir);
2346 if (dma_mapping_error(dma_address)) 2393 if (dma_mapping_error(dma_address))
@@ -2372,12 +2419,47 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2372static int ata_sg_setup(struct ata_queued_cmd *qc) 2419static int ata_sg_setup(struct ata_queued_cmd *qc)
2373{ 2420{
2374 struct ata_port *ap = qc->ap; 2421 struct ata_port *ap = qc->ap;
2375 struct scatterlist *sg = qc->sg; 2422 struct scatterlist *sg = qc->__sg;
2423 struct scatterlist *lsg = &sg[qc->n_elem - 1];
2376 int n_elem, dir; 2424 int n_elem, dir;
2377 2425
2378 VPRINTK("ENTER, ata%u\n", ap->id); 2426 VPRINTK("ENTER, ata%u\n", ap->id);
2379 assert(qc->flags & ATA_QCFLAG_SG); 2427 assert(qc->flags & ATA_QCFLAG_SG);
2380 2428
2429 /* we must lengthen transfers to end on a 32-bit boundary */
2430 qc->pad_len = lsg->length & 3;
2431 if (qc->pad_len) {
2432 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2433 struct scatterlist *psg = &qc->pad_sgent;
2434 unsigned int offset;
2435
2436 assert(qc->dev->class == ATA_DEV_ATAPI);
2437
2438 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2439
2440 /*
2441 * psg->page/offset are used to copy to-be-written
2442 * data in this function or read data in ata_sg_clean.
2443 */
2444 offset = lsg->offset + lsg->length - qc->pad_len;
2445 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
2446 psg->offset = offset_in_page(offset);
2447
2448 if (qc->tf.flags & ATA_TFLAG_WRITE) {
2449 void *addr = kmap_atomic(psg->page, KM_IRQ0);
2450 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
2451 kunmap_atomic(psg->page, KM_IRQ0);
2452 }
2453
2454 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
2455 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
2456 /* trim last sg */
2457 lsg->length -= qc->pad_len;
2458
2459 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
2460 qc->n_elem - 1, lsg->length, qc->pad_len);
2461 }
2462
2381 dir = qc->dma_dir; 2463 dir = qc->dma_dir;
2382 n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir); 2464 n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2383 if (n_elem < 1) 2465 if (n_elem < 1)
@@ -2655,7 +2737,7 @@ static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
2655static void ata_pio_sector(struct ata_queued_cmd *qc) 2737static void ata_pio_sector(struct ata_queued_cmd *qc)
2656{ 2738{
2657 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); 2739 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2658 struct scatterlist *sg = qc->sg; 2740 struct scatterlist *sg = qc->__sg;
2659 struct ata_port *ap = qc->ap; 2741 struct ata_port *ap = qc->ap;
2660 struct page *page; 2742 struct page *page;
2661 unsigned int offset; 2743 unsigned int offset;
@@ -2705,7 +2787,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
2705static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) 2787static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
2706{ 2788{
2707 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); 2789 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2708 struct scatterlist *sg = qc->sg; 2790 struct scatterlist *sg = qc->__sg;
2709 struct ata_port *ap = qc->ap; 2791 struct ata_port *ap = qc->ap;
2710 struct page *page; 2792 struct page *page;
2711 unsigned char *buf; 2793 unsigned char *buf;
@@ -2738,7 +2820,7 @@ next_sg:
2738 return; 2820 return;
2739 } 2821 }
2740 2822
2741 sg = &qc->sg[qc->cursg]; 2823 sg = &qc->__sg[qc->cursg];
2742 2824
2743 page = sg->page; 2825 page = sg->page;
2744 offset = sg->offset + qc->cursg_ofs; 2826 offset = sg->offset + qc->cursg_ofs;
@@ -3145,7 +3227,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3145 3227
3146 qc = ata_qc_new(ap); 3228 qc = ata_qc_new(ap);
3147 if (qc) { 3229 if (qc) {
3148 qc->sg = NULL; 3230 qc->__sg = NULL;
3149 qc->flags = 0; 3231 qc->flags = 0;
3150 qc->scsicmd = NULL; 3232 qc->scsicmd = NULL;
3151 qc->ap = ap; 3233 qc->ap = ap;
@@ -3837,6 +3919,12 @@ int ata_port_start (struct ata_port *ap)
3837 if (!ap->prd) 3919 if (!ap->prd)
3838 return -ENOMEM; 3920 return -ENOMEM;
3839 3921
3922 ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ, &ap->pad_dma, GFP_KERNEL);
3923 if (!ap->pad) {
3924 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
3925 return -ENOMEM;
3926 }
3927
3840 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma); 3928 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
3841 3929
3842 return 0; 3930 return 0;
@@ -3859,6 +3947,7 @@ void ata_port_stop (struct ata_port *ap)
3859 struct device *dev = ap->host_set->dev; 3947 struct device *dev = ap->host_set->dev;
3860 3948
3861 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma); 3949 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
3950 dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
3862} 3951}
3863 3952
3864void ata_host_stop (struct ata_host_set *host_set) 3953void ata_host_stop (struct ata_host_set *host_set)
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 104fd9a63e73..ee3f1050fb5f 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -150,10 +150,10 @@ struct ata_queued_cmd *ata_scsi_qc_new(struct ata_port *ap,
150 qc->scsidone = done; 150 qc->scsidone = done;
151 151
152 if (cmd->use_sg) { 152 if (cmd->use_sg) {
153 qc->sg = (struct scatterlist *) cmd->request_buffer; 153 qc->__sg = (struct scatterlist *) cmd->request_buffer;
154 qc->n_elem = cmd->use_sg; 154 qc->n_elem = cmd->use_sg;
155 } else { 155 } else {
156 qc->sg = &qc->sgent; 156 qc->__sg = &qc->sgent;
157 qc->n_elem = 1; 157 qc->n_elem = 1;
158 } 158 }
159 } else { 159 } else {
@@ -364,6 +364,16 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
364 */ 364 */
365 blk_queue_max_sectors(sdev->request_queue, 2048); 365 blk_queue_max_sectors(sdev->request_queue, 2048);
366 } 366 }
367
368 /*
369 * SATA DMA transfers must be multiples of 4 byte, so
370 * we need to pad ATAPI transfers using an extra sg.
371 * Decrement max hw segments accordingly.
372 */
373 if (dev->class == ATA_DEV_ATAPI) {
374 request_queue_t *q = sdev->request_queue;
375 blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
376 }
367 } 377 }
368 378
369 return 0; /* scsi layer doesn't check return value, sigh */ 379 return 0; /* scsi layer doesn't check return value, sigh */
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index ffcdeb68641c..69a9b1cf6f9c 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -268,16 +268,17 @@ static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
268 268
269static void qs_fill_sg(struct ata_queued_cmd *qc) 269static void qs_fill_sg(struct ata_queued_cmd *qc)
270{ 270{
271 struct scatterlist *sg = qc->sg; 271 struct scatterlist *sg;
272 struct ata_port *ap = qc->ap; 272 struct ata_port *ap = qc->ap;
273 struct qs_port_priv *pp = ap->private_data; 273 struct qs_port_priv *pp = ap->private_data;
274 unsigned int nelem; 274 unsigned int nelem;
275 u8 *prd = pp->pkt + QS_CPB_BYTES; 275 u8 *prd = pp->pkt + QS_CPB_BYTES;
276 276
277 assert(sg != NULL); 277 assert(qc->__sg != NULL);
278 assert(qc->n_elem > 0); 278 assert(qc->n_elem > 0);
279 279
280 for (nelem = 0; nelem < qc->n_elem; nelem++,sg++) { 280 nelem = 0;
281 ata_for_each_sg(sg, qc) {
281 u64 addr; 282 u64 addr;
282 u32 len; 283 u32 len;
283 284
@@ -291,6 +292,7 @@ static void qs_fill_sg(struct ata_queued_cmd *qc)
291 292
292 VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", nelem, 293 VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", nelem,
293 (unsigned long long)addr, len); 294 (unsigned long long)addr, len);
295 nelem++;
294 } 296 }
295} 297}
296 298
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index 540a85191172..79fdbbab513e 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -449,14 +449,14 @@ static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
449 449
450static void pdc20621_dma_prep(struct ata_queued_cmd *qc) 450static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
451{ 451{
452 struct scatterlist *sg = qc->sg; 452 struct scatterlist *sg;
453 struct ata_port *ap = qc->ap; 453 struct ata_port *ap = qc->ap;
454 struct pdc_port_priv *pp = ap->private_data; 454 struct pdc_port_priv *pp = ap->private_data;
455 void __iomem *mmio = ap->host_set->mmio_base; 455 void __iomem *mmio = ap->host_set->mmio_base;
456 struct pdc_host_priv *hpriv = ap->host_set->private_data; 456 struct pdc_host_priv *hpriv = ap->host_set->private_data;
457 void __iomem *dimm_mmio = hpriv->dimm_mmio; 457 void __iomem *dimm_mmio = hpriv->dimm_mmio;
458 unsigned int portno = ap->port_no; 458 unsigned int portno = ap->port_no;
459 unsigned int i, last, idx, total_len = 0, sgt_len; 459 unsigned int i, idx, total_len = 0, sgt_len;
460 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; 460 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
461 461
462 assert(qc->flags & ATA_QCFLAG_DMAMAP); 462 assert(qc->flags & ATA_QCFLAG_DMAMAP);
@@ -469,12 +469,11 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
469 /* 469 /*
470 * Build S/G table 470 * Build S/G table
471 */ 471 */
472 last = qc->n_elem;
473 idx = 0; 472 idx = 0;
474 for (i = 0; i < last; i++) { 473 ata_for_each_sg(sg, qc) {
475 buf[idx++] = cpu_to_le32(sg_dma_address(&sg[i])); 474 buf[idx++] = cpu_to_le32(sg_dma_address(sg));
476 buf[idx++] = cpu_to_le32(sg_dma_len(&sg[i])); 475 buf[idx++] = cpu_to_le32(sg_dma_len(sg));
477 total_len += sg_dma_len(&sg[i]); 476 total_len += sg_dma_len(sg);
478 } 477 }
479 buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT); 478 buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
480 sgt_len = idx * 4; 479 sgt_len = idx * 4;