author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>    2007-05-14 07:25:31 -0400
committer  James Bottomley <jejb@mulgrave.il.steeleye.com>    2007-06-17 19:35:14 -0400
commit     f98754960a9b25057ad5f249f877b3d6fab889ce
tree       e00e0d04e57a158e02adb84de73a3f010c27627d
parent     45804fbb00eea27bdf4d62751681228a9e2844e9
[SCSI] hptiop: convert to use the data buffer accessors
- remove the unnecessary map_single path.
- convert to use the new accessors for the sg lists and the
parameters.
Jens Axboe <jens.axboe@oracle.com> did the for_each_sg cleanup.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: HighPoint Linux Team <linux@highpoint-tech.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
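For readers coming from the old API: the data buffer accessors collapse the two mapping paths (scatter/gather via pci_map_sg(), single buffer via pci_map_single()) into a single interface. Below is a minimal sketch of that pattern, not code from this patch; the example_* names and the descriptor layout are hypothetical, while scsi_dma_map(), scsi_dma_unmap(), scsi_for_each_sg(), sg_dma_address() and sg_dma_len() are the real accessors.

	/* Hedged sketch of the accessor pattern this patch adopts.
	 * "example_*" identifiers and the descriptor layout are made up
	 * for illustration; the scsi_* calls are the real kernel API.
	 */
	#include <linux/types.h>
	#include <scsi/scsi_cmnd.h>

	struct example_sg_entry {
		__le64 addr;
		__le32 len;
		__le32 eot;	/* end-of-table marker, as in hpt_iopsg */
	};

	static int example_build_sg(struct scsi_cmnd *scp,
				    struct example_sg_entry *psg)
	{
		struct scatterlist *sg;
		int idx, nseg;

		nseg = scsi_dma_map(scp);	/* < 0 on mapping failure */
		if (nseg <= 0)
			return nseg;		/* 0: command carries no data */

		scsi_for_each_sg(scp, sg, nseg, idx) {
			psg[idx].addr = cpu_to_le64(sg_dma_address(sg));
			psg[idx].len  = cpu_to_le32(sg_dma_len(sg));
			psg[idx].eot  = (idx == nseg - 1) ? cpu_to_le32(1) : 0;
		}
		return nseg;
	}

	static void example_complete(struct scsi_cmnd *scp)
	{
		/* one call replaces both pci_unmap_sg() and pci_unmap_single() */
		scsi_dma_unmap(scp);
	}

The same conversion replaces scp->request_bufflen with scsi_bufflen(scp) for the transfer length, as the last hunk below shows.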
-rw-r--r--  drivers/scsi/hptiop.c | 76
1 file changed, 23 insertions(+), 53 deletions(-)
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index bec83cbee59a..0e579ca45814 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -339,20 +339,8 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag)
 
 	scp = hba->reqs[tag].scp;
 
-	if (HPT_SCP(scp)->mapped) {
-		if (scp->use_sg)
-			pci_unmap_sg(hba->pcidev,
-				(struct scatterlist *)scp->request_buffer,
-				scp->use_sg,
-				scp->sc_data_direction
-			);
-		else
-			pci_unmap_single(hba->pcidev,
-				HPT_SCP(scp)->dma_handle,
-				scp->request_bufflen,
-				scp->sc_data_direction
-			);
-	}
+	if (HPT_SCP(scp)->mapped)
+		scsi_dma_unmap(scp);
 
 	switch (le32_to_cpu(req->header.result)) {
 	case IOP_RESULT_SUCCESS:
@@ -448,43 +436,26 @@ static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
 {
 	struct Scsi_Host *host = scp->device->host;
 	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
-	struct scatterlist *sglist = (struct scatterlist *)scp->request_buffer;
-
-	/*
-	 * though we'll not get non-use_sg fields anymore,
-	 * keep use_sg checking anyway
-	 */
-	if (scp->use_sg) {
-		int idx;
-
-		HPT_SCP(scp)->sgcnt = pci_map_sg(hba->pcidev,
-				sglist, scp->use_sg,
-				scp->sc_data_direction);
-		HPT_SCP(scp)->mapped = 1;
-		BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);
-
-		for (idx = 0; idx < HPT_SCP(scp)->sgcnt; idx++) {
-			psg[idx].pci_address =
-				cpu_to_le64(sg_dma_address(&sglist[idx]));
-			psg[idx].size = cpu_to_le32(sg_dma_len(&sglist[idx]));
-			psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
-				cpu_to_le32(1) : 0;
-		}
-
-		return HPT_SCP(scp)->sgcnt;
-	} else {
-		HPT_SCP(scp)->dma_handle = pci_map_single(
-				hba->pcidev,
-				scp->request_buffer,
-				scp->request_bufflen,
-				scp->sc_data_direction
-			);
-		HPT_SCP(scp)->mapped = 1;
-		psg->pci_address = cpu_to_le64(HPT_SCP(scp)->dma_handle);
-		psg->size = cpu_to_le32(scp->request_bufflen);
-		psg->eot = cpu_to_le32(1);
-		return 1;
-	}
+	struct scatterlist *sg;
+	int idx, nseg;
+
+	nseg = scsi_dma_map(scp);
+	BUG_ON(nseg < 0);
+	if (!nseg)
+		return 0;
+
+	HPT_SCP(scp)->sgcnt = nseg;
+	HPT_SCP(scp)->mapped = 1;
+
+	BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);
+
+	scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
+		psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg));
+		psg[idx].size = cpu_to_le32(sg_dma_len(sg));
+		psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
+			cpu_to_le32(1) : 0;
+	}
+	return HPT_SCP(scp)->sgcnt;
 }
 
 static int hptiop_queuecommand(struct scsi_cmnd *scp,
@@ -529,9 +500,8 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
 	req = (struct hpt_iop_request_scsi_command *)_req->req_virt;
 
 	/* build S/G table */
-	if (scp->request_bufflen)
-		sg_count = hptiop_buildsgl(scp, req->sg_list);
-	else
+	sg_count = hptiop_buildsgl(scp, req->sg_list);
+	if (!sg_count)
 		HPT_SCP(scp)->mapped = 0;
 
 	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
@@ -540,7 +510,7 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
 	req->header.context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
 						(u32)_req->index);
 	req->header.context_hi32 = 0;
-	req->dataxfer_length = cpu_to_le32(scp->request_bufflen);
+	req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
 	req->channel = scp->device->channel;
 	req->target = scp->device->id;
 	req->lun = scp->device->lun;
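One design note on the hunks above: hptiop asserts BUG_ON(nseg < 0) on a mapping failure. A gentler alternative, sketched below under the assumption of the era's two-argument queuecommand signature (example_queuecommand is a hypothetical name; SCSI_MLQUEUE_HOST_BUSY is the real midlayer return code), is to bounce the command back for a retry:

	static int example_queuecommand(struct scsi_cmnd *scp,
					void (*done)(struct scsi_cmnd *))
	{
		int nseg = scsi_dma_map(scp);

		if (nseg < 0)
			/* nothing was mapped; the midlayer requeues the command */
			return SCSI_MLQUEUE_HOST_BUSY;

		/* ... build and post the controller request ... */
		return 0;
	}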