aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorFUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>2007-05-25 21:01:24 -0400
committerJames Bottomley <jejb@mulgrave.il.steeleye.com>2007-05-29 23:49:59 -0400
commitd5587d5dcd275338af21627a3e931a77a6c04b8d (patch)
tree1433eb441ea54857a415e0837c79cfdd52832c3e /drivers
parent5f7186c841a13abff0bf81ee93754b4f46e19141 (diff)
[SCSI] stex: convert to use the data buffer accessors
- remove the unnecessary map_single path. - convert to use the new accessors for the sg lists and the parameters. Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> Acked-by: Ed Lin <ed.lin@promise.com> Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/scsi/stex.c109
1 file changed, 36 insertions, 73 deletions
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 69be1324b114..baf516d09d79 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -398,53 +398,34 @@ static struct req_msg *stex_alloc_req(struct st_hba *hba)
398static int stex_map_sg(struct st_hba *hba, 398static int stex_map_sg(struct st_hba *hba,
399 struct req_msg *req, struct st_ccb *ccb) 399 struct req_msg *req, struct st_ccb *ccb)
400{ 400{
401 struct pci_dev *pdev = hba->pdev;
402 struct scsi_cmnd *cmd; 401 struct scsi_cmnd *cmd;
403 dma_addr_t dma_handle; 402 struct scatterlist *sg;
404 struct scatterlist *src;
405 struct st_sgtable *dst; 403 struct st_sgtable *dst;
406 int i; 404 int i, nseg;
407 405
408 cmd = ccb->cmd; 406 cmd = ccb->cmd;
409 dst = (struct st_sgtable *)req->variable; 407 dst = (struct st_sgtable *)req->variable;
410 dst->max_sg_count = cpu_to_le16(ST_MAX_SG); 408 dst->max_sg_count = cpu_to_le16(ST_MAX_SG);
411 dst->sz_in_byte = cpu_to_le32(cmd->request_bufflen); 409 dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));
412
413 if (cmd->use_sg) {
414 int n_elem;
415 410
416 src = (struct scatterlist *) cmd->request_buffer; 411 nseg = scsi_dma_map(cmd);
417 n_elem = pci_map_sg(pdev, src, 412 if (nseg < 0)
418 cmd->use_sg, cmd->sc_data_direction); 413 return -EIO;
419 if (n_elem <= 0) 414 if (nseg) {
420 return -EIO; 415 ccb->sg_count = nseg;
416 dst->sg_count = cpu_to_le16((u16)nseg);
421 417
422 ccb->sg_count = n_elem; 418 scsi_for_each_sg(cmd, sg, nseg, i) {
423 dst->sg_count = cpu_to_le16((u16)n_elem); 419 dst->table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
424
425 for (i = 0; i < n_elem; i++, src++) {
426 dst->table[i].count = cpu_to_le32((u32)sg_dma_len(src));
427 dst->table[i].addr = 420 dst->table[i].addr =
428 cpu_to_le32(sg_dma_address(src) & 0xffffffff); 421 cpu_to_le32(sg_dma_address(sg) & 0xffffffff);
429 dst->table[i].addr_hi = 422 dst->table[i].addr_hi =
430 cpu_to_le32((sg_dma_address(src) >> 16) >> 16); 423 cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
431 dst->table[i].ctrl = SG_CF_64B | SG_CF_HOST; 424 dst->table[i].ctrl = SG_CF_64B | SG_CF_HOST;
432 } 425 }
433 dst->table[--i].ctrl |= SG_CF_EOT; 426 dst->table[--i].ctrl |= SG_CF_EOT;
434 return 0;
435 } 427 }
436 428
437 dma_handle = pci_map_single(pdev, cmd->request_buffer,
438 cmd->request_bufflen, cmd->sc_data_direction);
439 cmd->SCp.dma_handle = dma_handle;
440
441 ccb->sg_count = 1;
442 dst->sg_count = cpu_to_le16(1);
443 dst->table[0].addr = cpu_to_le32(dma_handle & 0xffffffff);
444 dst->table[0].addr_hi = cpu_to_le32((dma_handle >> 16) >> 16);
445 dst->table[0].count = cpu_to_le32((u32)cmd->request_bufflen);
446 dst->table[0].ctrl = SG_CF_EOT | SG_CF_64B | SG_CF_HOST;
447
448 return 0; 429 return 0;
449} 430}
450 431
@@ -454,24 +435,24 @@ static void stex_internal_copy(struct scsi_cmnd *cmd,
454 size_t lcount; 435 size_t lcount;
455 size_t len; 436 size_t len;
456 void *s, *d, *base = NULL; 437 void *s, *d, *base = NULL;
457 if (*count > cmd->request_bufflen) 438 size_t offset;
458 *count = cmd->request_bufflen; 439
440 if (*count > scsi_bufflen(cmd))
441 *count = scsi_bufflen(cmd);
459 lcount = *count; 442 lcount = *count;
460 while (lcount) { 443 while (lcount) {
461 len = lcount; 444 len = lcount;
462 s = (void *)src; 445 s = (void *)src;
463 if (cmd->use_sg) { 446
464 size_t offset = *count - lcount; 447 offset = *count - lcount;
465 s += offset; 448 s += offset;
466 base = scsi_kmap_atomic_sg(cmd->request_buffer, 449 base = scsi_kmap_atomic_sg(scsi_sglist(cmd),
467 sg_count, &offset, &len); 450 sg_count, &offset, &len);
468 if (base == NULL) { 451 if (!base) {
469 *count -= lcount; 452 *count -= lcount;
470 return; 453 return;
471 } 454 }
472 d = base + offset; 455 d = base + offset;
473 } else
474 d = cmd->request_buffer;
475 456
476 if (direction == ST_TO_CMD) 457 if (direction == ST_TO_CMD)
477 memcpy(d, s, len); 458 memcpy(d, s, len);
@@ -479,30 +460,24 @@ static void stex_internal_copy(struct scsi_cmnd *cmd,
479 memcpy(s, d, len); 460 memcpy(s, d, len);
480 461
481 lcount -= len; 462 lcount -= len;
482 if (cmd->use_sg) 463 scsi_kunmap_atomic_sg(base);
483 scsi_kunmap_atomic_sg(base);
484 } 464 }
485} 465}
486 466
487static int stex_direct_copy(struct scsi_cmnd *cmd, 467static int stex_direct_copy(struct scsi_cmnd *cmd,
488 const void *src, size_t count) 468 const void *src, size_t count)
489{ 469{
490 struct st_hba *hba = (struct st_hba *) &cmd->device->host->hostdata[0];
491 size_t cp_len = count; 470 size_t cp_len = count;
492 int n_elem = 0; 471 int n_elem = 0;
493 472
494 if (cmd->use_sg) { 473 n_elem = scsi_dma_map(cmd);
495 n_elem = pci_map_sg(hba->pdev, cmd->request_buffer, 474 if (n_elem < 0)
496 cmd->use_sg, cmd->sc_data_direction); 475 return 0;
497 if (n_elem <= 0)
498 return 0;
499 }
500 476
501 stex_internal_copy(cmd, src, &cp_len, n_elem, ST_TO_CMD); 477 stex_internal_copy(cmd, src, &cp_len, n_elem, ST_TO_CMD);
502 478
503 if (cmd->use_sg) 479 scsi_dma_unmap(cmd);
504 pci_unmap_sg(hba->pdev, cmd->request_buffer, 480
505 cmd->use_sg, cmd->sc_data_direction);
506 return cp_len == count; 481 return cp_len == count;
507} 482}
508 483
@@ -668,18 +643,6 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
668 return 0; 643 return 0;
669} 644}
670 645
671static void stex_unmap_sg(struct st_hba *hba, struct scsi_cmnd *cmd)
672{
673 if (cmd->sc_data_direction != DMA_NONE) {
674 if (cmd->use_sg)
675 pci_unmap_sg(hba->pdev, cmd->request_buffer,
676 cmd->use_sg, cmd->sc_data_direction);
677 else
678 pci_unmap_single(hba->pdev, cmd->SCp.dma_handle,
679 cmd->request_bufflen, cmd->sc_data_direction);
680 }
681}
682
683static void stex_scsi_done(struct st_ccb *ccb) 646static void stex_scsi_done(struct st_ccb *ccb)
684{ 647{
685 struct scsi_cmnd *cmd = ccb->cmd; 648 struct scsi_cmnd *cmd = ccb->cmd;
@@ -746,7 +709,7 @@ static void stex_ys_commands(struct st_hba *hba,
746 709
747 if (ccb->cmd->cmnd[0] == MGT_CMD && 710 if (ccb->cmd->cmnd[0] == MGT_CMD &&
748 resp->scsi_status != SAM_STAT_CHECK_CONDITION) { 711 resp->scsi_status != SAM_STAT_CHECK_CONDITION) {
749 ccb->cmd->request_bufflen = 712 scsi_bufflen(ccb->cmd) =
750 le32_to_cpu(*(__le32 *)&resp->variable[0]); 713 le32_to_cpu(*(__le32 *)&resp->variable[0]);
751 return; 714 return;
752 } 715 }
@@ -857,7 +820,7 @@ static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
857 ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER)) 820 ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
858 stex_controller_info(hba, ccb); 821 stex_controller_info(hba, ccb);
859 822
860 stex_unmap_sg(hba, ccb->cmd); 823 scsi_dma_unmap(ccb->cmd);
861 stex_scsi_done(ccb); 824 stex_scsi_done(ccb);
862 hba->out_req_cnt--; 825 hba->out_req_cnt--;
863 } else if (ccb->req_type & PASSTHRU_REQ_TYPE) { 826 } else if (ccb->req_type & PASSTHRU_REQ_TYPE) {
@@ -1025,7 +988,7 @@ static int stex_abort(struct scsi_cmnd *cmd)
1025 } 988 }
1026 989
1027fail_out: 990fail_out:
1028 stex_unmap_sg(hba, cmd); 991 scsi_dma_unmap(cmd);
1029 hba->wait_ccb->req = NULL; /* nullify the req's future return */ 992 hba->wait_ccb->req = NULL; /* nullify the req's future return */
1030 hba->wait_ccb = NULL; 993 hba->wait_ccb = NULL;
1031 result = FAILED; 994 result = FAILED;