Diffstat (limited to 'drivers/scsi/stex.c')
-rw-r--r--  drivers/scsi/stex.c | 111
1 files changed, 37 insertions, 74 deletions
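For reference, the hunks below replace the driver's pci_map_sg()/pci_map_single() handling of cmd->request_buffer with the SCSI mid-layer accessors scsi_dma_map(), scsi_for_each_sg(), scsi_bufflen() and scsi_dma_unmap(). A minimal sketch of that pattern, using an illustrative hw_sg_entry layout and build_hw_sgl() helper standing in for the driver's own st_sgtable, looks roughly like this:

/*
 * Sketch of the data-buffer accessor pattern (illustrative names:
 * hw_sg_entry, build_hw_sgl; the real driver fills a struct st_sgtable).
 */
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

struct hw_sg_entry {
	__le32 addr;		/* low 32 bits of the DMA address */
	__le32 addr_hi;		/* high 32 bits of the DMA address */
	__le32 count;		/* length of this segment in bytes */
};

static int build_hw_sgl(struct scsi_cmnd *cmd, struct hw_sg_entry *tbl)
{
	struct scatterlist *sg;
	int i, nseg;

	/* Map the command's data buffer; no cmd->use_sg checks needed. */
	nseg = scsi_dma_map(cmd);
	if (nseg < 0)
		return -EIO;		/* DMA mapping failed */

	/* nseg == 0 simply means the command carries no data. */
	scsi_for_each_sg(cmd, sg, nseg, i) {
		tbl[i].addr = cpu_to_le32(sg_dma_address(sg) & 0xffffffff);
		tbl[i].addr_hi = cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
		tbl[i].count = cpu_to_le32((u32)sg_dma_len(sg));
	}
	return nseg;
}

On completion (or abort) the driver undoes the mapping with scsi_dma_unmap(cmd), which is why the separate stex_unmap_sg() helper is removed further down.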
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 9ac83abc4028..72f6d8015358 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -395,53 +395,34 @@ static struct req_msg *stex_alloc_req(struct st_hba *hba)
 static int stex_map_sg(struct st_hba *hba,
 	struct req_msg *req, struct st_ccb *ccb)
 {
-	struct pci_dev *pdev = hba->pdev;
 	struct scsi_cmnd *cmd;
-	dma_addr_t dma_handle;
-	struct scatterlist *src;
+	struct scatterlist *sg;
 	struct st_sgtable *dst;
-	int i;
+	int i, nseg;
 
 	cmd = ccb->cmd;
 	dst = (struct st_sgtable *)req->variable;
 	dst->max_sg_count = cpu_to_le16(ST_MAX_SG);
-	dst->sz_in_byte = cpu_to_le32(cmd->request_bufflen);
-
-	if (cmd->use_sg) {
-		int n_elem;
+	dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));
 
-		src = (struct scatterlist *) cmd->request_buffer;
-		n_elem = pci_map_sg(pdev, src,
-			cmd->use_sg, cmd->sc_data_direction);
-		if (n_elem <= 0)
-			return -EIO;
+	nseg = scsi_dma_map(cmd);
+	if (nseg < 0)
+		return -EIO;
+	if (nseg) {
+		ccb->sg_count = nseg;
+		dst->sg_count = cpu_to_le16((u16)nseg);
 
-		ccb->sg_count = n_elem;
-		dst->sg_count = cpu_to_le16((u16)n_elem);
-
-		for (i = 0; i < n_elem; i++, src++) {
-			dst->table[i].count = cpu_to_le32((u32)sg_dma_len(src));
+		scsi_for_each_sg(cmd, sg, nseg, i) {
+			dst->table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
 			dst->table[i].addr =
-				cpu_to_le32(sg_dma_address(src) & 0xffffffff);
+				cpu_to_le32(sg_dma_address(sg) & 0xffffffff);
 			dst->table[i].addr_hi =
-				cpu_to_le32((sg_dma_address(src) >> 16) >> 16);
+				cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
 			dst->table[i].ctrl = SG_CF_64B | SG_CF_HOST;
 		}
 		dst->table[--i].ctrl |= SG_CF_EOT;
-		return 0;
 	}
 
-	dma_handle = pci_map_single(pdev, cmd->request_buffer,
-		cmd->request_bufflen, cmd->sc_data_direction);
-	cmd->SCp.dma_handle = dma_handle;
-
-	ccb->sg_count = 1;
-	dst->sg_count = cpu_to_le16(1);
-	dst->table[0].addr = cpu_to_le32(dma_handle & 0xffffffff);
-	dst->table[0].addr_hi = cpu_to_le32((dma_handle >> 16) >> 16);
-	dst->table[0].count = cpu_to_le32((u32)cmd->request_bufflen);
-	dst->table[0].ctrl = SG_CF_EOT | SG_CF_64B | SG_CF_HOST;
-
 	return 0;
 }
 
@@ -451,24 +432,24 @@ static void stex_internal_copy(struct scsi_cmnd *cmd,
 	size_t lcount;
 	size_t len;
 	void *s, *d, *base = NULL;
-	if (*count > cmd->request_bufflen)
-		*count = cmd->request_bufflen;
+	size_t offset;
+
+	if (*count > scsi_bufflen(cmd))
+		*count = scsi_bufflen(cmd);
 	lcount = *count;
 	while (lcount) {
 		len = lcount;
 		s = (void *)src;
-		if (cmd->use_sg) {
-			size_t offset = *count - lcount;
-			s += offset;
-			base = scsi_kmap_atomic_sg(cmd->request_buffer,
-				sg_count, &offset, &len);
-			if (base == NULL) {
-				*count -= lcount;
-				return;
-			}
-			d = base + offset;
-		} else
-			d = cmd->request_buffer;
+
+		offset = *count - lcount;
+		s += offset;
+		base = scsi_kmap_atomic_sg(scsi_sglist(cmd),
+			sg_count, &offset, &len);
+		if (!base) {
+			*count -= lcount;
+			return;
+		}
+		d = base + offset;
 
 		if (direction == ST_TO_CMD)
 			memcpy(d, s, len);
@@ -476,30 +457,24 @@ static void stex_internal_copy(struct scsi_cmnd *cmd,
 			memcpy(s, d, len);
 
 		lcount -= len;
-		if (cmd->use_sg)
-			scsi_kunmap_atomic_sg(base);
+		scsi_kunmap_atomic_sg(base);
 	}
 }
 
 static int stex_direct_copy(struct scsi_cmnd *cmd,
 	const void *src, size_t count)
 {
-	struct st_hba *hba = (struct st_hba *) &cmd->device->host->hostdata[0];
 	size_t cp_len = count;
 	int n_elem = 0;
 
-	if (cmd->use_sg) {
-		n_elem = pci_map_sg(hba->pdev, cmd->request_buffer,
-			cmd->use_sg, cmd->sc_data_direction);
-		if (n_elem <= 0)
-			return 0;
-	}
+	n_elem = scsi_dma_map(cmd);
+	if (n_elem < 0)
+		return 0;
 
 	stex_internal_copy(cmd, src, &cp_len, n_elem, ST_TO_CMD);
 
-	if (cmd->use_sg)
-		pci_unmap_sg(hba->pdev, cmd->request_buffer,
-			cmd->use_sg, cmd->sc_data_direction);
+	scsi_dma_unmap(cmd);
+
 	return cp_len == count;
 }
 
@@ -678,18 +653,6 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
 	return 0;
 }
 
-static void stex_unmap_sg(struct st_hba *hba, struct scsi_cmnd *cmd)
-{
-	if (cmd->sc_data_direction != DMA_NONE) {
-		if (cmd->use_sg)
-			pci_unmap_sg(hba->pdev, cmd->request_buffer,
-				cmd->use_sg, cmd->sc_data_direction);
-		else
-			pci_unmap_single(hba->pdev, cmd->SCp.dma_handle,
-				cmd->request_bufflen, cmd->sc_data_direction);
-	}
-}
-
 static void stex_scsi_done(struct st_ccb *ccb)
 {
 	struct scsi_cmnd *cmd = ccb->cmd;
@@ -756,8 +719,8 @@ static void stex_ys_commands(struct st_hba *hba,
 
 	if (ccb->cmd->cmnd[0] == MGT_CMD &&
 		resp->scsi_status != SAM_STAT_CHECK_CONDITION) {
-		ccb->cmd->request_bufflen =
-			le32_to_cpu(*(__le32 *)&resp->variable[0]);
+		scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) -
+			le32_to_cpu(*(__le32 *)&resp->variable[0]));
 		return;
 	}
 
@@ -855,7 +818,7 @@ static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
 			ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
 			stex_controller_info(hba, ccb);
 
-		stex_unmap_sg(hba, ccb->cmd);
+		scsi_dma_unmap(ccb->cmd);
 		stex_scsi_done(ccb);
 		hba->out_req_cnt--;
 	} else if (ccb->req_type & PASSTHRU_REQ_TYPE) {
@@ -1028,7 +991,7 @@ static int stex_abort(struct scsi_cmnd *cmd)
 	}
 
 fail_out:
-	stex_unmap_sg(hba, cmd);
+	scsi_dma_unmap(cmd);
 	hba->wait_ccb->req = NULL; /* nullify the req's future return */
 	hba->wait_ccb = NULL;
 	result = FAILED;
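With the accessors, a driver no longer patches cmd->request_bufflen to report a short transfer; the MGT_CMD hunk above records the shortfall with scsi_set_resid() instead, and the mid-layer derives the transferred length from the residual. A small sketch of that residual reporting, using an illustrative report_short_transfer() helper:

/*
 * Sketch of residual reporting via scsi_set_resid(); the helper name
 * report_short_transfer() is illustrative only.
 */
static void report_short_transfer(struct scsi_cmnd *cmd, u32 actual)
{
	if (actual > scsi_bufflen(cmd))
		actual = scsi_bufflen(cmd);
	/* Tell the midlayer how many requested bytes were NOT transferred. */
	scsi_set_resid(cmd, scsi_bufflen(cmd) - actual);
}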