author     Roland Dreier <rolandd@cisco.com>          2006-03-24 18:47:26 -0500
committer  Roland Dreier <rolandd@cisco.com>          2006-03-24 18:47:26 -0500
commit     cf368713a3f3b2eb737a92d1b7186dedcc51167c (patch)
tree       450791a25611cf98bde1540a3c366d0e2a6ba117 /drivers
parent     6f633c8d69415aabbccfcc494008e8e1300a98c1 (diff)
IB/srp: Use a fake scatterlist for non-SG SCSI commands
The SCSI midlayer is moving towards entirely getting rid of commands
with use_sg == 0, so treat that case as the exception rather than the
rule. Change the IB SRP initiator to create a fake scatterlist for
such commands with sg_init_one(). This simplifies the flow of
DMA mapping and unmapping, since SRP can just use dma_map_sg() and
dma_unmap_sg() unconditionally, rather than having to choose between
the dma_{map,unmap}_sg() and dma_{map,unmap}_single() variants.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
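
For readers unfamiliar with the scatterlist API, the pattern the patch relies on looks roughly like the sketch below: a single-entry scatterlist built with sg_init_one() can be handed to dma_map_sg() exactly like a real SG table, so one map/unmap path covers both kinds of commands. This is only an illustration; the helper name and the dev/buf/len/dir parameters are made up here and are not part of the driver.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Illustrative helper: map a flat buffer through the scatterlist API. */
static int map_flat_buffer(struct device *dev, void *buf, unsigned int len,
                           enum dma_data_direction dir,
                           struct scatterlist *fake_sg)
{
        int count;

        /* Describe the single flat buffer as a one-entry scatterlist. */
        sg_init_one(fake_sg, buf, len);

        /* Same call that real SG commands go through; no dma_map_single(). */
        count = dma_map_sg(dev, fake_sg, 1, dir);
        if (!count)
                return -EINVAL;

        /*
         * sg_dma_address()/sg_dma_len() now describe the mapping; undo it
         * with dma_unmap_sg(dev, fake_sg, 1, dir) when the command completes.
         */
        return count;
}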
Diffstat (limited to 'drivers')
 -rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c  145
 -rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h    7
 2 files changed, 75 insertions, 77 deletions
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index a13dcdf90a4f..61924cc30e55 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -503,8 +503,10 @@ err:
 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
                         struct srp_request *req)
 {
+        struct scatterlist *scat;
         struct srp_cmd *cmd = req->cmd->buf;
-        int len;
+        int len, nents, count;
+        int i;
         u8 fmt;
 
         if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
@@ -517,82 +519,66 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
                 return -EINVAL;
         }
 
-        if (scmnd->use_sg) {
-                struct scatterlist *scat = scmnd->request_buffer;
-                int n;
-                int i;
-
-                n = dma_map_sg(target->srp_host->dev->dma_device,
-                               scat, scmnd->use_sg, scmnd->sc_data_direction);
-
-                if (n == 1) {
-                        struct srp_direct_buf *buf = (void *) cmd->add_data;
-
-                        fmt = SRP_DATA_DESC_DIRECT;
-
-                        buf->va  = cpu_to_be64(sg_dma_address(scat));
-                        buf->key = cpu_to_be32(target->srp_host->mr->rkey);
-                        buf->len = cpu_to_be32(sg_dma_len(scat));
-
-                        len = sizeof (struct srp_cmd) +
-                                sizeof (struct srp_direct_buf);
-                } else {
-                        struct srp_indirect_buf *buf = (void *) cmd->add_data;
-                        u32 datalen = 0;
-
-                        fmt = SRP_DATA_DESC_INDIRECT;
-
-                        if (scmnd->sc_data_direction == DMA_TO_DEVICE)
-                                cmd->data_out_desc_cnt = n;
-                        else
-                                cmd->data_in_desc_cnt = n;
-
-                        buf->table_desc.va  = cpu_to_be64(req->cmd->dma +
-                                                          sizeof *cmd +
-                                                          sizeof *buf);
-                        buf->table_desc.key =
-                                cpu_to_be32(target->srp_host->mr->rkey);
-                        buf->table_desc.len =
-                                cpu_to_be32(n * sizeof (struct srp_direct_buf));
-
-                        for (i = 0; i < n; ++i) {
-                                buf->desc_list[i].va  = cpu_to_be64(sg_dma_address(&scat[i]));
-                                buf->desc_list[i].key =
-                                        cpu_to_be32(target->srp_host->mr->rkey);
-                                buf->desc_list[i].len = cpu_to_be32(sg_dma_len(&scat[i]));
-
-                                datalen += sg_dma_len(&scat[i]);
-                        }
-
-                        buf->len = cpu_to_be32(datalen);
-
-                        len = sizeof (struct srp_cmd) +
-                                sizeof (struct srp_indirect_buf) +
-                                n * sizeof (struct srp_direct_buf);
-                }
-        } else {
-                struct srp_direct_buf *buf = (void *) cmd->add_data;
-                dma_addr_t dma;
-
-                dma = dma_map_single(target->srp_host->dev->dma_device,
-                                     scmnd->request_buffer, scmnd->request_bufflen,
-                                     scmnd->sc_data_direction);
-                if (dma_mapping_error(dma)) {
-                        printk(KERN_WARNING PFX "unable to map %p/%d (dir %d)\n",
-                               scmnd->request_buffer, (int) scmnd->request_bufflen,
-                               scmnd->sc_data_direction);
-                        return -EINVAL;
-                }
-
-                pci_unmap_addr_set(req, direct_mapping, dma);
-
-                buf->va  = cpu_to_be64(dma);
-                buf->key = cpu_to_be32(target->srp_host->mr->rkey);
-                buf->len = cpu_to_be32(scmnd->request_bufflen);
-
-                fmt = SRP_DATA_DESC_DIRECT;
-
-                len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
+        /*
+         * This handling of non-SG commands can be killed when the
+         * SCSI midlayer no longer generates non-SG commands.
+         */
+        if (likely(scmnd->use_sg)) {
+                nents = scmnd->use_sg;
+                scat  = scmnd->request_buffer;
+        } else {
+                nents = 1;
+                scat  = &req->fake_sg;
+                sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
+        }
+
+        count = dma_map_sg(target->srp_host->dev->dma_device, scat, nents,
+                           scmnd->sc_data_direction);
+
+        if (count == 1) {
+                struct srp_direct_buf *buf = (void *) cmd->add_data;
+
+                fmt = SRP_DATA_DESC_DIRECT;
+
+                buf->va  = cpu_to_be64(sg_dma_address(scat));
+                buf->key = cpu_to_be32(target->srp_host->mr->rkey);
+                buf->len = cpu_to_be32(sg_dma_len(scat));
+
+                len = sizeof (struct srp_cmd) +
+                        sizeof (struct srp_direct_buf);
+        } else {
+                struct srp_indirect_buf *buf = (void *) cmd->add_data;
+                u32 datalen = 0;
+
+                fmt = SRP_DATA_DESC_INDIRECT;
+
+                if (scmnd->sc_data_direction == DMA_TO_DEVICE)
+                        cmd->data_out_desc_cnt = count;
+                else
+                        cmd->data_in_desc_cnt = count;
+
+                buf->table_desc.va  = cpu_to_be64(req->cmd->dma +
+                                                  sizeof *cmd +
+                                                  sizeof *buf);
+                buf->table_desc.key =
+                        cpu_to_be32(target->srp_host->mr->rkey);
+                buf->table_desc.len =
+                        cpu_to_be32(count * sizeof (struct srp_direct_buf));
+
+                for (i = 0; i < count; ++i) {
+                        buf->desc_list[i].va  = cpu_to_be64(sg_dma_address(&scat[i]));
+                        buf->desc_list[i].key =
+                                cpu_to_be32(target->srp_host->mr->rkey);
+                        buf->desc_list[i].len = cpu_to_be32(sg_dma_len(&scat[i]));
+
+                        datalen += sg_dma_len(&scat[i]);
+                }
+
+                buf->len = cpu_to_be32(datalen);
+
+                len = sizeof (struct srp_cmd) +
+                        sizeof (struct srp_indirect_buf) +
+                        count * sizeof (struct srp_direct_buf);
         }
 
         if (scmnd->sc_data_direction == DMA_TO_DEVICE)
@@ -600,7 +586,6 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
         else
                 cmd->buf_fmt = fmt;
 
-
         return len;
 }
 
@@ -608,20 +593,28 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
                            struct srp_target_port *target,
                            struct srp_request *req)
 {
+        struct scatterlist *scat;
+        int nents;
+
         if (!scmnd->request_buffer ||
             (scmnd->sc_data_direction != DMA_TO_DEVICE &&
              scmnd->sc_data_direction != DMA_FROM_DEVICE))
                 return;
 
-        if (scmnd->use_sg)
-                dma_unmap_sg(target->srp_host->dev->dma_device,
-                             (struct scatterlist *) scmnd->request_buffer,
-                             scmnd->use_sg, scmnd->sc_data_direction);
-        else
-                dma_unmap_single(target->srp_host->dev->dma_device,
-                                 pci_unmap_addr(req, direct_mapping),
-                                 scmnd->request_bufflen,
-                                 scmnd->sc_data_direction);
+        /*
+         * This handling of non-SG commands can be killed when the
+         * SCSI midlayer no longer generates non-SG commands.
+         */
+        if (likely(scmnd->use_sg)) {
+                nents = scmnd->use_sg;
+                scat = (struct scatterlist *) scmnd->request_buffer;
+        } else {
+                nents = 1;
+                scat = (struct scatterlist *) scmnd->request_buffer;
+        }
+
+        dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
+                     scmnd->sc_data_direction);
 }
 
 static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 4e7727df32f1..bd7f7c3115de 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -38,6 +38,7 @@
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
+#include <linux/scatterlist.h>
 
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
@@ -94,7 +95,11 @@ struct srp_request {
         struct scsi_cmnd       *scmnd;
         struct srp_iu          *cmd;
         struct srp_iu          *tsk_mgmt;
-        DECLARE_PCI_UNMAP_ADDR(direct_mapping)
+        /*
+         * Fake scatterlist used when scmnd->use_sg==0.  Can be killed
+         * when the SCSI midlayer no longer generates non-SG commands.
+         */
+        struct scatterlist      fake_sg;
         struct completion       done;
         short                   next;
         u8                      cmd_done;