author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-15 19:51:54 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-15 19:51:54 -0400
commit	bc06cffdec85d487c77109dffcd2f285bdc502d3 (patch)
tree	adc6e6398243da87e66c56102840597a329183a0 /drivers/infiniband/ulp
parent	d3502d7f25b22cfc9762bf1781faa9db1bb3be2e (diff)
parent	9413d7b8aa777dd1fc7db9563ce5e80d769fe7b5 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (166 commits)
[SCSI] ibmvscsi: convert to use the data buffer accessors
[SCSI] dc395x: convert to use the data buffer accessors
[SCSI] ncr53c8xx: convert to use the data buffer accessors
[SCSI] sym53c8xx: convert to use the data buffer accessors
[SCSI] ppa: coding police and printk levels
[SCSI] aic7xxx_old: remove redundant GFP_ATOMIC from kmalloc
[SCSI] i2o: remove redundant GFP_ATOMIC from kmalloc from device.c
[SCSI] remove the dead CYBERSTORMIII_SCSI option
[SCSI] don't build scsi_dma_{map,unmap} for !HAS_DMA
[SCSI] Clean up scsi_add_lun a bit
[SCSI] 53c700: Remove printk, which triggers because of low scsi clock on SNI RMs
[SCSI] sni_53c710: Cleanup
[SCSI] qla4xxx: Fix underrun/overrun conditions
[SCSI] megaraid_mbox: use mutex instead of semaphore
[SCSI] aacraid: add 51245, 51645 and 52245 adapters to documentation.
[SCSI] qla2xxx: update version to 8.02.00-k1.
[SCSI] qla2xxx: add support for NPIV
[SCSI] stex: use resid for xfer len information
[SCSI] Add Brownie 1200U3P to blacklist
[SCSI] scsi.c: convert to use the data buffer accessors
...
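
Most of the conversions above follow the same pattern: instead of reading struct scsi_cmnd fields directly (request_buffer, use_sg, request_bufflen, resid), a driver reaches the command's data through the accessors scsi_sglist(), scsi_sg_count(), scsi_bufflen() and scsi_set_resid(), and the data is always presented as a scatterlist. A minimal sketch of the field-to-accessor mapping; foo_build_io() is a hypothetical helper, not code from any of the drivers above:

#include <linux/kernel.h>
#include <scsi/scsi_cmnd.h>

/* hypothetical I/O setup path, written against the accessor API */
static void foo_build_io(struct scsi_cmnd *sc)
{
	struct scatterlist *sg = scsi_sglist(sc);   /* was sc->request_buffer (+ use_sg check) */
	int nents = scsi_sg_count(sc);              /* was sc->use_sg */
	unsigned int len = scsi_bufflen(sc);        /* was sc->request_bufflen */

	pr_debug("mapping %d segments, %u bytes total (sg at %p)\n",
		 nents, len, sg);

	/* ... hand sg/nents to the HBA here ... */

	scsi_set_resid(sc, 0);                      /* was sc->resid = 0 */
}

Since the accessors guarantee a scatterlist even for single-buffer commands, the per-driver "convert a flat buffer into a one-entry SG" fallbacks (see the iser_initiator.c and ib_srp.c hunks below) can simply be deleted.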
Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--	drivers/infiniband/ulp/iser/iscsi_iser.c	40
-rw-r--r--	drivers/infiniband/ulp/iser/iscsi_iser.h	4
-rw-r--r--	drivers/infiniband/ulp/iser/iser_initiator.c	14
-rw-r--r--	drivers/infiniband/ulp/iser/iser_verbs.c	4
-rw-r--r--	drivers/infiniband/ulp/srp/ib_srp.c	63
-rw-r--r--	drivers/infiniband/ulp/srp/ib_srp.h	5
6 files changed, 54 insertions, 76 deletions
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index dd221eda3ea6..effdee299b0c 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -134,19 +134,9 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
 {
 	struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
 	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
-	struct scsi_cmnd *sc = ctask->sc;
 
 	iser_ctask->command_sent = 0;
 	iser_ctask->iser_conn = iser_conn;
-
-	if (sc->sc_data_direction == DMA_TO_DEVICE) {
-		BUG_ON(ctask->total_length == 0);
-
-		debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
-			   ctask->itt, ctask->total_length, ctask->imm_count,
-			   ctask->unsol_count);
-	}
-
 	iser_ctask_rdma_init(iser_ctask);
 }
 
@@ -219,6 +209,14 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
 	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
 	int error = 0;
 
+	if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
+		BUG_ON(scsi_bufflen(ctask->sc) == 0);
+
+		debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+			   ctask->itt, scsi_bufflen(ctask->sc),
+			   ctask->imm_count, ctask->unsol_count);
+	}
+
 	debug_scsi("ctask deq [cid %d itt 0x%x]\n",
 		   conn->id, ctask->itt);
 
@@ -375,7 +373,8 @@ static struct iscsi_transport iscsi_iser_transport;
 static struct iscsi_cls_session *
 iscsi_iser_session_create(struct iscsi_transport *iscsit,
 			  struct scsi_transport_template *scsit,
-			  uint32_t initial_cmdsn, uint32_t *hostno)
+			  uint16_t cmds_max, uint16_t qdepth,
+			  uint32_t initial_cmdsn, uint32_t *hostno)
 {
 	struct iscsi_cls_session *cls_session;
 	struct iscsi_session *session;
@@ -386,7 +385,13 @@ iscsi_iser_session_create(struct iscsi_transport *iscsit,
 	struct iscsi_iser_cmd_task *iser_ctask;
 	struct iser_desc *desc;
 
+	/*
+	 * we do not support setting can_queue cmd_per_lun from userspace yet
+	 * because we preallocate so many resources
+	 */
 	cls_session = iscsi_session_setup(iscsit, scsit,
+					  ISCSI_DEF_XMIT_CMDS_MAX,
+					  ISCSI_MAX_CMD_PER_LUN,
 					  sizeof(struct iscsi_iser_cmd_task),
 					  sizeof(struct iser_desc),
 					  initial_cmdsn, &hn);
@@ -545,7 +550,7 @@ iscsi_iser_ep_disconnect(__u64 ep_handle)
 static struct scsi_host_template iscsi_iser_sht = {
 	.name = "iSCSI Initiator over iSER, v." DRV_VER,
 	.queuecommand = iscsi_queuecommand,
-	.can_queue = ISCSI_XMIT_CMDS_MAX - 1,
+	.can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
 	.sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
 	.max_sectors = 1024,
 	.cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
@@ -574,8 +579,12 @@ static struct iscsi_transport iscsi_iser_transport = {
 				  ISCSI_EXP_STATSN |
 				  ISCSI_PERSISTENT_PORT |
 				  ISCSI_PERSISTENT_ADDRESS |
-				  ISCSI_TARGET_NAME |
-				  ISCSI_TPGT,
+				  ISCSI_TARGET_NAME | ISCSI_TPGT |
+				  ISCSI_USERNAME | ISCSI_PASSWORD |
+				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN,
+	.host_param_mask = ISCSI_HOST_HWADDRESS |
+			   ISCSI_HOST_NETDEV_NAME |
+			   ISCSI_HOST_INITIATOR_NAME,
 	.host_template = &iscsi_iser_sht,
 	.conndata_size = sizeof(struct iscsi_conn),
 	.max_lun = ISCSI_ISER_MAX_LUN,
@@ -592,6 +601,9 @@ static struct iscsi_transport iscsi_iser_transport = {
 	.get_session_param = iscsi_session_get_param,
 	.start_conn = iscsi_iser_conn_start,
 	.stop_conn = iscsi_conn_stop,
+	/* iscsi host params */
+	.get_host_param = iscsi_host_get_param,
+	.set_host_param = iscsi_host_set_param,
 	/* IO */
 	.send_pdu = iscsi_conn_send_pdu,
 	.get_stats = iscsi_iser_conn_get_stats,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 8960196ffb0f..e2353701e8bb 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -98,7 +98,7 @@
 #define ISER_MAX_TX_MISC_PDUS	6 /* NOOP_OUT(2), TEXT(1),       *
 				   * SCSI_TMFUNC(2), LOGOUT(1) */
 
-#define ISER_QP_MAX_RECV_DTOS	(ISCSI_XMIT_CMDS_MAX + \
+#define ISER_QP_MAX_RECV_DTOS	(ISCSI_DEF_XMIT_CMDS_MAX + \
 				 ISER_MAX_RX_MISC_PDUS + \
 				 ISER_MAX_TX_MISC_PDUS)
 
@@ -110,7 +110,7 @@
 
 #define ISER_INFLIGHT_DATAOUTS	8
 
-#define ISER_QP_MAX_REQ_DTOS	(ISCSI_XMIT_CMDS_MAX * \
+#define ISER_QP_MAX_REQ_DTOS	(ISCSI_DEF_XMIT_CMDS_MAX * \
 				 (1 + ISER_INFLIGHT_DATAOUTS) + \
 				 ISER_MAX_TX_MISC_PDUS + \
 				 ISER_MAX_RX_MISC_PDUS)
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 3651072f6c1f..9ea5b9aaba7c 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -351,18 +351,12 @@ int iser_send_command(struct iscsi_conn *conn,
 	else
 		data_buf = &iser_ctask->data[ISER_DIR_OUT];
 
-	if (sc->use_sg) { /* using a scatter list */
-		data_buf->buf = sc->request_buffer;
-		data_buf->size = sc->use_sg;
-	} else if (sc->request_bufflen) {
-		/* using a single buffer - convert it into one entry SG */
-		sg_init_one(&data_buf->sg_single,
-			    sc->request_buffer, sc->request_bufflen);
-		data_buf->buf = &data_buf->sg_single;
-		data_buf->size = 1;
+	if (scsi_sg_count(sc)) { /* using a scatter list */
+		data_buf->buf = scsi_sglist(sc);
+		data_buf->size = scsi_sg_count(sc);
 	}
 
-	data_buf->data_len = sc->request_bufflen;
+	data_buf->data_len = scsi_bufflen(sc);
 
 	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
 		err = iser_prepare_read_cmd(ctask, edtl);
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 3702e2375553..2044de1164ac 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -155,8 +155,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 	params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
 	/* make the pool size twice the max number of SCSI commands *
 	 * the ML is expected to queue, watermark for unmap at 50%  */
-	params.pool_size = ISCSI_XMIT_CMDS_MAX * 2;
-	params.dirty_watermark = ISCSI_XMIT_CMDS_MAX;
+	params.pool_size = ISCSI_DEF_XMIT_CMDS_MAX * 2;
+	params.dirty_watermark = ISCSI_DEF_XMIT_CMDS_MAX;
 	params.cache = 0;
 	params.flush_function = NULL;
 	params.access = (IB_ACCESS_LOCAL_WRITE |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 39bf057fbc43..f01ca182f226 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -455,10 +455,7 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
 			   struct srp_target_port *target,
 			   struct srp_request *req)
 {
-	struct scatterlist *scat;
-	int nents;
-
-	if (!scmnd->request_buffer ||
+	if (!scsi_sglist(scmnd) ||
 	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
 	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
 		return;
@@ -468,20 +465,8 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
 		req->fmr = NULL;
 	}
 
-	/*
-	 * This handling of non-SG commands can be killed when the
-	 * SCSI midlayer no longer generates non-SG commands.
-	 */
-	if (likely(scmnd->use_sg)) {
-		nents = scmnd->use_sg;
-		scat = scmnd->request_buffer;
-	} else {
-		nents = 1;
-		scat = &req->fake_sg;
-	}
-
-	ib_dma_unmap_sg(target->srp_host->dev->dev, scat, nents,
-			scmnd->sc_data_direction);
+	ib_dma_unmap_sg(target->srp_host->dev->dev, scsi_sglist(scmnd),
+			scsi_sg_count(scmnd), scmnd->sc_data_direction);
 }
 
 static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
@@ -595,6 +580,7 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
 	int ret;
 	struct srp_device *dev = target->srp_host->dev;
 	struct ib_device *ibdev = dev->dev;
+	struct scatterlist *sg;
 
 	if (!dev->fmr_pool)
 		return -ENODEV;
@@ -604,16 +590,16 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
 		return -EINVAL;
 
 	len = page_cnt = 0;
-	for (i = 0; i < sg_cnt; ++i) {
-		unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+	scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
+		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
 
-		if (ib_sg_dma_address(ibdev, &scat[i]) & ~dev->fmr_page_mask) {
+		if (ib_sg_dma_address(ibdev, sg) & ~dev->fmr_page_mask) {
 			if (i > 0)
 				return -EINVAL;
 			else
 				++page_cnt;
 		}
-		if ((ib_sg_dma_address(ibdev, &scat[i]) + dma_len) &
+		if ((ib_sg_dma_address(ibdev, sg) + dma_len) &
 		    ~dev->fmr_page_mask) {
 			if (i < sg_cnt - 1)
 				return -EINVAL;
@@ -633,12 +619,12 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
 		return -ENOMEM;
 
 	page_cnt = 0;
-	for (i = 0; i < sg_cnt; ++i) {
-		unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+	scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
+		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
 
 		for (j = 0; j < dma_len; j += dev->fmr_page_size)
 			dma_pages[page_cnt++] =
-				(ib_sg_dma_address(ibdev, &scat[i]) &
+				(ib_sg_dma_address(ibdev, sg) &
 				 dev->fmr_page_mask) + j;
 	}
 
@@ -673,7 +659,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 	struct srp_device *dev;
 	struct ib_device *ibdev;
 
-	if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
+	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
 		return sizeof (struct srp_cmd);
 
 	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
@@ -683,18 +669,8 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 		return -EINVAL;
 	}
 
-	/*
-	 * This handling of non-SG commands can be killed when the
-	 * SCSI midlayer no longer generates non-SG commands.
-	 */
-	if (likely(scmnd->use_sg)) {
-		nents = scmnd->use_sg;
-		scat = scmnd->request_buffer;
-	} else {
-		nents = 1;
-		scat = &req->fake_sg;
-		sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
-	}
+	nents = scsi_sg_count(scmnd);
+	scat = scsi_sglist(scmnd);
 
 	dev = target->srp_host->dev;
 	ibdev = dev->dev;
@@ -724,6 +700,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 		 * descriptor.
 		 */
 		struct srp_indirect_buf *buf = (void *) cmd->add_data;
+		struct scatterlist *sg;
 		u32 datalen = 0;
 		int i;
 
@@ -732,11 +709,11 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 			sizeof (struct srp_indirect_buf) +
 			count * sizeof (struct srp_direct_buf);
 
-		for (i = 0; i < count; ++i) {
-			unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+		scsi_for_each_sg(scmnd, sg, count, i) {
+			unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
 
 			buf->desc_list[i].va =
-				cpu_to_be64(ib_sg_dma_address(ibdev, &scat[i]));
+				cpu_to_be64(ib_sg_dma_address(ibdev, sg));
 			buf->desc_list[i].key =
 				cpu_to_be32(dev->mr->rkey);
 			buf->desc_list[i].len = cpu_to_be32(dma_len);
@@ -802,9 +779,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 	}
 
 	if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
-		scmnd->resid = be32_to_cpu(rsp->data_out_res_cnt);
+		scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
 	else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
-		scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);
+		scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
 
 	if (!req->tsk_mgmt) {
 		scmnd->host_scribble = (void *) -1L;
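
The srp_map_fmr() and srp_map_data() hunks above also switch from indexing the scatterlist array (&scat[i]) to the scsi_for_each_sg() iterator. A minimal sketch of that iterator pattern, using a hypothetical foo_mapped_len() helper rather than the driver's own code:

#include <scsi/scsi_cmnd.h>
#include <rdma/ib_verbs.h>

/* hypothetical helper: sum the DMA-mapped lengths of a command's segments */
static u64 foo_mapped_len(struct ib_device *ibdev, struct scsi_cmnd *sc, int count)
{
	struct scatterlist *sg;
	u64 total = 0;
	int i;

	scsi_for_each_sg(sc, sg, count, i)
		total += ib_sg_dma_len(ibdev, sg);  /* mapped length of segment i */

	return total;
}

Here count would come from the DMA-mapping step (e.g. the value ib_dma_map_sg() returned), the same way srp_map_data() feeds its mapped count into the loop above.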
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 1d53c7bc368f..e3573e7038c4 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -106,11 +106,6 @@ struct srp_request {
 	struct srp_iu *cmd;
 	struct srp_iu *tsk_mgmt;
 	struct ib_pool_fmr *fmr;
-	/*
-	 * Fake scatterlist used when scmnd->use_sg==0. Can be killed
-	 * when the SCSI midlayer no longer generates non-SG commands.
-	 */
-	struct scatterlist fake_sg;
 	struct completion done;
 	short index;
 	u8 cmd_done;