author		Linus Torvalds <torvalds@linux-foundation.org>	2011-07-22 19:01:14 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-07-22 19:01:14 -0400
commit		72f96e0e38d7e29ba16dcfd824ecaebe38b8293e (patch)
tree		2100f3333a993b529376add583985416f83002bf
parent		17413f5acd03224bcd09eefc5c4088894e832cad (diff)
parent		fa4951595648c14754621c99a07c47c9b9dcf05b (diff)
Merge branch 'for-linus-core' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
* 'for-linus-core' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (38 commits)
target: Bump version to v4.1.0-rc1-ml
target: remove custom hex2bin() implementation
target: fix typo Assoication -> Association
target: Update QUEUE ALGORITHM MODIFIER control page default
target: ->map_task_SG conversion to ->map_control_SG and ->map_data_SG
target: Follow up core updates from AGrover and HCH (round 4)
target: Eliminate usage of struct se_mem
target: Pass 2nd param of transport_split_cdb by value
target: Enforce 1 page max for control cdb buffer sizes
target: Make all control CDBs scatter-gather
target: Implement Block Device Characteristics VPD page
target: Fix reporting of supported VPD pages
target: Allow for built-in target modules
tcm_fc: Convert to wake_up_process and schedule_timeout_interruptible
tcm_fc: Makefile cleanups
loopback: Fix memory leak in tcm_loop_make_scsi_hba()
loopback: Remove duplicate scsi/scsi_tcq.h include
loopback: off by one in tcm_loop_make_naa_tpg()
target/iblock: Remove unused iblock_dev members
target/iblock: Use request_queue->nr_request for se_device defaults
...
37 files changed, 4057 insertions(+), 5607 deletions(-)
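One shortlog entry above, "target: remove custom hex2bin() implementation", swaps a driver-local hex parser for the shared kernel helper in lib/hexdump.c. A minimal sketch of what using it looks like (hex2bin() was void-returning in this 2011-era tree; later kernels return int so callers can check for bad input; parse_wwn() is a hypothetical wrapper for illustration):

	#include <linux/kernel.h>	/* hex2bin() */
	#include <linux/types.h>

	/* Hypothetical helper: decode an ASCII hex WWN such as
	 * "0011223344556677" into its raw bytes with the shared routine
	 * instead of an open-coded nibble parser. */
	static void parse_wwn(u8 *dst, const char *hex_str, size_t bytes)
	{
		hex2bin(dst, hex_str, bytes);	/* writes 'bytes' output bytes,
						 * consuming 2*bytes hex chars */
	}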
diff --git a/drivers/target/loopback/Kconfig b/drivers/target/loopback/Kconfig
index 57dcbc2d711b..abe8ecbcdf06 100644
--- a/drivers/target/loopback/Kconfig
+++ b/drivers/target/loopback/Kconfig
@@ -3,9 +3,3 @@ config LOOPBACK_TARGET
 	help
 	  Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD
 	  fabric loopback module.
-
-config LOOPBACK_TARGET_CDB_DEBUG
-	bool "TCM loopback fabric module CDB debug code"
-	depends on LOOPBACK_TARGET
-	help
-	  Say Y here to enable the TCM loopback fabric module CDB debug code
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 70c2e7fa6664..aa2d67997235 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -31,7 +31,6 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_tcq.h>
 
 #include <target/target_core_base.h>
 #include <target/target_core_transport.h>
@@ -80,7 +79,7 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
 
 	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
 	if (!tl_cmd) {
-		printk(KERN_ERR "Unable to allocate struct tcm_loop_cmd\n");
+		pr_err("Unable to allocate struct tcm_loop_cmd\n");
 		set_host_byte(sc, DID_ERROR);
 		return NULL;
 	}
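The hunk above is the first of many printk(KERN_ERR ...) to pr_err() conversions in this diff. A minimal sketch of the pattern; the "tcm_loop: " prefix is an assumption for illustration, not something this file necessarily defines:

	/* pr_err(fmt, ...) expands to printk(KERN_ERR pr_fmt(fmt), ...),
	 * so defining pr_fmt once prefixes every message from this file. */
	#define pr_fmt(fmt) "tcm_loop: " fmt	/* assumed prefix; must precede the include */

	#include <linux/printk.h>

	static void report_alloc_failure(void)
	{
		pr_err("Unable to allocate struct tcm_loop_cmd\n");
		/* logs "tcm_loop: Unable to allocate struct tcm_loop_cmd" at KERN_ERR */
	}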
@@ -118,17 +117,16 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
 	 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
 	 */
 	if (scsi_bidi_cmnd(sc))
-		T_TASK(se_cmd)->t_tasks_bidi = 1;
+		se_cmd->t_tasks_bidi = 1;
 	/*
 	 * Locate the struct se_lun pointer and attach it to struct se_cmd
 	 */
-	if (transport_get_lun_for_cmd(se_cmd, NULL, tl_cmd->sc->device->lun) < 0) {
+	if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
 		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 		set_host_byte(sc, DID_NO_CONNECT);
 		return NULL;
 	}
 
-	transport_device_setup_cmd(se_cmd);
 	return se_cmd;
 }
 
@@ -143,17 +141,17 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 				struct tcm_loop_cmd, tl_se_cmd);
 	struct scsi_cmnd *sc = tl_cmd->sc;
-	void *mem_ptr, *mem_bidi_ptr = NULL;
-	u32 sg_no_bidi = 0;
+	struct scatterlist *sgl_bidi = NULL;
+	u32 sgl_bidi_count = 0;
 	int ret;
 	/*
 	 * Allocate the necessary tasks to complete the received CDB+data
 	 */
-	ret = transport_generic_allocate_tasks(se_cmd, tl_cmd->sc->cmnd);
-	if (ret == -1) {
+	ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
+	if (ret == -ENOMEM) {
 		/* Out of Resources */
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
-	} else if (ret == -2) {
+	} else if (ret == -EINVAL) {
 		/*
 		 * Handle case for SAM_STAT_RESERVATION_CONFLICT
 		 */
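Note how the hunk above replaces the magic return values -1 and -2 with -ENOMEM and -EINVAL, so callers branch on standard errno codes rather than positional meanings. A minimal user-space-style sketch of the convention (demo_allocate() and classify() are hypothetical):

	#include <errno.h>

	/* Hypothetical allocator following the kernel convention:
	 * 0 on success, negative errno on failure. */
	static int demo_allocate(int fail_mode)
	{
		if (fail_mode == 1)
			return -ENOMEM;	/* out of resources */
		if (fail_mode == 2)
			return -EINVAL;	/* bad or conflicting arguments */
		return 0;
	}

	static const char *classify(int ret)
	{
		if (ret == -ENOMEM)
			return "out of resources";
		if (ret == -EINVAL)
			return "invalid request";
		return "ok";
	}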
@@ -165,35 +163,21 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 		 */
 		return PYX_TRANSPORT_USE_SENSE_REASON;
 	}
+
 	/*
-	 * Setup the struct scatterlist memory from the received
-	 * struct scsi_cmnd.
+	 * For BIDI commands, pass in the extra READ buffer
+	 * to transport_generic_map_mem_to_cmd() below..
 	 */
-	if (scsi_sg_count(sc)) {
-		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM;
-		mem_ptr = (void *)scsi_sglist(sc);
-		/*
-		 * For BIDI commands, pass in the extra READ buffer
-		 * to transport_generic_map_mem_to_cmd() below..
-		 */
-		if (T_TASK(se_cmd)->t_tasks_bidi) {
-			struct scsi_data_buffer *sdb = scsi_in(sc);
+	if (se_cmd->t_tasks_bidi) {
+		struct scsi_data_buffer *sdb = scsi_in(sc);
 
-			mem_bidi_ptr = (void *)sdb->table.sgl;
-			sg_no_bidi = sdb->table.nents;
-		}
-	} else {
-		/*
-		 * Used for DMA_NONE
-		 */
-		mem_ptr = NULL;
+		sgl_bidi = sdb->table.sgl;
+		sgl_bidi_count = sdb->table.nents;
 	}
-	/*
-	 * Map the SG memory into struct se_mem->page linked list using the same
-	 * physical memory at sg->page_link.
-	 */
-	ret = transport_generic_map_mem_to_cmd(se_cmd, mem_ptr,
-			scsi_sg_count(sc), mem_bidi_ptr, sg_no_bidi);
+
+	/* Tell the core about our preallocated memory */
+	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
 	if (ret < 0)
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
 
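The rewritten mapping above hands the SCSI midlayer's preallocated scatterlist straight to the target core, dropping the void-pointer bookkeeping. A hedged sketch of what walking such a scatterlist looks like with the standard accessors the hunk forwards (demo_count_bytes() is hypothetical):

	#include <linux/scatterlist.h>
	#include <scsi/scsi_cmnd.h>

	/* Hypothetical: total up the data bytes a scsi_cmnd carries, using
	 * the same scsi_sglist()/scsi_sg_count() pair passed to the core. */
	static u32 demo_count_bytes(struct scsi_cmnd *sc)
	{
		struct scatterlist *sg;
		u32 total = 0;
		int i;

		for_each_sg(scsi_sglist(sc), sg, scsi_sg_count(sc), i)
			total += sg->length;

		return total;	/* normally equals scsi_bufflen(sc) */
	}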
@@ -216,13 +200,10 @@ static void tcm_loop_check_stop_free(struct se_cmd *se_cmd)
 	 * Release the struct se_cmd, which will make a callback to release
 	 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
 	 */
-	transport_generic_free_cmd(se_cmd, 0, 1, 0);
+	transport_generic_free_cmd(se_cmd, 0, 0);
 }
 
-/*
- * Called from struct target_core_fabric_ops->release_cmd_to_pool()
- */
-static void tcm_loop_deallocate_core_cmd(struct se_cmd *se_cmd)
+static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
 {
 	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 				struct tcm_loop_cmd, tl_se_cmd);
@@ -300,7 +281,7 @@ static int tcm_loop_queuecommand(
 	struct tcm_loop_hba *tl_hba;
 	struct tcm_loop_tpg *tl_tpg;
 
-	TL_CDB_DEBUG("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
+	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
 		" scsi_buf_len: %u\n", sc->device->host->host_no,
 		sc->device->id, sc->device->channel, sc->device->lun,
 		sc->cmnd[0], scsi_bufflen(sc));
@@ -350,7 +331,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 	 */
 	tl_nexus = tl_hba->tl_nexus;
 	if (!tl_nexus) {
-		printk(KERN_ERR "Unable to perform device reset without"
+		pr_err("Unable to perform device reset without"
 				" active I_T Nexus\n");
 		return FAILED;
 	}
@@ -363,13 +344,13 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 
 	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
 	if (!tl_cmd) {
-		printk(KERN_ERR "Unable to allocate memory for tl_cmd\n");
+		pr_err("Unable to allocate memory for tl_cmd\n");
 		return FAILED;
 	}
 
 	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
 	if (!tl_tmr) {
-		printk(KERN_ERR "Unable to allocate memory for tl_tmr\n");
+		pr_err("Unable to allocate memory for tl_tmr\n");
 		goto release;
 	}
 	init_waitqueue_head(&tl_tmr->tl_tmr_wait);
@@ -384,14 +365,14 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 	/*
 	 * Allocate the LUN_RESET TMR
 	 */
-	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr,
+	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr,
 				TMR_LUN_RESET);
 	if (IS_ERR(se_cmd->se_tmr_req))
 		goto release;
 	/*
 	 * Locate the underlying TCM struct se_lun from sc->device->lun
 	 */
-	if (transport_get_lun_for_tmr(se_cmd, sc->device->lun) < 0)
+	if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0)
 		goto release;
 	/*
 	 * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
@@ -407,7 +388,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 			SUCCESS : FAILED;
 release:
 	if (se_cmd)
-		transport_generic_free_cmd(se_cmd, 1, 1, 0);
+		transport_generic_free_cmd(se_cmd, 1, 0);
 	else
 		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 	kfree(tl_tmr);
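The device-reset path above initializes a wait queue with init_waitqueue_head() and sleeps until tcm_loop_queue_tm_rsp() completes the TMR. A minimal sketch of that producer/consumer wait pattern with the standard wait-queue API; the demo names and the done flag are hypothetical:

	#include <linux/wait.h>

	/* Hypothetical stand-ins for tl_tmr->tl_tmr_wait and its flag. */
	static DECLARE_WAIT_QUEUE_HEAD(demo_tmr_wait);
	static int demo_tmr_done;

	static void demo_wait_for_tmr(void)
	{
		/* Sleeps until the response path sets demo_tmr_done. */
		wait_event(demo_tmr_wait, demo_tmr_done);
	}

	static void demo_queue_tm_rsp(void)
	{
		demo_tmr_done = 1;
		wake_up(&demo_tmr_wait);	/* wakes the reset thread above */
	}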
@@ -454,7 +435,7 @@ static int tcm_loop_driver_probe(struct device *dev)
 	sh = scsi_host_alloc(&tcm_loop_driver_template,
 			sizeof(struct tcm_loop_hba));
 	if (!sh) {
-		printk(KERN_ERR "Unable to allocate struct scsi_host\n");
+		pr_err("Unable to allocate struct scsi_host\n");
 		return -ENODEV;
 	}
 	tl_hba->sh = sh;
@@ -473,7 +454,7 @@ static int tcm_loop_driver_probe(struct device *dev)
 
 	error = scsi_add_host(sh, &tl_hba->dev);
 	if (error) {
-		printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
+		pr_err("%s: scsi_add_host failed\n", __func__);
 		scsi_host_put(sh);
 		return -ENODEV;
 	}
@@ -514,7 +495,7 @@ static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host
 
 	ret = device_register(&tl_hba->dev);
 	if (ret) {
-		printk(KERN_ERR "device_register() failed for"
+		pr_err("device_register() failed for"
 				" tl_hba->dev: %d\n", ret);
 		return -ENODEV;
 	}
@@ -532,24 +513,24 @@ static int tcm_loop_alloc_core_bus(void)
 
 	tcm_loop_primary = root_device_register("tcm_loop_0");
 	if (IS_ERR(tcm_loop_primary)) {
-		printk(KERN_ERR "Unable to allocate tcm_loop_primary\n");
+		pr_err("Unable to allocate tcm_loop_primary\n");
 		return PTR_ERR(tcm_loop_primary);
 	}
 
 	ret = bus_register(&tcm_loop_lld_bus);
 	if (ret) {
-		printk(KERN_ERR "bus_register() failed for tcm_loop_lld_bus\n");
+		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
 		goto dev_unreg;
 	}
 
 	ret = driver_register(&tcm_loop_driverfs);
 	if (ret) {
-		printk(KERN_ERR "driver_register() failed for"
+		pr_err("driver_register() failed for"
 				"tcm_loop_driverfs\n");
 		goto bus_unreg;
 	}
 
-	printk(KERN_INFO "Initialized TCM Loop Core Bus\n");
+	pr_debug("Initialized TCM Loop Core Bus\n");
 	return ret;
 
 bus_unreg:
@@ -565,7 +546,7 @@ static void tcm_loop_release_core_bus(void)
 	bus_unregister(&tcm_loop_lld_bus);
 	root_device_unregister(tcm_loop_primary);
 
-	printk(KERN_INFO "Releasing TCM Loop Core BUS\n");
+	pr_debug("Releasing TCM Loop Core BUS\n");
 }
 
 static char *tcm_loop_get_fabric_name(void)
@@ -593,7 +574,7 @@ static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
 	case SCSI_PROTOCOL_ISCSI:
 		return iscsi_get_fabric_proto_ident(se_tpg);
 	default:
-		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
 			" SAS emulation\n", tl_hba->tl_proto_id);
 		break;
 	}
@@ -649,7 +630,7 @@ static u32 tcm_loop_get_pr_transport_id(
 		return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
 					format_code, buf);
 	default:
-		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
 			" SAS emulation\n", tl_hba->tl_proto_id);
 		break;
 	}
@@ -679,7 +660,7 @@ static u32 tcm_loop_get_pr_transport_id_len(
 		return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
 					format_code);
 	default:
-		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
 			" SAS emulation\n", tl_hba->tl_proto_id);
 		break;
 	}
@@ -713,7 +694,7 @@ static char *tcm_loop_parse_pr_out_transport_id(
 		return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
 					port_nexus_ptr);
 	default:
-		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
 			" SAS emulation\n", tl_hba->tl_proto_id);
 		break;
 	}
@@ -762,7 +743,7 @@ static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
 
 	tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL);
 	if (!tl_nacl) {
-		printk(KERN_ERR "Unable to allocate struct tcm_loop_nacl\n");
+		pr_err("Unable to allocate struct tcm_loop_nacl\n");
 		return NULL;
 	}
 
@@ -784,16 +765,6 @@ static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
 	return 1;
 }
 
-static void tcm_loop_new_cmd_failure(struct se_cmd *se_cmd)
-{
-	/*
-	 * Since TCM_loop is already passing struct scatterlist data from
-	 * struct scsi_cmnd, no more Linux/SCSI failure dependent state need
-	 * to be handled here.
-	 */
-	return;
-}
-
 static int tcm_loop_is_state_remove(struct se_cmd *se_cmd)
 {
 	/*
@@ -882,7 +853,7 @@ static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
 				struct tcm_loop_cmd, tl_se_cmd);
 	struct scsi_cmnd *sc = tl_cmd->sc;
 
-	TL_CDB_DEBUG("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
+	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
 			" cdb: 0x%02x\n", sc, sc->cmnd[0]);
 
 	sc->result = SAM_STAT_GOOD;
@@ -897,14 +868,14 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
 				struct tcm_loop_cmd, tl_se_cmd);
 	struct scsi_cmnd *sc = tl_cmd->sc;
 
-	TL_CDB_DEBUG("tcm_loop_queue_status() called for scsi_cmnd: %p"
+	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
 			" cdb: 0x%02x\n", sc, sc->cmnd[0]);
 
 	if (se_cmd->sense_buffer &&
 	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
 	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
 
-		memcpy((void *)sc->sense_buffer, (void *)se_cmd->sense_buffer,
+		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
 				SCSI_SENSE_BUFFERSIZE);
 		sc->result = SAM_STAT_CHECK_CONDITION;
 		set_driver_byte(sc, DRIVER_SENSE);
@@ -972,7 +943,7 @@ static int tcm_loop_port_link(
 	 */
 	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Port Link Successful\n");
+	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
 	return 0;
 }
 
@@ -990,7 +961,7 @@ static void tcm_loop_port_unlink(
 	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
 				se_lun->unpacked_lun);
 	if (!sd) {
-		printk(KERN_ERR "Unable to locate struct scsi_device for %d:%d:"
+		pr_err("Unable to locate struct scsi_device for %d:%d:"
 			"%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
 		return;
 	}
@@ -1003,7 +974,7 @@ static void tcm_loop_port_unlink(
 	atomic_dec(&tl_tpg->tl_tpg_port_count);
 	smp_mb__after_atomic_dec();
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Port Unlink Successful\n");
+	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
 }
 
 /* End items for tcm_loop_port_cit */
@@ -1020,14 +991,14 @@ static int tcm_loop_make_nexus(
 	int ret = -ENOMEM;
 
 	if (tl_tpg->tl_hba->tl_nexus) {
-		printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n");
+		pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
 		return -EEXIST;
 	}
 	se_tpg = &tl_tpg->tl_se_tpg;
 
 	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
 	if (!tl_nexus) {
-		printk(KERN_ERR "Unable to allocate struct tcm_loop_nexus\n");
+		pr_err("Unable to allocate struct tcm_loop_nexus\n");
 		return -ENOMEM;
 	}
 	/*
@@ -1054,9 +1025,9 @@ static int tcm_loop_make_nexus(
 	 * transport_register_session()
 	 */
 	__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
-			tl_nexus->se_sess, (void *)tl_nexus);
+			tl_nexus->se_sess, tl_nexus);
 	tl_tpg->tl_hba->tl_nexus = tl_nexus;
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
+	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
 		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
 		name);
 	return 0;
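Several hunks in this diff, like the one above, drop (void *) casts that the series treats as noise: in C, any object pointer converts to void * implicitly. A short self-contained illustration (take_opaque() and demo_nexus are hypothetical):

	/* Conversion to void * is implicit; the cast adds nothing and can
	 * even hide a mistake if the argument type later changes. */
	static void take_opaque(void *private_data) { (void)private_data; }

	struct demo_nexus { int id; };

	static void demo(void)
	{
		struct demo_nexus nexus = { .id = 1 };

		take_opaque(&nexus);		/* preferred: implicit conversion */
		take_opaque((void *)&nexus);	/* legal but redundant cast */
	}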
@@ -1082,13 +1053,13 @@ static int tcm_loop_drop_nexus(
 		return -ENODEV;
 
 	if (atomic_read(&tpg->tl_tpg_port_count)) {
-		printk(KERN_ERR "Unable to remove TCM_Loop I_T Nexus with"
+		pr_err("Unable to remove TCM_Loop I_T Nexus with"
 			" active TPG port count: %d\n",
 			atomic_read(&tpg->tl_tpg_port_count));
 		return -EPERM;
 	}
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
+	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
 		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
 		tl_nexus->se_sess->se_node_acl->initiatorname);
 	/*
@@ -1144,7 +1115,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
 	 * tcm_loop_make_nexus()
 	 */
 	if (strlen(page) >= TL_WWN_ADDR_LEN) {
-		printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds"
+		pr_err("Emulated NAA Sas Address: %s, exceeds"
 				" max: %d\n", page, TL_WWN_ADDR_LEN);
 		return -EINVAL;
 	}
@@ -1153,7 +1124,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
 	ptr = strstr(i_port, "naa.");
 	if (ptr) {
 		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
-			printk(KERN_ERR "Passed SAS Initiator Port %s does not"
+			pr_err("Passed SAS Initiator Port %s does not"
 				" match target port protoid: %s\n", i_port,
 				tcm_loop_dump_proto_id(tl_hba));
 			return -EINVAL;
@@ -1164,7 +1135,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
 	ptr = strstr(i_port, "fc.");
 	if (ptr) {
 		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
-			printk(KERN_ERR "Passed FCP Initiator Port %s does not"
+			pr_err("Passed FCP Initiator Port %s does not"
 				" match target port protoid: %s\n", i_port,
 				tcm_loop_dump_proto_id(tl_hba));
 			return -EINVAL;
@@ -1175,7 +1146,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
 	ptr = strstr(i_port, "iqn.");
 	if (ptr) {
 		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
-			printk(KERN_ERR "Passed iSCSI Initiator Port %s does not"
+			pr_err("Passed iSCSI Initiator Port %s does not"
 				" match target port protoid: %s\n", i_port,
 				tcm_loop_dump_proto_id(tl_hba));
 			return -EINVAL;
@@ -1183,7 +1154,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
 		port_ptr = &i_port[0];
 		goto check_newline;
 	}
-	printk(KERN_ERR "Unable to locate prefix for emulated Initiator Port:"
+	pr_err("Unable to locate prefix for emulated Initiator Port:"
 			" %s\n", i_port);
 	return -EINVAL;
 	/*
@@ -1223,15 +1194,15 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
 
 	tpgt_str = strstr(name, "tpgt_");
 	if (!tpgt_str) {
-		printk(KERN_ERR "Unable to locate \"tpgt_#\" directory"
+		pr_err("Unable to locate \"tpgt_#\" directory"
 				" group\n");
 		return ERR_PTR(-EINVAL);
 	}
 	tpgt_str += 5; /* Skip ahead of "tpgt_" */
 	tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
 
-	if (tpgt > TL_TPGS_PER_HBA) {
-		printk(KERN_ERR "Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
+	if (tpgt >= TL_TPGS_PER_HBA) {
+		pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
 			" %u\n", tpgt, TL_TPGS_PER_HBA);
 		return ERR_PTR(-EINVAL);
 	}
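The > to >= change above (the "loopback: off by one in tcm_loop_make_naa_tpg()" fix from the shortlog) is a classic bounds fix: with TL_TPGS_PER_HBA valid groups indexed 0..TL_TPGS_PER_HBA-1, a tpgt equal to the count is already out of range. A minimal sketch with a hypothetical count:

	#define DEMO_TPGS_PER_HBA 32	/* hypothetical count, indices 0..31 */

	static int demo_tpgt_valid(unsigned short tpgt)
	{
		/* '>' would wrongly accept tpgt == DEMO_TPGS_PER_HBA, one
		 * past the last valid index; requiring tpgt < count does not. */
		return tpgt < DEMO_TPGS_PER_HBA;
	}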
@@ -1242,12 +1213,12 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
 	 * Register the tl_tpg as a emulated SAS TCM Target Endpoint
 	 */
 	ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
-			wwn, &tl_tpg->tl_se_tpg, (void *)tl_tpg,
+			wwn, &tl_tpg->tl_se_tpg, tl_tpg,
 			TRANSPORT_TPG_TYPE_NORMAL);
 	if (ret < 0)
 		return ERR_PTR(-ENOMEM);
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated Emulated %s"
+	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
 		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
 		config_item_name(&wwn->wwn_group.cg_item), tpgt);
 
@@ -1274,7 +1245,7 @@ void tcm_loop_drop_naa_tpg(
 	 */
 	core_tpg_deregister(se_tpg);
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated Emulated %s"
+	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
 		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
 		config_item_name(&wwn->wwn_group.cg_item), tpgt);
 }
@@ -1295,7 +1266,7 @@ struct se_wwn *tcm_loop_make_scsi_hba(
 
 	tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
 	if (!tl_hba) {
-		printk(KERN_ERR "Unable to allocate struct tcm_loop_hba\n");
+		pr_err("Unable to allocate struct tcm_loop_hba\n");
 		return ERR_PTR(-ENOMEM);
 	}
 	/*
@@ -1314,22 +1285,21 @@ struct se_wwn *tcm_loop_make_scsi_hba(
 		goto check_len;
 	}
 	ptr = strstr(name, "iqn.");
-	if (ptr) {
-		tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
-		goto check_len;
+	if (!ptr) {
+		pr_err("Unable to locate prefix for emulated Target "
+				"Port: %s\n", name);
+		ret = -EINVAL;
+		goto out;
 	}
-
-	printk(KERN_ERR "Unable to locate prefix for emulated Target Port:"
-			" %s\n", name);
-	return ERR_PTR(-EINVAL);
+	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
 
 check_len:
 	if (strlen(name) >= TL_WWN_ADDR_LEN) {
-		printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds"
+		pr_err("Emulated NAA %s Address: %s, exceeds"
 			" max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
 			TL_WWN_ADDR_LEN);
-		kfree(tl_hba);
-		return ERR_PTR(-EINVAL);
+		ret = -EINVAL;
+		goto out;
 	}
 	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
 
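The check_len guard above rejects names that would not fit before the snprintf() copy. Worth noting: snprintf() always NUL-terminates within its size, so with the strlen() check in place the copy into tl_wwn_address cannot overflow. A user-space sketch of the same pattern (demo_set_wwn() and the buffer size are hypothetical):

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	#define DEMO_WWN_ADDR_LEN 256	/* hypothetical, mirrors TL_WWN_ADDR_LEN */

	static char demo_wwn_address[DEMO_WWN_ADDR_LEN];

	static int demo_set_wwn(const char *name)
	{
		if (strlen(name) >= DEMO_WWN_ADDR_LEN)	/* '>=' leaves room for '\0' */
			return -EINVAL;

		/* snprintf() truncates and NUL-terminates at size - 1 regardless,
		 * so the copy is safe even without the length check above. */
		snprintf(demo_wwn_address, DEMO_WWN_ADDR_LEN, "%s", name);
		return 0;
	}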
@@ -1344,7 +1314,7 @@ check_len:
 
 	sh = tl_hba->sh;
 	tcm_loop_hba_no_cnt++;
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated emulated Target"
+	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
 		" %s Address: %s at Linux/SCSI Host ID: %d\n",
 		tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
 
@@ -1367,7 +1337,7 @@ void tcm_loop_drop_scsi_hba(
 	 */
 	device_unregister(&tl_hba->dev);
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated emulated Target"
+	pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
 		" SAS Address: %s at Linux/SCSI Host ID: %d\n",
 		config_item_name(&wwn->wwn_group.cg_item), host_no);
 }
@@ -1402,9 +1372,9 @@ static int tcm_loop_register_configfs(void)
 	 * Register the top level struct config_item_type with TCM core
 	 */
 	fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
-	if (!fabric) {
-		printk(KERN_ERR "tcm_loop_register_configfs() failed!\n");
-		return -1;
+	if (IS_ERR(fabric)) {
+		pr_err("tcm_loop_register_configfs() failed!\n");
+		return PTR_ERR(fabric);
 	}
 	/*
 	 * Setup the fabric API of function pointers used by target_core_mod
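The hunk above switches the fabric-registration check from a NULL test to the kernel's encoded-errno pointer convention: target_fabric_configfs_init() now returns ERR_PTR(-errno) on failure, which a plain !fabric test would miss. A minimal sketch of the ERR_PTR/IS_ERR/PTR_ERR trio (widget_create() is hypothetical):

	#include <linux/err.h>
	#include <linux/errno.h>

	struct widget { int id; };

	/* Hypothetical constructor: encodes the errno into the pointer itself. */
	static struct widget *widget_create(int fail)
	{
		static struct widget w;

		if (fail)
			return ERR_PTR(-ENOMEM);	/* not NULL: an encoded error */
		return &w;
	}

	static int demo_caller(void)
	{
		struct widget *w = widget_create(0);

		if (IS_ERR(w))			/* catches ERR_PTR() values */
			return PTR_ERR(w);	/* recovers the original -ENOMEM */
		return 0;
	}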
@@ -1436,19 +1406,11 @@ static int tcm_loop_register_configfs(void)
 					&tcm_loop_tpg_release_fabric_acl;
 	fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index;
 	/*
-	 * Since tcm_loop is mapping physical memory from Linux/SCSI
-	 * struct scatterlist arrays for each struct scsi_cmnd I/O,
-	 * we do not need TCM to allocate a iovec array for
-	 * virtual memory address mappings
-	 */
-	fabric->tf_ops.alloc_cmd_iovecs = NULL;
-	/*
 	 * Used for setting up remaining TCM resources in process context
 	 */
 	fabric->tf_ops.new_cmd_map = &tcm_loop_new_cmd_map;
 	fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free;
-	fabric->tf_ops.release_cmd_to_pool = &tcm_loop_deallocate_core_cmd;
-	fabric->tf_ops.release_cmd_direct = &tcm_loop_deallocate_core_cmd;
+	fabric->tf_ops.release_cmd = &tcm_loop_release_cmd;
 	fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session;
 	fabric->tf_ops.close_session = &tcm_loop_close_session;
 	fabric->tf_ops.stop_session = &tcm_loop_stop_session;
@@ -1465,7 +1427,6 @@ static int tcm_loop_register_configfs(void)
 					&tcm_loop_set_default_node_attributes;
 	fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag;
 	fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state;
-	fabric->tf_ops.new_cmd_failure = &tcm_loop_new_cmd_failure;
 	fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
 	fabric->tf_ops.queue_status = &tcm_loop_queue_status;
 	fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
@@ -1503,7 +1464,7 @@ static int tcm_loop_register_configfs(void)
 	 */
 	ret = target_fabric_configfs_register(fabric);
 	if (ret < 0) {
-		printk(KERN_ERR "target_fabric_configfs_register() for"
+		pr_err("target_fabric_configfs_register() for"
 				" TCM_Loop failed!\n");
 		target_fabric_configfs_free(fabric);
 		return -1;
@@ -1512,7 +1473,7 @@ static int tcm_loop_register_configfs(void)
 	 * Setup our local pointer to *fabric.
 	 */
 	tcm_loop_fabric_configfs = fabric;
-	printk(KERN_INFO "TCM_LOOP[0] - Set fabric ->"
+	pr_debug("TCM_LOOP[0] - Set fabric ->"
 			" tcm_loop_fabric_configfs\n");
 	return 0;
 }
@@ -1524,7 +1485,7 @@ static void tcm_loop_deregister_configfs(void)
 
 	target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
 	tcm_loop_fabric_configfs = NULL;
-	printk(KERN_INFO "TCM_LOOP[0] - Cleared"
+	pr_debug("TCM_LOOP[0] - Cleared"
 			" tcm_loop_fabric_configfs\n");
 }
 
@@ -1537,7 +1498,7 @@ static int __init tcm_loop_fabric_init(void)
 				__alignof__(struct tcm_loop_cmd),
 				0, NULL);
 	if (!tcm_loop_cmd_cache) {
-		printk(KERN_ERR "kmem_cache_create() for"
+		pr_debug("kmem_cache_create() for"
 			" tcm_loop_cmd_cache failed\n");
 		return -ENOMEM;
 	}
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 7e9f7ab45548..6b76c7a22bb0 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -16,12 +16,6 @@
  */
 #define TL_SCSI_MAX_CMD_LEN		32
 
-#ifdef CONFIG_LOOPBACK_TARGET_CDB_DEBUG
-# define TL_CDB_DEBUG(x...)		printk(KERN_INFO x)
-#else
-# define TL_CDB_DEBUG(x...)
-#endif
-
 struct tcm_loop_cmd {
 	/* State of Linux/SCSI CDB+Data descriptor */
 	u32 sc_cmd_state;
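The removed TL_CDB_DEBUG macro above is exactly the kind of per-driver debug wrapper pr_debug() replaces: instead of a custom Kconfig option, pr_debug() compiles to nothing unless DEBUG is defined and becomes runtime-switchable under CONFIG_DYNAMIC_DEBUG. A before/after sketch:

	#include <linux/printk.h>

	/* Old pattern, gated by a driver-private Kconfig option: */
	#ifdef CONFIG_LOOPBACK_TARGET_CDB_DEBUG
	# define TL_CDB_DEBUG(x...)	printk(KERN_INFO x)
	#else
	# define TL_CDB_DEBUG(x...)
	#endif

	static void demo_trace(unsigned char opcode)
	{
		/* New pattern: one standard call, no custom option. Compiled
		 * out unless DEBUG is set; per-callsite runtime control is
		 * available with dynamic debug. */
		pr_debug("got CDB: 0x%02x\n", opcode);
	}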
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 47abb42d9c36..98c98a3a0250 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -46,6 +46,14 @@ static int core_alua_set_tg_pt_secondary_state(
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
 	struct se_port *port, int explict, int offline);
 
+static u16 alua_lu_gps_counter;
+static u32 alua_lu_gps_count;
+
+static DEFINE_SPINLOCK(lu_gps_lock);
+static LIST_HEAD(lu_gps_list);
+
+struct t10_alua_lu_gp *default_lu_gp;
+
 /*
  * REPORT_TARGET_PORT_GROUPS
  *
@@ -53,16 +61,18 @@ static int core_alua_set_tg_pt_secondary_state(
  */
 int core_emulate_report_target_port_groups(struct se_cmd *cmd)
 {
-	struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
+	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
 	struct se_port *port;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
-	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	unsigned char *buf;
 	u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
 				    Target port group descriptor */
 
-	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
-	list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+	buf = transport_kmap_first_data_page(cmd);
+
+	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
 			tg_pt_gp_list) {
 		/*
 		 * PREF: Preferred target port bit, determine if this
@@ -124,7 +134,7 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
 		}
 		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 	}
-	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 	/*
 	 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
 	 */
@@ -133,6 +143,8 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
 	buf[2] = ((rd_len >> 8) & 0xff);
 	buf[3] = (rd_len & 0xff);
 
+	transport_kunmap_first_data_page(cmd);
+
 	return 0;
 }
 
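The ALUA emulation above stops reading t_task_buf directly and instead maps the command's first data page for the duration of the access. The invariant is that every transport_kmap_first_data_page() is paired with transport_kunmap_first_data_page() on every exit path. A hedged sketch of the pairing, assuming the 2011-era target-core API shown in this diff (demo_fill_header() is hypothetical):

	#include <target/target_core_base.h>
	#include <target/target_core_transport.h>

	/* Sketch only: fill a DataIN header under the kmap pairing used above. */
	static int demo_fill_header(struct se_cmd *cmd, u32 rd_len)
	{
		unsigned char *buf;

		buf = transport_kmap_first_data_page(cmd);

		buf[0] = ((rd_len >> 24) & 0xff);
		buf[1] = ((rd_len >> 16) & 0xff);
		buf[2] = ((rd_len >> 8) & 0xff);
		buf[3] = (rd_len & 0xff);

		transport_kunmap_first_data_page(cmd);	/* must match the kmap above */
		return 0;
	}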
@@ -143,45 +155,53 @@
  */
 int core_emulate_set_target_port_groups(struct se_cmd *cmd)
 {
-	struct se_device *dev = SE_DEV(cmd);
-	struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
-	struct se_port *port, *l_port = SE_LUN(cmd)->lun_sep;
-	struct se_node_acl *nacl = SE_SESS(cmd)->se_node_acl;
+	struct se_device *dev = cmd->se_dev;
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	struct se_port *port, *l_port = cmd->se_lun->lun_sep;
+	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
 	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
-	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
-	unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */
+	unsigned char *buf;
+	unsigned char *ptr;
 	u32 len = 4; /* Skip over RESERVED area in header */
 	int alua_access_state, primary = 0, rc;
 	u16 tg_pt_id, rtpi;
 
-	if (!(l_port))
+	if (!l_port)
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+	buf = transport_kmap_first_data_page(cmd);
+
 	/*
 	 * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed
 	 * for the local tg_pt_gp.
 	 */
 	l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
-	if (!(l_tg_pt_gp_mem)) {
-		printk(KERN_ERR "Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
-		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+	if (!l_tg_pt_gp_mem) {
+		pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
+		rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		goto out;
 	}
 	spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
 	l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
-	if (!(l_tg_pt_gp)) {
+	if (!l_tg_pt_gp) {
 		spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
-		printk(KERN_ERR "Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
-		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
+		rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		goto out;
 	}
 	rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
 	spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
 
-	if (!(rc)) {
-		printk(KERN_INFO "Unable to process SET_TARGET_PORT_GROUPS"
+	if (!rc) {
+		pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
 			" while TPGS_EXPLICT_ALUA is disabled\n");
-		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		goto out;
 	}
 
+	ptr = &buf[4]; /* Skip over RESERVED area in header */
+
 	while (len < cmd->data_length) {
 		alua_access_state = (ptr[0] & 0x0f);
 		/*
@@ -201,7 +221,8 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
 		 * REQUEST, and the additional sense code set to INVALID
 		 * FIELD IN PARAMETER LIST.
 		 */
-		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			goto out;
 		}
 		rc = -1;
 		/*
@@ -224,11 +245,11 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
 			 * Locate the matching target port group ID from
 			 * the global tg_pt_gp list
 			 */
-			spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 			list_for_each_entry(tg_pt_gp,
-					&T10_ALUA(su_dev)->tg_pt_gps_list,
+					&su_dev->t10_alua.tg_pt_gps_list,
 					tg_pt_gp_list) {
-				if (!(tg_pt_gp->tg_pt_gp_valid_id))
+				if (!tg_pt_gp->tg_pt_gp_valid_id)
 					continue;
 
 				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
@@ -236,24 +257,26 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
 
 				atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
 				smp_mb__after_atomic_inc();
-				spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+				spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 
 				rc = core_alua_do_port_transition(tg_pt_gp,
 						dev, l_port, nacl,
 						alua_access_state, 1);
 
-				spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+				spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 				atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
 				smp_mb__after_atomic_dec();
 				break;
 			}
-			spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 			/*
 			 * If not matching target port group ID can be located
 			 * throw an exception with ASCQ: INVALID_PARAMETER_LIST
 			 */
-			if (rc != 0)
-				return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			if (rc != 0) {
+				rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+				goto out;
+			}
 		} else {
 			/*
 			 * Extact the RELATIVE TARGET PORT IDENTIFIER to identify
@@ -287,14 +310,19 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
 			 * be located, throw an exception with ASCQ:
 			 * INVALID_PARAMETER_LIST
 			 */
-			if (rc != 0)
-				return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			if (rc != 0) {
+				rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+				goto out;
+			}
 		}
 
 		ptr += 4;
 		len += 4;
 	}
 
+out:
+	transport_kunmap_first_data_page(cmd);
+
 	return 0;
 }
 
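The conversion above replaces scattered early returns with a single out: label so the kmapped page is always unmapped. This single-exit idiom is the standard kernel way to guarantee cleanup on every error path. A compact self-contained sketch (the demo names are hypothetical):

	#include <errno.h>
	#include <stdio.h>

	/* Hypothetical resource: stands in for the kmapped page above. */
	static void demo_acquire(void) { printf("acquire\n"); }
	static void demo_release(void) { printf("release\n"); }

	static int demo_single_exit(int fail_step)
	{
		int rc = 0;

		demo_acquire();		/* e.g. transport_kmap_first_data_page() */

		if (fail_step == 1) {
			rc = -EINVAL;
			goto out;	/* no early return: cleanup still runs */
		}
		if (fail_step == 2) {
			rc = -ENOMEM;
			goto out;
		}
	out:
		demo_release();		/* single release on every path */
		return rc;
	}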
@@ -464,13 +492,13 @@ static int core_alua_state_check(
 	unsigned char *cdb,
 	u8 *alua_ascq)
 {
-	struct se_lun *lun = SE_LUN(cmd);
+	struct se_lun *lun = cmd->se_lun;
 	struct se_port *port = lun->lun_sep;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
 	int out_alua_state, nonop_delay_msecs;
 
-	if (!(port))
+	if (!port)
 		return 0;
 	/*
 	 * First, check for a struct se_port specific secondary ALUA target port
@@ -478,7 +506,7 @@ static int core_alua_state_check(
 	 */
 	if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
 		*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
-		printk(KERN_INFO "ALUA: Got secondary offline status for local"
+		pr_debug("ALUA: Got secondary offline status for local"
 				" target port\n");
 		*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
 		return 1;
@@ -520,9 +548,9 @@ static int core_alua_state_check(
 	 */
 	case ALUA_ACCESS_STATE_OFFLINE:
 	default:
-		printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n",
+		pr_err("Unknown ALUA access state: 0x%02x\n",
 			out_alua_state);
-		return -1;
+		return -EINVAL;
 	}
 
 	return 0;
@@ -552,8 +580,8 @@ static int core_alua_check_transition(int state, int *primary)
 		*primary = 0;
 		break;
 	default:
-		printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", state);
-		return -1;
+		pr_err("Unknown ALUA access state: 0x%02x\n", state);
+		return -EINVAL;
 	}
 
 	return 0;
@@ -610,7 +638,7 @@ int core_alua_check_nonop_delay(
 	 * The ALUA Active/NonOptimized access state delay can be disabled
 	 * in via configfs with a value of zero
	 */
-	if (!(cmd->alua_nonop_delay))
+	if (!cmd->alua_nonop_delay)
 		return 0;
 	/*
 	 * struct se_cmd->alua_nonop_delay gets set by a target port group
@@ -639,7 +667,7 @@ static int core_alua_write_tpg_metadata(
 
 	file = filp_open(path, flags, 0600);
 	if (IS_ERR(file) || !file || !file->f_dentry) {
-		printk(KERN_ERR "filp_open(%s) for ALUA metadata failed\n",
+		pr_err("filp_open(%s) for ALUA metadata failed\n",
 			path);
 		return -ENODEV;
 	}
@@ -653,7 +681,7 @@ static int core_alua_write_tpg_metadata(
 	set_fs(old_fs);
 
 	if (ret < 0) {
-		printk(KERN_ERR "Error writing ALUA metadata file: %s\n", path);
+		pr_err("Error writing ALUA metadata file: %s\n", path);
 		filp_close(file, NULL);
 		return -EIO;
 	}
@@ -750,7 +778,7 @@ static int core_alua_do_transition_tg_pt(
 		 * se_deve->se_lun_acl pointer may be NULL for a
 		 * entry created without explict Node+MappedLUN ACLs
 		 */
-		if (!(lacl))
+		if (!lacl)
 			continue;
 
 		if (explict &&
@@ -792,7 +820,7 @@ static int core_alua_do_transition_tg_pt(
 	 */
 	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
 
-	printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
+	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
 		" from primary access state %s to %s\n", (explict) ? "explict" :
 		"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
 		tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
@@ -823,8 +851,8 @@ int core_alua_do_port_transition(
 		return -EINVAL;
 
 	md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
-	if (!(md_buf)) {
-		printk("Unable to allocate buf for ALUA metadata\n");
+	if (!md_buf) {
+		pr_err("Unable to allocate buf for ALUA metadata\n");
 		return -ENOMEM;
 	}
 
@@ -839,7 +867,7 @@ int core_alua_do_port_transition(
 	 * we only do transition on the passed *l_tp_pt_gp, and not
 	 * on all of the matching target port groups IDs in default_lu_gp.
 	 */
-	if (!(lu_gp->lu_gp_id)) {
+	if (!lu_gp->lu_gp_id) {
 		/*
 		 * core_alua_do_transition_tg_pt() will always return
 		 * success.
@@ -866,12 +894,12 @@ int core_alua_do_port_transition(
 		smp_mb__after_atomic_inc();
 		spin_unlock(&lu_gp->lu_gp_lock);
 
-		spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 		list_for_each_entry(tg_pt_gp,
-				&T10_ALUA(su_dev)->tg_pt_gps_list,
+				&su_dev->t10_alua.tg_pt_gps_list,
 				tg_pt_gp_list) {
 
-			if (!(tg_pt_gp->tg_pt_gp_valid_id))
+			if (!tg_pt_gp->tg_pt_gp_valid_id)
 				continue;
 			/*
 			 * If the target behavior port asymmetric access state
@@ -893,7 +921,7 @@ int core_alua_do_port_transition(
 			}
 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			smp_mb__after_atomic_inc();
-			spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 			/*
 			 * core_alua_do_transition_tg_pt() will always return
 			 * success.
@@ -901,11 +929,11 @@ int core_alua_do_port_transition(
 			core_alua_do_transition_tg_pt(tg_pt_gp, port,
 					nacl, md_buf, new_state, explict);
 
-			spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			smp_mb__after_atomic_dec();
 		}
-		spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
909 | 937 | ||
910 | spin_lock(&lu_gp->lu_gp_lock); | 938 | spin_lock(&lu_gp->lu_gp_lock); |
911 | atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt); | 939 | atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt); |
@@ -913,7 +941,7 @@ int core_alua_do_port_transition( | |||
913 | } | 941 | } |
914 | spin_unlock(&lu_gp->lu_gp_lock); | 942 | spin_unlock(&lu_gp->lu_gp_lock); |
915 | 943 | ||
916 | printk(KERN_INFO "Successfully processed LU Group: %s all ALUA TG PT" | 944 | pr_debug("Successfully processed LU Group: %s all ALUA TG PT" |
917 | " Group IDs: %hu %s transition to primary state: %s\n", | 945 | " Group IDs: %hu %s transition to primary state: %s\n", |
918 | config_item_name(&lu_gp->lu_gp_group.cg_item), | 946 | config_item_name(&lu_gp->lu_gp_group.cg_item), |
919 | l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict", | 947 | l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict", |
@@ -942,11 +970,11 @@ static int core_alua_update_tpg_secondary_metadata( | |||
942 | memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN); | 970 | memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN); |
943 | 971 | ||
944 | len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s", | 972 | len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s", |
945 | TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg)); | 973 | se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg)); |
946 | 974 | ||
947 | if (TPG_TFO(se_tpg)->tpg_get_tag != NULL) | 975 | if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) |
948 | snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu", | 976 | snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu", |
949 | TPG_TFO(se_tpg)->tpg_get_tag(se_tpg)); | 977 | se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); |
950 | 978 | ||
951 | len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n" | 979 | len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n" |
952 | "alua_tg_pt_status=0x%02x\n", | 980 | "alua_tg_pt_status=0x%02x\n", |
@@ -954,7 +982,7 @@ static int core_alua_update_tpg_secondary_metadata( | |||
954 | port->sep_tg_pt_secondary_stat); | 982 | port->sep_tg_pt_secondary_stat); |
955 | 983 | ||
956 | snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u", | 984 | snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u", |
957 | TPG_TFO(se_tpg)->get_fabric_name(), wwn, | 985 | se_tpg->se_tpg_tfo->get_fabric_name(), wwn, |
958 | port->sep_lun->unpacked_lun); | 986 | port->sep_lun->unpacked_lun); |
959 | 987 | ||
960 | return core_alua_write_tpg_metadata(path, md_buf, len); | 988 | return core_alua_write_tpg_metadata(path, md_buf, len); |
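Much of the mechanical churn in this file is the expansion of the old accessor macros into direct member references. The retired macros lived in target_core_base.h; their bodies below are inferred from the left/right columns of this diff, so treat them as approximate:

    #define T10_ALUA(su_dev)   (&(su_dev)->t10_alua)
    #define TPG_TFO(se_tpg)    ((se_tpg)->se_tpg_tfo)
    #define DEV_T10_WWN(dev)   (&(dev)->se_sub_dev->t10_wwn)
    #define TRANSPORT(dev)     ((dev)->transport)

    /* e.g.  TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg)
     * becomes se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) */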
@@ -973,11 +1001,11 @@ static int core_alua_set_tg_pt_secondary_state( | |||
973 | 1001 | ||
974 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 1002 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); |
975 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; | 1003 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; |
976 | if (!(tg_pt_gp)) { | 1004 | if (!tg_pt_gp) { |
977 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 1005 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); |
978 | printk(KERN_ERR "Unable to complete secondary state" | 1006 | pr_err("Unable to complete secondary state" |
979 | " transition\n"); | 1007 | " transition\n"); |
980 | return -1; | 1008 | return -EINVAL; |
981 | } | 1009 | } |
982 | trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs; | 1010 | trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs; |
983 | /* | 1011 | /* |
@@ -994,7 +1022,7 @@ static int core_alua_set_tg_pt_secondary_state( | |||
994 | ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : | 1022 | ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : |
995 | ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; | 1023 | ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; |
996 | 1024 | ||
997 | printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu" | 1025 | pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" |
998 | " to secondary access state: %s\n", (explict) ? "explict" : | 1026 | " to secondary access state: %s\n", (explict) ? "explict" : |
999 | "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), | 1027 | "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), |
1000 | tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE"); | 1028 | tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE"); |
@@ -1012,10 +1040,10 @@ static int core_alua_set_tg_pt_secondary_state( | |||
1012 | */ | 1040 | */ |
1013 | if (port->sep_tg_pt_secondary_write_md) { | 1041 | if (port->sep_tg_pt_secondary_write_md) { |
1014 | md_buf = kzalloc(md_buf_len, GFP_KERNEL); | 1042 | md_buf = kzalloc(md_buf_len, GFP_KERNEL); |
1015 | if (!(md_buf)) { | 1043 | if (!md_buf) { |
1016 | printk(KERN_ERR "Unable to allocate md_buf for" | 1044 | pr_err("Unable to allocate md_buf for" |
1017 | " secondary ALUA access metadata\n"); | 1045 | " secondary ALUA access metadata\n"); |
1018 | return -1; | 1046 | return -ENOMEM; |
1019 | } | 1047 | } |
1020 | mutex_lock(&port->sep_tg_pt_md_mutex); | 1048 | mutex_lock(&port->sep_tg_pt_md_mutex); |
1021 | core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port, | 1049 | core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port, |
@@ -1034,19 +1062,19 @@ core_alua_allocate_lu_gp(const char *name, int def_group) | |||
1034 | struct t10_alua_lu_gp *lu_gp; | 1062 | struct t10_alua_lu_gp *lu_gp; |
1035 | 1063 | ||
1036 | lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL); | 1064 | lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL); |
1037 | if (!(lu_gp)) { | 1065 | if (!lu_gp) { |
1038 | printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp\n"); | 1066 | pr_err("Unable to allocate struct t10_alua_lu_gp\n"); |
1039 | return ERR_PTR(-ENOMEM); | 1067 | return ERR_PTR(-ENOMEM); |
1040 | } | 1068 | } |
1041 | INIT_LIST_HEAD(&lu_gp->lu_gp_list); | 1069 | INIT_LIST_HEAD(&lu_gp->lu_gp_node); |
1042 | INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list); | 1070 | INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list); |
1043 | spin_lock_init(&lu_gp->lu_gp_lock); | 1071 | spin_lock_init(&lu_gp->lu_gp_lock); |
1044 | atomic_set(&lu_gp->lu_gp_ref_cnt, 0); | 1072 | atomic_set(&lu_gp->lu_gp_ref_cnt, 0); |
1045 | 1073 | ||
1046 | if (def_group) { | 1074 | if (def_group) { |
1047 | lu_gp->lu_gp_id = se_global->alua_lu_gps_counter++; | 1075 | lu_gp->lu_gp_id = alua_lu_gps_counter++; |
1048 | lu_gp->lu_gp_valid_id = 1; | 1076 | lu_gp->lu_gp_valid_id = 1; |
1049 | se_global->alua_lu_gps_count++; | 1077 | alua_lu_gps_count++; |
1050 | } | 1078 | } |
1051 | 1079 | ||
1052 | return lu_gp; | 1080 | return lu_gp; |
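The se_global singleton is being dismantled in the same pass: its ALUA LU group fields reappear as file-scope state in target_core_alua.c. A plausible post-series shape, with identifiers taken from the right-hand column but the declarations otherwise assumed:

    static LIST_HEAD(lu_gps_list);          /* replaces se_global->g_lu_gps_list */
    static DEFINE_SPINLOCK(lu_gps_lock);    /* replaces se_global->lu_gps_lock */
    static u16 alua_lu_gps_counter;
    static u32 alua_lu_gps_count;
    static struct t10_alua_lu_gp *default_lu_gp;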
@@ -1060,41 +1088,41 @@ int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id) | |||
1060 | * The lu_gp->lu_gp_id may only be set once.. | 1088 | * The lu_gp->lu_gp_id may only be set once.. |
1061 | */ | 1089 | */ |
1062 | if (lu_gp->lu_gp_valid_id) { | 1090 | if (lu_gp->lu_gp_valid_id) { |
1063 | printk(KERN_WARNING "ALUA LU Group already has a valid ID," | 1091 | pr_warn("ALUA LU Group already has a valid ID," |
1064 | " ignoring request\n"); | 1092 | " ignoring request\n"); |
1065 | return -1; | 1093 | return -EINVAL; |
1066 | } | 1094 | } |
1067 | 1095 | ||
1068 | spin_lock(&se_global->lu_gps_lock); | 1096 | spin_lock(&lu_gps_lock); |
1069 | if (se_global->alua_lu_gps_count == 0x0000ffff) { | 1097 | if (alua_lu_gps_count == 0x0000ffff) { |
1070 | printk(KERN_ERR "Maximum ALUA se_global->alua_lu_gps_count:" | 1098 | pr_err("Maximum ALUA alua_lu_gps_count:" |
1071 | " 0x0000ffff reached\n"); | 1099 | " 0x0000ffff reached\n"); |
1072 | spin_unlock(&se_global->lu_gps_lock); | 1100 | spin_unlock(&lu_gps_lock); |
1073 | kmem_cache_free(t10_alua_lu_gp_cache, lu_gp); | 1101 | kmem_cache_free(t10_alua_lu_gp_cache, lu_gp); |
1074 | return -1; | 1102 | return -ENOSPC; |
1075 | } | 1103 | } |
1076 | again: | 1104 | again: |
1077 | lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id : | 1105 | lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id : |
1078 | se_global->alua_lu_gps_counter++; | 1106 | alua_lu_gps_counter++; |
1079 | 1107 | ||
1080 | list_for_each_entry(lu_gp_tmp, &se_global->g_lu_gps_list, lu_gp_list) { | 1108 | list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) { |
1081 | if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) { | 1109 | if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) { |
1082 | if (!(lu_gp_id)) | 1110 | if (!lu_gp_id) |
1083 | goto again; | 1111 | goto again; |
1084 | 1112 | ||
1085 | printk(KERN_WARNING "ALUA Logical Unit Group ID: %hu" | 1113 | pr_warn("ALUA Logical Unit Group ID: %hu" |
1086 | " already exists, ignoring request\n", | 1114 | " already exists, ignoring request\n", |
1087 | lu_gp_id); | 1115 | lu_gp_id); |
1088 | spin_unlock(&se_global->lu_gps_lock); | 1116 | spin_unlock(&lu_gps_lock); |
1089 | return -1; | 1117 | return -EINVAL; |
1090 | } | 1118 | } |
1091 | } | 1119 | } |
1092 | 1120 | ||
1093 | lu_gp->lu_gp_id = lu_gp_id_tmp; | 1121 | lu_gp->lu_gp_id = lu_gp_id_tmp; |
1094 | lu_gp->lu_gp_valid_id = 1; | 1122 | lu_gp->lu_gp_valid_id = 1; |
1095 | list_add_tail(&lu_gp->lu_gp_list, &se_global->g_lu_gps_list); | 1123 | list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list); |
1096 | se_global->alua_lu_gps_count++; | 1124 | alua_lu_gps_count++; |
1097 | spin_unlock(&se_global->lu_gps_lock); | 1125 | spin_unlock(&lu_gps_lock); |
1098 | 1126 | ||
1099 | return 0; | 1127 | return 0; |
1100 | } | 1128 | } |
@@ -1105,8 +1133,8 @@ core_alua_allocate_lu_gp_mem(struct se_device *dev) | |||
1105 | struct t10_alua_lu_gp_member *lu_gp_mem; | 1133 | struct t10_alua_lu_gp_member *lu_gp_mem; |
1106 | 1134 | ||
1107 | lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL); | 1135 | lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL); |
1108 | if (!(lu_gp_mem)) { | 1136 | if (!lu_gp_mem) { |
1109 | printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp_member\n"); | 1137 | pr_err("Unable to allocate struct t10_alua_lu_gp_member\n"); |
1110 | return ERR_PTR(-ENOMEM); | 1138 | return ERR_PTR(-ENOMEM); |
1111 | } | 1139 | } |
1112 | INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list); | 1140 | INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list); |
@@ -1130,11 +1158,11 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp) | |||
1130 | * no associations can be made while we are releasing | 1158 | * no associations can be made while we are releasing |
1131 | * struct t10_alua_lu_gp. | 1159 | * struct t10_alua_lu_gp. |
1132 | */ | 1160 | */ |
1133 | spin_lock(&se_global->lu_gps_lock); | 1161 | spin_lock(&lu_gps_lock); |
1134 | atomic_set(&lu_gp->lu_gp_shutdown, 1); | 1162 | atomic_set(&lu_gp->lu_gp_shutdown, 1); |
1135 | list_del(&lu_gp->lu_gp_list); | 1163 | list_del(&lu_gp->lu_gp_node); |
1136 | se_global->alua_lu_gps_count--; | 1164 | alua_lu_gps_count--; |
1137 | spin_unlock(&se_global->lu_gps_lock); | 1165 | spin_unlock(&lu_gps_lock); |
1138 | /* | 1166 | /* |
1139 | * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name() | 1167 | * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name() |
1140 | * in target_core_configfs.c:target_core_store_alua_lu_gp() to be | 1168 | * in target_core_configfs.c:target_core_store_alua_lu_gp() to be |
@@ -1165,9 +1193,9 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp) | |||
1165 | * we want to re-assocate a given lu_gp_mem with default_lu_gp. | 1193 | * we want to re-assocate a given lu_gp_mem with default_lu_gp. |
1166 | */ | 1194 | */ |
1167 | spin_lock(&lu_gp_mem->lu_gp_mem_lock); | 1195 | spin_lock(&lu_gp_mem->lu_gp_mem_lock); |
1168 | if (lu_gp != se_global->default_lu_gp) | 1196 | if (lu_gp != default_lu_gp) |
1169 | __core_alua_attach_lu_gp_mem(lu_gp_mem, | 1197 | __core_alua_attach_lu_gp_mem(lu_gp_mem, |
1170 | se_global->default_lu_gp); | 1198 | default_lu_gp); |
1171 | else | 1199 | else |
1172 | lu_gp_mem->lu_gp = NULL; | 1200 | lu_gp_mem->lu_gp = NULL; |
1173 | spin_unlock(&lu_gp_mem->lu_gp_mem_lock); | 1201 | spin_unlock(&lu_gp_mem->lu_gp_mem_lock); |
@@ -1182,7 +1210,7 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp) | |||
1182 | void core_alua_free_lu_gp_mem(struct se_device *dev) | 1210 | void core_alua_free_lu_gp_mem(struct se_device *dev) |
1183 | { | 1211 | { |
1184 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; | 1212 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
1185 | struct t10_alua *alua = T10_ALUA(su_dev); | 1213 | struct t10_alua *alua = &su_dev->t10_alua; |
1186 | struct t10_alua_lu_gp *lu_gp; | 1214 | struct t10_alua_lu_gp *lu_gp; |
1187 | struct t10_alua_lu_gp_member *lu_gp_mem; | 1215 | struct t10_alua_lu_gp_member *lu_gp_mem; |
1188 | 1216 | ||
@@ -1190,7 +1218,7 @@ void core_alua_free_lu_gp_mem(struct se_device *dev) | |||
1190 | return; | 1218 | return; |
1191 | 1219 | ||
1192 | lu_gp_mem = dev->dev_alua_lu_gp_mem; | 1220 | lu_gp_mem = dev->dev_alua_lu_gp_mem; |
1193 | if (!(lu_gp_mem)) | 1221 | if (!lu_gp_mem) |
1194 | return; | 1222 | return; |
1195 | 1223 | ||
1196 | while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt)) | 1224 | while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt)) |
@@ -1198,7 +1226,7 @@ void core_alua_free_lu_gp_mem(struct se_device *dev) | |||
1198 | 1226 | ||
1199 | spin_lock(&lu_gp_mem->lu_gp_mem_lock); | 1227 | spin_lock(&lu_gp_mem->lu_gp_mem_lock); |
1200 | lu_gp = lu_gp_mem->lu_gp; | 1228 | lu_gp = lu_gp_mem->lu_gp; |
1201 | if ((lu_gp)) { | 1229 | if (lu_gp) { |
1202 | spin_lock(&lu_gp->lu_gp_lock); | 1230 | spin_lock(&lu_gp->lu_gp_lock); |
1203 | if (lu_gp_mem->lu_gp_assoc) { | 1231 | if (lu_gp_mem->lu_gp_assoc) { |
1204 | list_del(&lu_gp_mem->lu_gp_mem_list); | 1232 | list_del(&lu_gp_mem->lu_gp_mem_list); |
@@ -1218,27 +1246,27 @@ struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name) | |||
1218 | struct t10_alua_lu_gp *lu_gp; | 1246 | struct t10_alua_lu_gp *lu_gp; |
1219 | struct config_item *ci; | 1247 | struct config_item *ci; |
1220 | 1248 | ||
1221 | spin_lock(&se_global->lu_gps_lock); | 1249 | spin_lock(&lu_gps_lock); |
1222 | list_for_each_entry(lu_gp, &se_global->g_lu_gps_list, lu_gp_list) { | 1250 | list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) { |
1223 | if (!(lu_gp->lu_gp_valid_id)) | 1251 | if (!lu_gp->lu_gp_valid_id) |
1224 | continue; | 1252 | continue; |
1225 | ci = &lu_gp->lu_gp_group.cg_item; | 1253 | ci = &lu_gp->lu_gp_group.cg_item; |
1226 | if (!(strcmp(config_item_name(ci), name))) { | 1254 | if (!strcmp(config_item_name(ci), name)) { |
1227 | atomic_inc(&lu_gp->lu_gp_ref_cnt); | 1255 | atomic_inc(&lu_gp->lu_gp_ref_cnt); |
1228 | spin_unlock(&se_global->lu_gps_lock); | 1256 | spin_unlock(&lu_gps_lock); |
1229 | return lu_gp; | 1257 | return lu_gp; |
1230 | } | 1258 | } |
1231 | } | 1259 | } |
1232 | spin_unlock(&se_global->lu_gps_lock); | 1260 | spin_unlock(&lu_gps_lock); |
1233 | 1261 | ||
1234 | return NULL; | 1262 | return NULL; |
1235 | } | 1263 | } |
1236 | 1264 | ||
1237 | void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp) | 1265 | void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp) |
1238 | { | 1266 | { |
1239 | spin_lock(&se_global->lu_gps_lock); | 1267 | spin_lock(&lu_gps_lock); |
1240 | atomic_dec(&lu_gp->lu_gp_ref_cnt); | 1268 | atomic_dec(&lu_gp->lu_gp_ref_cnt); |
1241 | spin_unlock(&se_global->lu_gps_lock); | 1269 | spin_unlock(&lu_gps_lock); |
1242 | } | 1270 | } |
1243 | 1271 | ||
1244 | /* | 1272 | /* |
@@ -1279,8 +1307,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp( | |||
1279 | struct t10_alua_tg_pt_gp *tg_pt_gp; | 1307 | struct t10_alua_tg_pt_gp *tg_pt_gp; |
1280 | 1308 | ||
1281 | tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL); | 1309 | tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL); |
1282 | if (!(tg_pt_gp)) { | 1310 | if (!tg_pt_gp) { |
1283 | printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp\n"); | 1311 | pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n"); |
1284 | return NULL; | 1312 | return NULL; |
1285 | } | 1313 | } |
1286 | INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list); | 1314 | INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list); |
@@ -1304,14 +1332,14 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp( | |||
1304 | tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS; | 1332 | tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS; |
1305 | 1333 | ||
1306 | if (def_group) { | 1334 | if (def_group) { |
1307 | spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | 1335 | spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); |
1308 | tg_pt_gp->tg_pt_gp_id = | 1336 | tg_pt_gp->tg_pt_gp_id = |
1309 | T10_ALUA(su_dev)->alua_tg_pt_gps_counter++; | 1337 | su_dev->t10_alua.alua_tg_pt_gps_counter++; |
1310 | tg_pt_gp->tg_pt_gp_valid_id = 1; | 1338 | tg_pt_gp->tg_pt_gp_valid_id = 1; |
1311 | T10_ALUA(su_dev)->alua_tg_pt_gps_count++; | 1339 | su_dev->t10_alua.alua_tg_pt_gps_count++; |
1312 | list_add_tail(&tg_pt_gp->tg_pt_gp_list, | 1340 | list_add_tail(&tg_pt_gp->tg_pt_gp_list, |
1313 | &T10_ALUA(su_dev)->tg_pt_gps_list); | 1341 | &su_dev->t10_alua.tg_pt_gps_list); |
1314 | spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | 1342 | spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); |
1315 | } | 1343 | } |
1316 | 1344 | ||
1317 | return tg_pt_gp; | 1345 | return tg_pt_gp; |
@@ -1328,42 +1356,42 @@ int core_alua_set_tg_pt_gp_id( | |||
1328 | * The tg_pt_gp->tg_pt_gp_id may only be set once.. | 1356 | * The tg_pt_gp->tg_pt_gp_id may only be set once.. |
1329 | */ | 1357 | */ |
1330 | if (tg_pt_gp->tg_pt_gp_valid_id) { | 1358 | if (tg_pt_gp->tg_pt_gp_valid_id) { |
1331 | printk(KERN_WARNING "ALUA TG PT Group already has a valid ID," | 1359 | pr_warn("ALUA TG PT Group already has a valid ID," |
1332 | " ignoring request\n"); | 1360 | " ignoring request\n"); |
1333 | return -1; | 1361 | return -EINVAL; |
1334 | } | 1362 | } |
1335 | 1363 | ||
1336 | spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | 1364 | spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); |
1337 | if (T10_ALUA(su_dev)->alua_tg_pt_gps_count == 0x0000ffff) { | 1365 | if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) { |
1338 | printk(KERN_ERR "Maximum ALUA alua_tg_pt_gps_count:" | 1366 | pr_err("Maximum ALUA alua_tg_pt_gps_count:" |
1339 | " 0x0000ffff reached\n"); | 1367 | " 0x0000ffff reached\n"); |
1340 | spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | 1368 | spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); |
1341 | kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); | 1369 | kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); |
1342 | return -1; | 1370 | return -ENOSPC; |
1343 | } | 1371 | } |
1344 | again: | 1372 | again: |
1345 | tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id : | 1373 | tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id : |
1346 | T10_ALUA(su_dev)->alua_tg_pt_gps_counter++; | 1374 | su_dev->t10_alua.alua_tg_pt_gps_counter++; |
1347 | 1375 | ||
1348 | list_for_each_entry(tg_pt_gp_tmp, &T10_ALUA(su_dev)->tg_pt_gps_list, | 1376 | list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list, |
1349 | tg_pt_gp_list) { | 1377 | tg_pt_gp_list) { |
1350 | if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) { | 1378 | if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) { |
1351 | if (!(tg_pt_gp_id)) | 1379 | if (!tg_pt_gp_id) |
1352 | goto again; | 1380 | goto again; |
1353 | 1381 | ||
1354 | printk(KERN_ERR "ALUA Target Port Group ID: %hu already" | 1382 | pr_err("ALUA Target Port Group ID: %hu already" |
1355 | " exists, ignoring request\n", tg_pt_gp_id); | 1383 | " exists, ignoring request\n", tg_pt_gp_id); |
1356 | spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | 1384 | spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); |
1357 | return -1; | 1385 | return -EINVAL; |
1358 | } | 1386 | } |
1359 | } | 1387 | } |
1360 | 1388 | ||
1361 | tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp; | 1389 | tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp; |
1362 | tg_pt_gp->tg_pt_gp_valid_id = 1; | 1390 | tg_pt_gp->tg_pt_gp_valid_id = 1; |
1363 | list_add_tail(&tg_pt_gp->tg_pt_gp_list, | 1391 | list_add_tail(&tg_pt_gp->tg_pt_gp_list, |
1364 | &T10_ALUA(su_dev)->tg_pt_gps_list); | 1392 | &su_dev->t10_alua.tg_pt_gps_list); |
1365 | T10_ALUA(su_dev)->alua_tg_pt_gps_count++; | 1393 | su_dev->t10_alua.alua_tg_pt_gps_count++; |
1366 | spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | 1394 | spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); |
1367 | 1395 | ||
1368 | return 0; | 1396 | return 0; |
1369 | } | 1397 | } |
@@ -1375,8 +1403,8 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem( | |||
1375 | 1403 | ||
1376 | tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache, | 1404 | tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache, |
1377 | GFP_KERNEL); | 1405 | GFP_KERNEL); |
1378 | if (!(tg_pt_gp_mem)) { | 1406 | if (!tg_pt_gp_mem) { |
1379 | printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp_member\n"); | 1407 | pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n"); |
1380 | return ERR_PTR(-ENOMEM); | 1408 | return ERR_PTR(-ENOMEM); |
1381 | } | 1409 | } |
1382 | INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list); | 1410 | INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list); |
@@ -1403,10 +1431,10 @@ void core_alua_free_tg_pt_gp( | |||
1403 | * no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS | 1431 | * no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS |
1404 | * can be made while we are releasing struct t10_alua_tg_pt_gp. | 1432 | * can be made while we are releasing struct t10_alua_tg_pt_gp. |
1405 | */ | 1433 | */ |
1406 | spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | 1434 | spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); |
1407 | list_del(&tg_pt_gp->tg_pt_gp_list); | 1435 | list_del(&tg_pt_gp->tg_pt_gp_list); |
1408 | T10_ALUA(su_dev)->alua_tg_pt_gps_counter--; | 1436 | su_dev->t10_alua.alua_tg_pt_gps_counter--; |
1409 | spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | 1437 | spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); |
1410 | /* | 1438 | /* |
1411 | * Allow a struct t10_alua_tg_pt_gp_member * referenced by | 1439 | * Allow a struct t10_alua_tg_pt_gp_member * referenced by |
1412 | * core_alua_get_tg_pt_gp_by_name() in | 1440 | * core_alua_get_tg_pt_gp_by_name() in |
@@ -1438,9 +1466,9 @@ void core_alua_free_tg_pt_gp( | |||
1438 | * default_tg_pt_gp. | 1466 | * default_tg_pt_gp. |
1439 | */ | 1467 | */ |
1440 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 1468 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); |
1441 | if (tg_pt_gp != T10_ALUA(su_dev)->default_tg_pt_gp) { | 1469 | if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) { |
1442 | __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, | 1470 | __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, |
1443 | T10_ALUA(su_dev)->default_tg_pt_gp); | 1471 | su_dev->t10_alua.default_tg_pt_gp); |
1444 | } else | 1472 | } else |
1445 | tg_pt_gp_mem->tg_pt_gp = NULL; | 1473 | tg_pt_gp_mem->tg_pt_gp = NULL; |
1446 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 1474 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); |
@@ -1455,7 +1483,7 @@ void core_alua_free_tg_pt_gp( | |||
1455 | void core_alua_free_tg_pt_gp_mem(struct se_port *port) | 1483 | void core_alua_free_tg_pt_gp_mem(struct se_port *port) |
1456 | { | 1484 | { |
1457 | struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev; | 1485 | struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev; |
1458 | struct t10_alua *alua = T10_ALUA(su_dev); | 1486 | struct t10_alua *alua = &su_dev->t10_alua; |
1459 | struct t10_alua_tg_pt_gp *tg_pt_gp; | 1487 | struct t10_alua_tg_pt_gp *tg_pt_gp; |
1460 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | 1488 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; |
1461 | 1489 | ||
@@ -1463,7 +1491,7 @@ void core_alua_free_tg_pt_gp_mem(struct se_port *port) | |||
1463 | return; | 1491 | return; |
1464 | 1492 | ||
1465 | tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; | 1493 | tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; |
1466 | if (!(tg_pt_gp_mem)) | 1494 | if (!tg_pt_gp_mem) |
1467 | return; | 1495 | return; |
1468 | 1496 | ||
1469 | while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt)) | 1497 | while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt)) |
@@ -1471,7 +1499,7 @@ void core_alua_free_tg_pt_gp_mem(struct se_port *port) | |||
1471 | 1499 | ||
1472 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 1500 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); |
1473 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; | 1501 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; |
1474 | if ((tg_pt_gp)) { | 1502 | if (tg_pt_gp) { |
1475 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); | 1503 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); |
1476 | if (tg_pt_gp_mem->tg_pt_gp_assoc) { | 1504 | if (tg_pt_gp_mem->tg_pt_gp_assoc) { |
1477 | list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); | 1505 | list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); |
@@ -1493,19 +1521,19 @@ static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name( | |||
1493 | struct t10_alua_tg_pt_gp *tg_pt_gp; | 1521 | struct t10_alua_tg_pt_gp *tg_pt_gp; |
1494 | struct config_item *ci; | 1522 | struct config_item *ci; |
1495 | 1523 | ||
1496 | spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | 1524 | spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); |
1497 | list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list, | 1525 | list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list, |
1498 | tg_pt_gp_list) { | 1526 | tg_pt_gp_list) { |
1499 | if (!(tg_pt_gp->tg_pt_gp_valid_id)) | 1527 | if (!tg_pt_gp->tg_pt_gp_valid_id) |
1500 | continue; | 1528 | continue; |
1501 | ci = &tg_pt_gp->tg_pt_gp_group.cg_item; | 1529 | ci = &tg_pt_gp->tg_pt_gp_group.cg_item; |
1502 | if (!(strcmp(config_item_name(ci), name))) { | 1530 | if (!strcmp(config_item_name(ci), name)) { |
1503 | atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); | 1531 | atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); |
1504 | spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | 1532 | spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); |
1505 | return tg_pt_gp; | 1533 | return tg_pt_gp; |
1506 | } | 1534 | } |
1507 | } | 1535 | } |
1508 | spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | 1536 | spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); |
1509 | 1537 | ||
1510 | return NULL; | 1538 | return NULL; |
1511 | } | 1539 | } |
@@ -1515,9 +1543,9 @@ static void core_alua_put_tg_pt_gp_from_name( | |||
1515 | { | 1543 | { |
1516 | struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; | 1544 | struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; |
1517 | 1545 | ||
1518 | spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | 1546 | spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); |
1519 | atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); | 1547 | atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); |
1520 | spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | 1548 | spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); |
1521 | } | 1549 | } |
1522 | 1550 | ||
1523 | /* | 1551 | /* |
@@ -1555,7 +1583,7 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page) | |||
1555 | { | 1583 | { |
1556 | struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev; | 1584 | struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev; |
1557 | struct config_item *tg_pt_ci; | 1585 | struct config_item *tg_pt_ci; |
1558 | struct t10_alua *alua = T10_ALUA(su_dev); | 1586 | struct t10_alua *alua = &su_dev->t10_alua; |
1559 | struct t10_alua_tg_pt_gp *tg_pt_gp; | 1587 | struct t10_alua_tg_pt_gp *tg_pt_gp; |
1560 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | 1588 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; |
1561 | ssize_t len = 0; | 1589 | ssize_t len = 0; |
@@ -1564,12 +1592,12 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page) | |||
1564 | return len; | 1592 | return len; |
1565 | 1593 | ||
1566 | tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; | 1594 | tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; |
1567 | if (!(tg_pt_gp_mem)) | 1595 | if (!tg_pt_gp_mem) |
1568 | return len; | 1596 | return len; |
1569 | 1597 | ||
1570 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 1598 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); |
1571 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; | 1599 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; |
1572 | if ((tg_pt_gp)) { | 1600 | if (tg_pt_gp) { |
1573 | tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item; | 1601 | tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item; |
1574 | len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:" | 1602 | len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:" |
1575 | " %hu\nTG Port Primary Access State: %s\nTG Port " | 1603 | " %hu\nTG Port Primary Access State: %s\nTG Port " |
@@ -1605,16 +1633,16 @@ ssize_t core_alua_store_tg_pt_gp_info( | |||
1605 | tpg = port->sep_tpg; | 1633 | tpg = port->sep_tpg; |
1606 | lun = port->sep_lun; | 1634 | lun = port->sep_lun; |
1607 | 1635 | ||
1608 | if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) { | 1636 | if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) { |
1609 | printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for" | 1637 | pr_warn("SPC3_ALUA_EMULATED not enabled for" |
1610 | " %s/tpgt_%hu/%s\n", TPG_TFO(tpg)->tpg_get_wwn(tpg), | 1638 | " %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg), |
1611 | TPG_TFO(tpg)->tpg_get_tag(tpg), | 1639 | tpg->se_tpg_tfo->tpg_get_tag(tpg), |
1612 | config_item_name(&lun->lun_group.cg_item)); | 1640 | config_item_name(&lun->lun_group.cg_item)); |
1613 | return -EINVAL; | 1641 | return -EINVAL; |
1614 | } | 1642 | } |
1615 | 1643 | ||
1616 | if (count > TG_PT_GROUP_NAME_BUF) { | 1644 | if (count > TG_PT_GROUP_NAME_BUF) { |
1617 | printk(KERN_ERR "ALUA Target Port Group alias too large!\n"); | 1645 | pr_err("ALUA Target Port Group alias too large!\n"); |
1618 | return -EINVAL; | 1646 | return -EINVAL; |
1619 | } | 1647 | } |
1620 | memset(buf, 0, TG_PT_GROUP_NAME_BUF); | 1648 | memset(buf, 0, TG_PT_GROUP_NAME_BUF); |
@@ -1631,31 +1659,31 @@ ssize_t core_alua_store_tg_pt_gp_info( | |||
1631 | */ | 1659 | */ |
1632 | tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev, | 1660 | tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev, |
1633 | strstrip(buf)); | 1661 | strstrip(buf)); |
1634 | if (!(tg_pt_gp_new)) | 1662 | if (!tg_pt_gp_new) |
1635 | return -ENODEV; | 1663 | return -ENODEV; |
1636 | } | 1664 | } |
1637 | tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; | 1665 | tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; |
1638 | if (!(tg_pt_gp_mem)) { | 1666 | if (!tg_pt_gp_mem) { |
1639 | if (tg_pt_gp_new) | 1667 | if (tg_pt_gp_new) |
1640 | core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new); | 1668 | core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new); |
1641 | printk(KERN_ERR "NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n"); | 1669 | pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n"); |
1642 | return -EINVAL; | 1670 | return -EINVAL; |
1643 | } | 1671 | } |
1644 | 1672 | ||
1645 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 1673 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); |
1646 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; | 1674 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; |
1647 | if ((tg_pt_gp)) { | 1675 | if (tg_pt_gp) { |
1648 | /* | 1676 | /* |
1649 | * Clearing an existing tg_pt_gp association, and replacing | 1677 | * Clearing an existing tg_pt_gp association, and replacing |
1650 | * with the default_tg_pt_gp. | 1678 | * with the default_tg_pt_gp. |
1651 | */ | 1679 | */ |
1652 | if (!(tg_pt_gp_new)) { | 1680 | if (!tg_pt_gp_new) { |
1653 | printk(KERN_INFO "Target_Core_ConfigFS: Moving" | 1681 | pr_debug("Target_Core_ConfigFS: Moving" |
1654 | " %s/tpgt_%hu/%s from ALUA Target Port Group:" | 1682 | " %s/tpgt_%hu/%s from ALUA Target Port Group:" |
1655 | " alua/%s, ID: %hu back to" | 1683 | " alua/%s, ID: %hu back to" |
1656 | " default_tg_pt_gp\n", | 1684 | " default_tg_pt_gp\n", |
1657 | TPG_TFO(tpg)->tpg_get_wwn(tpg), | 1685 | tpg->se_tpg_tfo->tpg_get_wwn(tpg), |
1658 | TPG_TFO(tpg)->tpg_get_tag(tpg), | 1686 | tpg->se_tpg_tfo->tpg_get_tag(tpg), |
1659 | config_item_name(&lun->lun_group.cg_item), | 1687 | config_item_name(&lun->lun_group.cg_item), |
1660 | config_item_name( | 1688 | config_item_name( |
1661 | &tg_pt_gp->tg_pt_gp_group.cg_item), | 1689 | &tg_pt_gp->tg_pt_gp_group.cg_item), |
@@ -1663,7 +1691,7 @@ ssize_t core_alua_store_tg_pt_gp_info( | |||
1663 | 1691 | ||
1664 | __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp); | 1692 | __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp); |
1665 | __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, | 1693 | __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, |
1666 | T10_ALUA(su_dev)->default_tg_pt_gp); | 1694 | su_dev->t10_alua.default_tg_pt_gp); |
1667 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 1695 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); |
1668 | 1696 | ||
1669 | return count; | 1697 | return count; |
@@ -1679,10 +1707,10 @@ ssize_t core_alua_store_tg_pt_gp_info( | |||
1679 | */ | 1707 | */ |
1680 | __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new); | 1708 | __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new); |
1681 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 1709 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); |
1682 | printk(KERN_INFO "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA" | 1710 | pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA" |
1683 | " Target Port Group: alua/%s, ID: %hu\n", (move) ? | 1711 | " Target Port Group: alua/%s, ID: %hu\n", (move) ? |
1684 | "Moving" : "Adding", TPG_TFO(tpg)->tpg_get_wwn(tpg), | 1712 | "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg), |
1685 | TPG_TFO(tpg)->tpg_get_tag(tpg), | 1713 | tpg->se_tpg_tfo->tpg_get_tag(tpg), |
1686 | config_item_name(&lun->lun_group.cg_item), | 1714 | config_item_name(&lun->lun_group.cg_item), |
1687 | config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item), | 1715 | config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item), |
1688 | tg_pt_gp_new->tg_pt_gp_id); | 1716 | tg_pt_gp_new->tg_pt_gp_id); |
@@ -1716,11 +1744,11 @@ ssize_t core_alua_store_access_type( | |||
1716 | 1744 | ||
1717 | ret = strict_strtoul(page, 0, &tmp); | 1745 | ret = strict_strtoul(page, 0, &tmp); |
1718 | if (ret < 0) { | 1746 | if (ret < 0) { |
1719 | printk(KERN_ERR "Unable to extract alua_access_type\n"); | 1747 | pr_err("Unable to extract alua_access_type\n"); |
1720 | return -EINVAL; | 1748 | return -EINVAL; |
1721 | } | 1749 | } |
1722 | if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) { | 1750 | if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) { |
1723 | printk(KERN_ERR "Illegal value for alua_access_type:" | 1751 | pr_err("Illegal value for alua_access_type:" |
1724 | " %lu\n", tmp); | 1752 | " %lu\n", tmp); |
1725 | return -EINVAL; | 1753 | return -EINVAL; |
1726 | } | 1754 | } |
@@ -1754,11 +1782,11 @@ ssize_t core_alua_store_nonop_delay_msecs( | |||
1754 | 1782 | ||
1755 | ret = strict_strtoul(page, 0, &tmp); | 1783 | ret = strict_strtoul(page, 0, &tmp); |
1756 | if (ret < 0) { | 1784 | if (ret < 0) { |
1757 | printk(KERN_ERR "Unable to extract nonop_delay_msecs\n"); | 1785 | pr_err("Unable to extract nonop_delay_msecs\n"); |
1758 | return -EINVAL; | 1786 | return -EINVAL; |
1759 | } | 1787 | } |
1760 | if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) { | 1788 | if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) { |
1761 | printk(KERN_ERR "Passed nonop_delay_msecs: %lu, exceeds" | 1789 | pr_err("Passed nonop_delay_msecs: %lu, exceeds" |
1762 | " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp, | 1790 | " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp, |
1763 | ALUA_MAX_NONOP_DELAY_MSECS); | 1791 | ALUA_MAX_NONOP_DELAY_MSECS); |
1764 | return -EINVAL; | 1792 | return -EINVAL; |
@@ -1785,11 +1813,11 @@ ssize_t core_alua_store_trans_delay_msecs( | |||
1785 | 1813 | ||
1786 | ret = strict_strtoul(page, 0, &tmp); | 1814 | ret = strict_strtoul(page, 0, &tmp); |
1787 | if (ret < 0) { | 1815 | if (ret < 0) { |
1788 | printk(KERN_ERR "Unable to extract trans_delay_msecs\n"); | 1816 | pr_err("Unable to extract trans_delay_msecs\n"); |
1789 | return -EINVAL; | 1817 | return -EINVAL; |
1790 | } | 1818 | } |
1791 | if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) { | 1819 | if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) { |
1792 | printk(KERN_ERR "Passed trans_delay_msecs: %lu, exceeds" | 1820 | pr_err("Passed trans_delay_msecs: %lu, exceeds" |
1793 | " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp, | 1821 | " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp, |
1794 | ALUA_MAX_TRANS_DELAY_MSECS); | 1822 | ALUA_MAX_TRANS_DELAY_MSECS); |
1795 | return -EINVAL; | 1823 | return -EINVAL; |
@@ -1816,11 +1844,11 @@ ssize_t core_alua_store_preferred_bit( | |||
1816 | 1844 | ||
1817 | ret = strict_strtoul(page, 0, &tmp); | 1845 | ret = strict_strtoul(page, 0, &tmp); |
1818 | if (ret < 0) { | 1846 | if (ret < 0) { |
1819 | printk(KERN_ERR "Unable to extract preferred ALUA value\n"); | 1847 | pr_err("Unable to extract preferred ALUA value\n"); |
1820 | return -EINVAL; | 1848 | return -EINVAL; |
1821 | } | 1849 | } |
1822 | if ((tmp != 0) && (tmp != 1)) { | 1850 | if ((tmp != 0) && (tmp != 1)) { |
1823 | printk(KERN_ERR "Illegal value for preferred ALUA: %lu\n", tmp); | 1851 | pr_err("Illegal value for preferred ALUA: %lu\n", tmp); |
1824 | return -EINVAL; | 1852 | return -EINVAL; |
1825 | } | 1853 | } |
1826 | tg_pt_gp->tg_pt_gp_pref = (int)tmp; | 1854 | tg_pt_gp->tg_pt_gp_pref = (int)tmp; |
@@ -1830,7 +1858,7 @@ ssize_t core_alua_store_preferred_bit( | |||
1830 | 1858 | ||
1831 | ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page) | 1859 | ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page) |
1832 | { | 1860 | { |
1833 | if (!(lun->lun_sep)) | 1861 | if (!lun->lun_sep) |
1834 | return -ENODEV; | 1862 | return -ENODEV; |
1835 | 1863 | ||
1836 | return sprintf(page, "%d\n", | 1864 | return sprintf(page, "%d\n", |
@@ -1846,22 +1874,22 @@ ssize_t core_alua_store_offline_bit( | |||
1846 | unsigned long tmp; | 1874 | unsigned long tmp; |
1847 | int ret; | 1875 | int ret; |
1848 | 1876 | ||
1849 | if (!(lun->lun_sep)) | 1877 | if (!lun->lun_sep) |
1850 | return -ENODEV; | 1878 | return -ENODEV; |
1851 | 1879 | ||
1852 | ret = strict_strtoul(page, 0, &tmp); | 1880 | ret = strict_strtoul(page, 0, &tmp); |
1853 | if (ret < 0) { | 1881 | if (ret < 0) { |
1854 | printk(KERN_ERR "Unable to extract alua_tg_pt_offline value\n"); | 1882 | pr_err("Unable to extract alua_tg_pt_offline value\n"); |
1855 | return -EINVAL; | 1883 | return -EINVAL; |
1856 | } | 1884 | } |
1857 | if ((tmp != 0) && (tmp != 1)) { | 1885 | if ((tmp != 0) && (tmp != 1)) { |
1858 | printk(KERN_ERR "Illegal value for alua_tg_pt_offline: %lu\n", | 1886 | pr_err("Illegal value for alua_tg_pt_offline: %lu\n", |
1859 | tmp); | 1887 | tmp); |
1860 | return -EINVAL; | 1888 | return -EINVAL; |
1861 | } | 1889 | } |
1862 | tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem; | 1890 | tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem; |
1863 | if (!(tg_pt_gp_mem)) { | 1891 | if (!tg_pt_gp_mem) { |
1864 | printk(KERN_ERR "Unable to locate *tg_pt_gp_mem\n"); | 1892 | pr_err("Unable to locate *tg_pt_gp_mem\n"); |
1865 | return -EINVAL; | 1893 | return -EINVAL; |
1866 | } | 1894 | } |
1867 | 1895 | ||
@@ -1890,13 +1918,13 @@ ssize_t core_alua_store_secondary_status( | |||
1890 | 1918 | ||
1891 | ret = strict_strtoul(page, 0, &tmp); | 1919 | ret = strict_strtoul(page, 0, &tmp); |
1892 | if (ret < 0) { | 1920 | if (ret < 0) { |
1893 | printk(KERN_ERR "Unable to extract alua_tg_pt_status\n"); | 1921 | pr_err("Unable to extract alua_tg_pt_status\n"); |
1894 | return -EINVAL; | 1922 | return -EINVAL; |
1895 | } | 1923 | } |
1896 | if ((tmp != ALUA_STATUS_NONE) && | 1924 | if ((tmp != ALUA_STATUS_NONE) && |
1897 | (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && | 1925 | (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && |
1898 | (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { | 1926 | (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { |
1899 | printk(KERN_ERR "Illegal value for alua_tg_pt_status: %lu\n", | 1927 | pr_err("Illegal value for alua_tg_pt_status: %lu\n", |
1900 | tmp); | 1928 | tmp); |
1901 | return -EINVAL; | 1929 | return -EINVAL; |
1902 | } | 1930 | } |
@@ -1923,11 +1951,11 @@ ssize_t core_alua_store_secondary_write_metadata( | |||
1923 | 1951 | ||
1924 | ret = strict_strtoul(page, 0, &tmp); | 1952 | ret = strict_strtoul(page, 0, &tmp); |
1925 | if (ret < 0) { | 1953 | if (ret < 0) { |
1926 | printk(KERN_ERR "Unable to extract alua_tg_pt_write_md\n"); | 1954 | pr_err("Unable to extract alua_tg_pt_write_md\n"); |
1927 | return -EINVAL; | 1955 | return -EINVAL; |
1928 | } | 1956 | } |
1929 | if ((tmp != 0) && (tmp != 1)) { | 1957 | if ((tmp != 0) && (tmp != 1)) { |
1930 | printk(KERN_ERR "Illegal value for alua_tg_pt_write_md:" | 1958 | pr_err("Illegal value for alua_tg_pt_write_md:" |
1931 | " %lu\n", tmp); | 1959 | " %lu\n", tmp); |
1932 | return -EINVAL; | 1960 | return -EINVAL; |
1933 | } | 1961 | } |
@@ -1939,7 +1967,7 @@ ssize_t core_alua_store_secondary_write_metadata( | |||
1939 | int core_setup_alua(struct se_device *dev, int force_pt) | 1967 | int core_setup_alua(struct se_device *dev, int force_pt) |
1940 | { | 1968 | { |
1941 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; | 1969 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
1942 | struct t10_alua *alua = T10_ALUA(su_dev); | 1970 | struct t10_alua *alua = &su_dev->t10_alua; |
1943 | struct t10_alua_lu_gp_member *lu_gp_mem; | 1971 | struct t10_alua_lu_gp_member *lu_gp_mem; |
1944 | /* | 1972 | /* |
1945 | * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic | 1973 | * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic |
@@ -1947,44 +1975,44 @@ int core_setup_alua(struct se_device *dev, int force_pt) | |||
1947 | * cause a problem because libata and some SATA RAID HBAs appear | 1975 | * cause a problem because libata and some SATA RAID HBAs appear |
1948 | * under Linux/SCSI, but emulate SCSI logic themselves. | 1976 | * under Linux/SCSI, but emulate SCSI logic themselves. |
1949 | */ | 1977 | */ |
1950 | if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) && | 1978 | if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) && |
1951 | !(DEV_ATTRIB(dev)->emulate_alua)) || force_pt) { | 1979 | !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) { |
1952 | alua->alua_type = SPC_ALUA_PASSTHROUGH; | 1980 | alua->alua_type = SPC_ALUA_PASSTHROUGH; |
1953 | alua->alua_state_check = &core_alua_state_check_nop; | 1981 | alua->alua_state_check = &core_alua_state_check_nop; |
1954 | printk(KERN_INFO "%s: Using SPC_ALUA_PASSTHROUGH, no ALUA" | 1982 | pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA" |
1955 | " emulation\n", TRANSPORT(dev)->name); | 1983 | " emulation\n", dev->transport->name); |
1956 | return 0; | 1984 | return 0; |
1957 | } | 1985 | } |
1958 | /* | 1986 | /* |
1959 | * If SPC-3 or above is reported by real or emulated struct se_device, | 1987 | * If SPC-3 or above is reported by real or emulated struct se_device, |
1960 | * use emulated ALUA. | 1988 | * use emulated ALUA. |
1961 | */ | 1989 | */ |
1962 | if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) { | 1990 | if (dev->transport->get_device_rev(dev) >= SCSI_3) { |
1963 | printk(KERN_INFO "%s: Enabling ALUA Emulation for SPC-3" | 1991 | pr_debug("%s: Enabling ALUA Emulation for SPC-3" |
1964 | " device\n", TRANSPORT(dev)->name); | 1992 | " device\n", dev->transport->name); |
1965 | /* | 1993 | /* |
1966 | * Associate this struct se_device with the default ALUA | 1994 | * Associate this struct se_device with the default ALUA |
1967 | * LUN Group. | 1995 | * LUN Group. |
1968 | */ | 1996 | */ |
1969 | lu_gp_mem = core_alua_allocate_lu_gp_mem(dev); | 1997 | lu_gp_mem = core_alua_allocate_lu_gp_mem(dev); |
1970 | if (IS_ERR(lu_gp_mem) || !lu_gp_mem) | 1998 | if (IS_ERR(lu_gp_mem)) |
1971 | return -1; | 1999 | return PTR_ERR(lu_gp_mem); |
1972 | 2000 | ||
1973 | alua->alua_type = SPC3_ALUA_EMULATED; | 2001 | alua->alua_type = SPC3_ALUA_EMULATED; |
1974 | alua->alua_state_check = &core_alua_state_check; | 2002 | alua->alua_state_check = &core_alua_state_check; |
1975 | spin_lock(&lu_gp_mem->lu_gp_mem_lock); | 2003 | spin_lock(&lu_gp_mem->lu_gp_mem_lock); |
1976 | __core_alua_attach_lu_gp_mem(lu_gp_mem, | 2004 | __core_alua_attach_lu_gp_mem(lu_gp_mem, |
1977 | se_global->default_lu_gp); | 2005 | default_lu_gp); |
1978 | spin_unlock(&lu_gp_mem->lu_gp_mem_lock); | 2006 | spin_unlock(&lu_gp_mem->lu_gp_mem_lock); |
1979 | 2007 | ||
1980 | printk(KERN_INFO "%s: Adding to default ALUA LU Group:" | 2008 | pr_debug("%s: Adding to default ALUA LU Group:" |
1981 | " core/alua/lu_gps/default_lu_gp\n", | 2009 | " core/alua/lu_gps/default_lu_gp\n", |
1982 | TRANSPORT(dev)->name); | 2010 | dev->transport->name); |
1983 | } else { | 2011 | } else { |
1984 | alua->alua_type = SPC2_ALUA_DISABLED; | 2012 | alua->alua_type = SPC2_ALUA_DISABLED; |
1985 | alua->alua_state_check = &core_alua_state_check_nop; | 2013 | alua->alua_state_check = &core_alua_state_check_nop; |
1986 | printk(KERN_INFO "%s: Disabling ALUA Emulation for SPC-2" | 2014 | pr_debug("%s: Disabling ALUA Emulation for SPC-2" |
1987 | " device\n", TRANSPORT(dev)->name); | 2015 | " device\n", dev->transport->name); |
1988 | } | 2016 | } |
1989 | 2017 | ||
1990 | return 0; | 2018 | return 0; |
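A small behavioral fix hides in the final core_setup_alua() hunk: core_alua_allocate_lu_gp_mem() never returns NULL on failure — it returns ERR_PTR(-ENOMEM), as its own hunk above shows — so the old IS_ERR(lu_gp_mem) || !lu_gp_mem test carried a dead clause and then discarded the errno as -1. The ERR_PTR convention the new code relies on, in isolation:

    struct t10_alua_lu_gp_member *mem;

    mem = core_alua_allocate_lu_gp_mem(dev);
    if (IS_ERR(mem))                /* failure is encoded in the pointer... */
            return PTR_ERR(mem);    /* ...and decoded back to -ENOMEM here */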
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 7f19c8b7b84c..8ae09a1bdf74 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c | |||
@@ -23,6 +23,7 @@ | |||
23 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 23 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/kernel.h> | ||
26 | #include <asm/unaligned.h> | 27 | #include <asm/unaligned.h> |
27 | #include <scsi/scsi.h> | 28 | #include <scsi/scsi.h> |
28 | 29 | ||
@@ -64,20 +65,22 @@ target_fill_alua_data(struct se_port *port, unsigned char *buf) | |||
64 | static int | 65 | static int |
65 | target_emulate_inquiry_std(struct se_cmd *cmd) | 66 | target_emulate_inquiry_std(struct se_cmd *cmd) |
66 | { | 67 | { |
67 | struct se_lun *lun = SE_LUN(cmd); | 68 | struct se_lun *lun = cmd->se_lun; |
68 | struct se_device *dev = SE_DEV(cmd); | 69 | struct se_device *dev = cmd->se_dev; |
69 | unsigned char *buf = cmd->t_task->t_task_buf; | 70 | unsigned char *buf; |
70 | 71 | ||
71 | /* | 72 | /* |
72 | * Make sure we at least have 6 bytes of INQUIRY response | 73 | * Make sure we at least have 6 bytes of INQUIRY response |
73 | * payload going back for EVPD=0 | 74 | * payload going back for EVPD=0 |
74 | */ | 75 | */ |
75 | if (cmd->data_length < 6) { | 76 | if (cmd->data_length < 6) { |
76 | printk(KERN_ERR "SCSI Inquiry payload length: %u" | 77 | pr_err("SCSI Inquiry payload length: %u" |
77 | " too small for EVPD=0\n", cmd->data_length); | 78 | " too small for EVPD=0\n", cmd->data_length); |
78 | return -1; | 79 | return -EINVAL; |
79 | } | 80 | } |
80 | 81 | ||
82 | buf = transport_kmap_first_data_page(cmd); | ||
83 | |||
81 | buf[0] = dev->transport->get_device_type(dev); | 84 | buf[0] = dev->transport->get_device_type(dev); |
82 | if (buf[0] == TYPE_TAPE) | 85 | if (buf[0] == TYPE_TAPE) |
83 | buf[1] = 0x80; | 86 | buf[1] = 0x80; |
@@ -86,12 +89,12 @@ target_emulate_inquiry_std(struct se_cmd *cmd) | |||
86 | /* | 89 | /* |
87 | * Enable SCCS and TPGS fields for Emulated ALUA | 90 | * Enable SCCS and TPGS fields for Emulated ALUA |
88 | */ | 91 | */ |
89 | if (T10_ALUA(dev->se_sub_dev)->alua_type == SPC3_ALUA_EMULATED) | 92 | if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) |
90 | target_fill_alua_data(lun->lun_sep, buf); | 93 | target_fill_alua_data(lun->lun_sep, buf); |
91 | 94 | ||
92 | if (cmd->data_length < 8) { | 95 | if (cmd->data_length < 8) { |
93 | buf[4] = 1; /* Set additional length to 1 */ | 96 | buf[4] = 1; /* Set additional length to 1 */ |
94 | return 0; | 97 | goto out; |
95 | } | 98 | } |
96 | 99 | ||
97 | buf[7] = 0x32; /* Sync=1 and CmdQue=1 */ | 100 | buf[7] = 0x32; /* Sync=1 and CmdQue=1 */ |
@@ -102,40 +105,18 @@ target_emulate_inquiry_std(struct se_cmd *cmd) | |||
102 | */ | 105 | */ |
103 | if (cmd->data_length < 36) { | 106 | if (cmd->data_length < 36) { |
104 | buf[4] = 3; /* Set additional length to 3 */ | 107 | buf[4] = 3; /* Set additional length to 3 */ |
105 | return 0; | 108 | goto out; |
106 | } | 109 | } |
107 | 110 | ||
108 | snprintf((unsigned char *)&buf[8], 8, "LIO-ORG"); | 111 | snprintf((unsigned char *)&buf[8], 8, "LIO-ORG"); |
109 | snprintf((unsigned char *)&buf[16], 16, "%s", | 112 | snprintf((unsigned char *)&buf[16], 16, "%s", |
110 | &DEV_T10_WWN(dev)->model[0]); | 113 | &dev->se_sub_dev->t10_wwn.model[0]); |
111 | snprintf((unsigned char *)&buf[32], 4, "%s", | 114 | snprintf((unsigned char *)&buf[32], 4, "%s", |
112 | &DEV_T10_WWN(dev)->revision[0]); | 115 | &dev->se_sub_dev->t10_wwn.revision[0]); |
113 | buf[4] = 31; /* Set additional length to 31 */ | 116 | buf[4] = 31; /* Set additional length to 31 */ |
114 | return 0; | ||
115 | } | ||
116 | |||
117 | /* supported vital product data pages */ | ||
118 | static int | ||
119 | target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) | ||
120 | { | ||
121 | buf[1] = 0x00; | ||
122 | if (cmd->data_length < 8) | ||
123 | return 0; | ||
124 | |||
125 | buf[4] = 0x0; | ||
126 | /* | ||
127 | * Only report the INQUIRY EVPD=1 pages after a valid NAA | ||
128 | * Registered Extended LUN WWN has been set via ConfigFS | ||
129 | * during device creation/restart. | ||
130 | */ | ||
131 | if (SE_DEV(cmd)->se_sub_dev->su_dev_flags & | ||
132 | SDF_EMULATED_VPD_UNIT_SERIAL) { | ||
133 | buf[3] = 3; | ||
134 | buf[5] = 0x80; | ||
135 | buf[6] = 0x83; | ||
136 | buf[7] = 0x86; | ||
137 | } | ||
138 | 117 | ||
118 | out: | ||
119 | transport_kunmap_first_data_page(cmd); | ||
139 | return 0; | 120 | return 0; |
140 | } | 121 | } |
141 | 122 | ||
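target_emulate_inquiry_std() now maps its response buffer instead of dereferencing cmd->t_task->t_task_buf: with control CDB payloads moved to scatter-gather memory by this series, the handler kmaps the first data page, builds the INQUIRY payload in place, and unmaps on every exit path (hence the new out: label). The access pattern in isolation:

    unsigned char *buf;

    buf = transport_kmap_first_data_page(cmd);   /* kmap() the first SG page */
    /* ... fill the response bytes in place ... */
    transport_kunmap_first_data_page(cmd);       /* paired unmap on all exits */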
@@ -143,16 +124,15 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) | |||
143 | static int | 124 | static int |
144 | target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) | 125 | target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) |
145 | { | 126 | { |
146 | struct se_device *dev = SE_DEV(cmd); | 127 | struct se_device *dev = cmd->se_dev; |
147 | u16 len = 0; | 128 | u16 len = 0; |
148 | 129 | ||
149 | buf[1] = 0x80; | ||
150 | if (dev->se_sub_dev->su_dev_flags & | 130 | if (dev->se_sub_dev->su_dev_flags & |
151 | SDF_EMULATED_VPD_UNIT_SERIAL) { | 131 | SDF_EMULATED_VPD_UNIT_SERIAL) { |
152 | u32 unit_serial_len; | 132 | u32 unit_serial_len; |
153 | 133 | ||
154 | unit_serial_len = | 134 | unit_serial_len = |
155 | strlen(&DEV_T10_WWN(dev)->unit_serial[0]); | 135 | strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]); |
156 | unit_serial_len++; /* For NULL Terminator */ | 136 | unit_serial_len++; /* For NULL Terminator */ |
157 | 137 | ||
158 | if (((len + 4) + unit_serial_len) > cmd->data_length) { | 138 | if (((len + 4) + unit_serial_len) > cmd->data_length) { |
@@ -162,7 +142,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) | |||
162 | return 0; | 142 | return 0; |
163 | } | 143 | } |
164 | len += sprintf((unsigned char *)&buf[4], "%s", | 144 | len += sprintf((unsigned char *)&buf[4], "%s", |
165 | &DEV_T10_WWN(dev)->unit_serial[0]); | 145 | &dev->se_sub_dev->t10_wwn.unit_serial[0]); |
166 | len++; /* Extra Byte for NULL Terminator */ | 146 | len++; /* Extra Byte for NULL Terminator */ |
167 | buf[3] = len; | 147 | buf[3] = len; |
168 | } | 148 | } |
@@ -176,21 +156,18 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) | |||
176 | static int | 156 | static int |
177 | target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) | 157 | target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) |
178 | { | 158 | { |
179 | struct se_device *dev = SE_DEV(cmd); | 159 | struct se_device *dev = cmd->se_dev; |
180 | struct se_lun *lun = SE_LUN(cmd); | 160 | struct se_lun *lun = cmd->se_lun; |
181 | struct se_port *port = NULL; | 161 | struct se_port *port = NULL; |
182 | struct se_portal_group *tpg = NULL; | 162 | struct se_portal_group *tpg = NULL; |
183 | struct t10_alua_lu_gp_member *lu_gp_mem; | 163 | struct t10_alua_lu_gp_member *lu_gp_mem; |
184 | struct t10_alua_tg_pt_gp *tg_pt_gp; | 164 | struct t10_alua_tg_pt_gp *tg_pt_gp; |
185 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | 165 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; |
186 | unsigned char binary, binary_new; | 166 | unsigned char *prod = &dev->se_sub_dev->t10_wwn.model[0]; |
187 | unsigned char *prod = &DEV_T10_WWN(dev)->model[0]; | ||
188 | u32 prod_len; | 167 | u32 prod_len; |
189 | u32 unit_serial_len, off = 0; | 168 | u32 unit_serial_len, off = 0; |
190 | int i; | ||
191 | u16 len = 0, id_len; | 169 | u16 len = 0, id_len; |
192 | 170 | ||
193 | buf[1] = 0x83; | ||
194 | off = 4; | 171 | off = 4; |
195 | 172 | ||
196 | /* | 173 | /* |
@@ -210,11 +187,11 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) | |||
210 | /* CODE SET == Binary */ | 187 | /* CODE SET == Binary */ |
211 | buf[off++] = 0x1; | 188 | buf[off++] = 0x1; |
212 | 189 | ||
213 | /* Set ASSOICATION == addressed logical unit: 0)b */ | 190 | /* Set ASSOCIATION == addressed logical unit: 0)b */ |
214 | buf[off] = 0x00; | 191 | buf[off] = 0x00; |
215 | 192 | ||
216 | /* Identifier/Designator type == NAA identifier */ | 193 | /* Identifier/Designator type == NAA identifier */ |
217 | buf[off++] = 0x3; | 194 | buf[off++] |= 0x3; |
218 | off++; | 195 | off++; |
219 | 196 | ||
220 | /* Identifier/Designator length */ | 197 | /* Identifier/Designator length */ |
@@ -237,16 +214,9 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) | |||
237 | * VENDOR_SPECIFIC_IDENTIFIER and | 214 | * VENDOR_SPECIFIC_IDENTIFIER and |
238 | * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION | 215 | * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION |

239 | */ | 216 | */ |
240 | binary = transport_asciihex_to_binaryhex( | 217 | buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]); |
241 | &DEV_T10_WWN(dev)->unit_serial[0]); | 218 | hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12); |
242 | buf[off++] |= (binary & 0xf0) >> 4; | 219 | |
243 | for (i = 0; i < 24; i += 2) { | ||
244 | binary_new = transport_asciihex_to_binaryhex( | ||
245 | &DEV_T10_WWN(dev)->unit_serial[i+2]); | ||
246 | buf[off] = (binary & 0x0f) << 4; | ||
247 | buf[off++] |= (binary_new & 0xf0) >> 4; | ||
248 | binary = binary_new; | ||
249 | } | ||
250 | len = 20; | 220 | len = 20; |
251 | off = (len + 4); | 221 | off = (len + 4); |
252 | 222 | ||
@@ -263,7 +233,7 @@ check_t10_vend_desc: | |||
263 | if (dev->se_sub_dev->su_dev_flags & | 233 | if (dev->se_sub_dev->su_dev_flags & |
264 | SDF_EMULATED_VPD_UNIT_SERIAL) { | 234 | SDF_EMULATED_VPD_UNIT_SERIAL) { |
265 | unit_serial_len = | 235 | unit_serial_len = |
266 | strlen(&DEV_T10_WWN(dev)->unit_serial[0]); | 236 | strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]); |
267 | unit_serial_len++; /* For NULL Terminator */ | 237 | unit_serial_len++; /* For NULL Terminator */ |
268 | 238 | ||
269 | if ((len + (id_len + 4) + | 239 | if ((len + (id_len + 4) + |
@@ -274,7 +244,7 @@ check_t10_vend_desc: | |||
274 | } | 244 | } |
275 | id_len += sprintf((unsigned char *)&buf[off+12], | 245 | id_len += sprintf((unsigned char *)&buf[off+12], |
276 | "%s:%s", prod, | 246 | "%s:%s", prod, |
277 | &DEV_T10_WWN(dev)->unit_serial[0]); | 247 | &dev->se_sub_dev->t10_wwn.unit_serial[0]); |
278 | } | 248 | } |
279 | buf[off] = 0x2; /* ASCII */ | 249 | buf[off] = 0x2; /* ASCII */ |
280 | buf[off+1] = 0x1; /* T10 Vendor ID */ | 250 | buf[off+1] = 0x1; /* T10 Vendor ID */ |
@@ -312,10 +282,10 @@ check_port: | |||
312 | goto check_tpgi; | 282 | goto check_tpgi; |
313 | } | 283 | } |
314 | buf[off] = | 284 | buf[off] = |
315 | (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4); | 285 | (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); |
316 | buf[off++] |= 0x1; /* CODE SET == Binary */ | 286 | buf[off++] |= 0x1; /* CODE SET == Binary */ |
317 | buf[off] = 0x80; /* Set PIV=1 */ | 287 | buf[off] = 0x80; /* Set PIV=1 */ |
318 | /* Set ASSOICATION == target port: 01b */ | 288 | /* Set ASSOCIATION == target port: 01b */ |
319 | buf[off] |= 0x10; | 289 | buf[off] |= 0x10; |
320 | /* DESIGNATOR TYPE == Relative target port identifier */ | 290 | /* DESIGNATOR TYPE == Relative target port identifier */ |
321 | buf[off++] |= 0x4; | 291 | buf[off++] |= 0x4; |
@@ -335,7 +305,7 @@ check_port: | |||
335 | * section 7.5.1 Table 362 | 305 | * section 7.5.1 Table 362 |
336 | */ | 306 | */ |
337 | check_tpgi: | 307 | check_tpgi: |
338 | if (T10_ALUA(dev->se_sub_dev)->alua_type != | 308 | if (dev->se_sub_dev->t10_alua.alua_type != |
339 | SPC3_ALUA_EMULATED) | 309 | SPC3_ALUA_EMULATED) |
340 | goto check_scsi_name; | 310 | goto check_scsi_name; |
341 | 311 | ||
@@ -349,7 +319,7 @@ check_tpgi: | |||
349 | 319 | ||
350 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 320 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); |
351 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; | 321 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; |
352 | if (!(tg_pt_gp)) { | 322 | if (!tg_pt_gp) { |
353 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 323 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); |
354 | goto check_lu_gp; | 324 | goto check_lu_gp; |
355 | } | 325 | } |
@@ -357,10 +327,10 @@ check_tpgi: | |||
357 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 327 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); |
358 | 328 | ||
359 | buf[off] = | 329 | buf[off] = |
360 | (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4); | 330 | (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); |
361 | buf[off++] |= 0x1; /* CODE SET == Binary */ | 331 | buf[off++] |= 0x1; /* CODE SET == Binary */ |
362 | buf[off] = 0x80; /* Set PIV=1 */ | 332 | buf[off] = 0x80; /* Set PIV=1 */ |
363 | /* Set ASSOICATION == target port: 01b */ | 333 | /* Set ASSOCIATION == target port: 01b */ |
364 | buf[off] |= 0x10; | 334 | buf[off] |= 0x10; |
365 | /* DESIGNATOR TYPE == Target port group identifier */ | 335 | /* DESIGNATOR TYPE == Target port group identifier */ |
366 | buf[off++] |= 0x5; | 336 | buf[off++] |= 0x5; |
@@ -380,12 +350,12 @@ check_lu_gp: | |||
380 | goto check_scsi_name; | 350 | goto check_scsi_name; |
381 | } | 351 | } |
382 | lu_gp_mem = dev->dev_alua_lu_gp_mem; | 352 | lu_gp_mem = dev->dev_alua_lu_gp_mem; |
383 | if (!(lu_gp_mem)) | 353 | if (!lu_gp_mem) |
384 | goto check_scsi_name; | 354 | goto check_scsi_name; |
385 | 355 | ||
386 | spin_lock(&lu_gp_mem->lu_gp_mem_lock); | 356 | spin_lock(&lu_gp_mem->lu_gp_mem_lock); |
387 | lu_gp = lu_gp_mem->lu_gp; | 357 | lu_gp = lu_gp_mem->lu_gp; |
388 | if (!(lu_gp)) { | 358 | if (!lu_gp) { |
389 | spin_unlock(&lu_gp_mem->lu_gp_mem_lock); | 359 | spin_unlock(&lu_gp_mem->lu_gp_mem_lock); |
390 | goto check_scsi_name; | 360 | goto check_scsi_name; |
391 | } | 361 | } |
@@ -409,7 +379,7 @@ check_lu_gp: | |||
409 | * section 7.5.1 Table 362 | 379 | * section 7.5.1 Table 362 |
410 | */ | 380 | */ |
411 | check_scsi_name: | 381 | check_scsi_name: |
412 | scsi_name_len = strlen(TPG_TFO(tpg)->tpg_get_wwn(tpg)); | 382 | scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg)); |
413 | /* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */ | 383 | /* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */ |
414 | scsi_name_len += 10; | 384 | scsi_name_len += 10; |
415 | /* Check for 4-byte padding */ | 385 | /* Check for 4-byte padding */ |
@@ -424,10 +394,10 @@ check_scsi_name: | |||
424 | goto set_len; | 394 | goto set_len; |
425 | } | 395 | } |
426 | buf[off] = | 396 | buf[off] = |
427 | (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4); | 397 | (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); |
428 | buf[off++] |= 0x3; /* CODE SET == UTF-8 */ | 398 | buf[off++] |= 0x3; /* CODE SET == UTF-8 */ |
429 | buf[off] = 0x80; /* Set PIV=1 */ | 399 | buf[off] = 0x80; /* Set PIV=1 */ |
430 | /* Set ASSOICATION == target port: 01b */ | 400 | /* Set ASSOCIATION == target port: 01b */ |
431 | buf[off] |= 0x10; | 401 | buf[off] |= 0x10; |
432 | /* DESIGNATOR TYPE == SCSI name string */ | 402 | /* DESIGNATOR TYPE == SCSI name string */ |
433 | buf[off++] |= 0x8; | 403 | buf[off++] |= 0x8; |
@@ -438,9 +408,9 @@ check_scsi_name: | |||
438 | * Target Port, this means "<iSCSI name>,t,0x<TPGT> in | 408 | * Target Port, this means "<iSCSI name>,t,0x<TPGT> in |
439 | * UTF-8 encoding. | 409 | * UTF-8 encoding. |
440 | */ | 410 | */ |
441 | tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg); | 411 | tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg); |
442 | scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x", | 412 | scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x", |
443 | TPG_TFO(tpg)->tpg_get_wwn(tpg), tpgt); | 413 | tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt); |
444 | scsi_name_len += 1 /* Include NULL terminator */; | 414 | scsi_name_len += 1 /* Include NULL terminator */; |
445 | /* | 415 | /* |
446 | * The null-terminated, null-padded (see 4.4.2) SCSI | 416 | * The null-terminated, null-padded (see 4.4.2) SCSI |
@@ -471,13 +441,12 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) | |||
471 | if (cmd->data_length < 60) | 441 | if (cmd->data_length < 60) |
472 | return 0; | 442 | return 0; |
473 | 443 | ||
474 | buf[1] = 0x86; | ||
475 | buf[2] = 0x3c; | 444 | buf[2] = 0x3c; |
476 | /* Set HEADSUP, ORDSUP, SIMPSUP */ | 445 | /* Set HEADSUP, ORDSUP, SIMPSUP */ |
477 | buf[5] = 0x07; | 446 | buf[5] = 0x07; |
478 | 447 | ||
479 | /* If WriteCache emulation is enabled, set V_SUP */ | 448 | /* If WriteCache emulation is enabled, set V_SUP */ |
480 | if (DEV_ATTRIB(SE_DEV(cmd))->emulate_write_cache > 0) | 449 | if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) |
481 | buf[6] = 0x01; | 450 | buf[6] = 0x01; |
482 | return 0; | 451 | return 0; |
483 | } | 452 | } |
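The constants in the Extended INQUIRY Data page above decode as follows; the macro names paraphrase SPC-4 wording and are illustrative, not taken from the driver:

```c
#include <stdio.h>

#define HEADSUP	(1 << 2)	/* HEAD OF QUEUE task attribute supported */
#define ORDSUP	(1 << 1)	/* ORDERED task attribute supported */
#define SIMPSUP	(1 << 0)	/* SIMPLE task attribute supported */
#define V_SUP	(1 << 0)	/* volatile cache supported, in byte 6 */

int main(void)
{
	unsigned char byte5 = HEADSUP | ORDSUP | SIMPSUP;	/* == 0x07 */
	unsigned char byte6 = V_SUP;	/* only when write-cache emulation is on */

	printf("byte5=0x%02x byte6=0x%02x\n", byte5, byte6);
	return 0;
}
```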
@@ -486,7 +455,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) | |||
486 | static int | 455 | static int |
487 | target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) | 456 | target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) |
488 | { | 457 | { |
489 | struct se_device *dev = SE_DEV(cmd); | 458 | struct se_device *dev = cmd->se_dev; |
490 | int have_tp = 0; | 459 | int have_tp = 0; |
491 | 460 | ||
492 | /* | 461 | /* |
@@ -494,27 +463,29 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) | |||
494 | * emulate_tpu=1 or emulate_tpws=1 we will expect a | 463 | * emulate_tpu=1 or emulate_tpws=1 we will expect a |
495 | * different page length for Thin Provisioning. | 464 | * different page length for Thin Provisioning. |
496 | */ | 465 | */ |
497 | if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws) | 466 | if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) |
498 | have_tp = 1; | 467 | have_tp = 1; |
499 | 468 | ||
500 | if (cmd->data_length < (0x10 + 4)) { | 469 | if (cmd->data_length < (0x10 + 4)) { |
501 | printk(KERN_INFO "Received data_length: %u" | 470 | pr_debug("Received data_length: %u" |
502 | " too small for EVPD 0xb0\n", | 471 | " too small for EVPD 0xb0\n", |
503 | cmd->data_length); | 472 | cmd->data_length); |
504 | return -1; | 473 | return -EINVAL; |
505 | } | 474 | } |
506 | 475 | ||
507 | if (have_tp && cmd->data_length < (0x3c + 4)) { | 476 | if (have_tp && cmd->data_length < (0x3c + 4)) { |
508 | printk(KERN_INFO "Received data_length: %u" | 477 | pr_debug("Received data_length: %u" |
509 | " too small for TPE=1 EVPD 0xb0\n", | 478 | " too small for TPE=1 EVPD 0xb0\n", |
510 | cmd->data_length); | 479 | cmd->data_length); |
511 | have_tp = 0; | 480 | have_tp = 0; |
512 | } | 481 | } |
513 | 482 | ||
514 | buf[0] = dev->transport->get_device_type(dev); | 483 | buf[0] = dev->transport->get_device_type(dev); |
515 | buf[1] = 0xb0; | ||
516 | buf[3] = have_tp ? 0x3c : 0x10; | 484 | buf[3] = have_tp ? 0x3c : 0x10; |
517 | 485 | ||
486 | /* Set WSNZ to 1 */ | ||
487 | buf[4] = 0x01; | ||
488 | |||
518 | /* | 489 | /* |
519 | * Set OPTIMAL TRANSFER LENGTH GRANULARITY | 490 | * Set OPTIMAL TRANSFER LENGTH GRANULARITY |
520 | */ | 491 | */ |
@@ -523,12 +494,12 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) | |||
523 | /* | 494 | /* |
524 | * Set MAXIMUM TRANSFER LENGTH | 495 | * Set MAXIMUM TRANSFER LENGTH |
525 | */ | 496 | */ |
526 | put_unaligned_be32(DEV_ATTRIB(dev)->max_sectors, &buf[8]); | 497 | put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_sectors, &buf[8]); |
527 | 498 | ||
528 | /* | 499 | /* |
529 | * Set OPTIMAL TRANSFER LENGTH | 500 | * Set OPTIMAL TRANSFER LENGTH |
530 | */ | 501 | */ |
531 | put_unaligned_be32(DEV_ATTRIB(dev)->optimal_sectors, &buf[12]); | 502 | put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]); |
532 | 503 | ||
533 | /* | 504 | /* |
534 | * Exit now if we don't support TP or the initiator sent a too | 505 | * Exit now if we don't support TP or the initiator sent a too |
@@ -540,35 +511,51 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) | |||
540 | /* | 511 | /* |
541 | * Set MAXIMUM UNMAP LBA COUNT | 512 | * Set MAXIMUM UNMAP LBA COUNT |
542 | */ | 513 | */ |
543 | put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_lba_count, &buf[20]); | 514 | put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count, &buf[20]); |
544 | 515 | ||
545 | /* | 516 | /* |
546 | * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT | 517 | * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT |
547 | */ | 518 | */ |
548 | put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_block_desc_count, | 519 | put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count, |
549 | &buf[24]); | 520 | &buf[24]); |
550 | 521 | ||
551 | /* | 522 | /* |
552 | * Set OPTIMAL UNMAP GRANULARITY | 523 | * Set OPTIMAL UNMAP GRANULARITY |
553 | */ | 524 | */ |
554 | put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity, &buf[28]); | 525 | put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity, &buf[28]); |
555 | 526 | ||
556 | /* | 527 | /* |
557 | * UNMAP GRANULARITY ALIGNMENT | 528 | * UNMAP GRANULARITY ALIGNMENT |
558 | */ | 529 | */ |
559 | put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity_alignment, | 530 | put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment, |
560 | &buf[32]); | 531 | &buf[32]); |
561 | if (DEV_ATTRIB(dev)->unmap_granularity_alignment != 0) | 532 | if (dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment != 0) |
562 | buf[32] |= 0x80; /* Set the UGAVALID bit */ | 533 | buf[32] |= 0x80; /* Set the UGAVALID bit */ |
563 | 534 | ||
564 | return 0; | 535 | return 0; |
565 | } | 536 | } |
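The Block Limits fields above are all 4-byte big-endian stores at fixed offsets into the page. A self-contained sketch of the same packing, with a local put_unaligned_be32() and made-up attribute values standing in for se_dev_attrib:

```c
#include <stdio.h>

/* Local stand-in for the kernel's put_unaligned_be32(). */
static void put_unaligned_be32(unsigned int val, unsigned char *p)
{
	p[0] = val >> 24;
	p[1] = val >> 16;
	p[2] = val >> 8;
	p[3] = val;
}

int main(void)
{
	unsigned char buf[0x3c + 4] = { 0 };

	buf[3] = 0x3c;				/* PAGE LENGTH with TP fields */
	buf[4] = 0x01;				/* WSNZ */
	put_unaligned_be32(1024, &buf[8]);	/* MAXIMUM TRANSFER LENGTH */
	put_unaligned_be32(128, &buf[12]);	/* OPTIMAL TRANSFER LENGTH */
	put_unaligned_be32(0x2000, &buf[20]);	/* MAXIMUM UNMAP LBA COUNT */
	put_unaligned_be32(1, &buf[24]);	/* MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT */
	put_unaligned_be32(1, &buf[28]);	/* OPTIMAL UNMAP GRANULARITY */
	put_unaligned_be32(0, &buf[32]);	/* UNMAP GRANULARITY ALIGNMENT */

	printf("MAXIMUM TRANSFER LENGTH: %02x %02x %02x %02x\n",
	       buf[8], buf[9], buf[10], buf[11]);
	return 0;
}
```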
566 | 537 | ||
538 | /* Block Device Characteristics VPD page */ | ||
539 | static int | ||
540 | target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf) | ||
541 | { | ||
542 | struct se_device *dev = cmd->se_dev; | ||
543 | |||
544 | buf[0] = dev->transport->get_device_type(dev); | ||
545 | buf[3] = 0x3c; | ||
546 | |||
547 | if (cmd->data_length >= 5 && | ||
548 | dev->se_sub_dev->se_dev_attrib.is_nonrot) | ||
549 | buf[5] = 1; | ||
550 | |||
551 | return 0; | ||
552 | } | ||
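In the new B1h page, bytes 4-5 form the big-endian MEDIUM ROTATION RATE field, so setting buf[5] = 1 with buf[4] left zero reads back as 0x0001, which SBC-3 defines as a non-rotating (solid-state) medium. A tiny sketch of that round trip:

```c
#include <stdio.h>

int main(void)
{
	unsigned char buf[64] = { 0 };

	buf[3] = 0x3c;	/* PAGE LENGTH */
	buf[5] = 1;	/* with buf[4] == 0, the 16-bit field reads 0x0001 */

	unsigned rate = (buf[4] << 8) | buf[5];
	printf("MEDIUM ROTATION RATE: 0x%04x (%s)\n", rate,
	       rate == 1 ? "non-rotating / SSD" : "rpm or unreported");
	return 0;
}
```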
553 | |||
567 | /* Thin Provisioning VPD */ | 554 | /* Thin Provisioning VPD */ |
568 | static int | 555 | static int |
569 | target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) | 556 | target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) |
570 | { | 557 | { |
571 | struct se_device *dev = SE_DEV(cmd); | 558 | struct se_device *dev = cmd->se_dev; |
572 | 559 | ||
573 | /* | 560 | /* |
574 | * From sbc3r22 section 6.5.4 Thin Provisioning VPD page: | 561 | * From sbc3r22 section 6.5.4 Thin Provisioning VPD page: |
@@ -579,7 +566,6 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) | |||
579 | * defined in table 162. | 566 | * defined in table 162. |
580 | */ | 567 | */ |
581 | buf[0] = dev->transport->get_device_type(dev); | 568 | buf[0] = dev->transport->get_device_type(dev); |
582 | buf[1] = 0xb2; | ||
583 | 569 | ||
584 | /* | 570 | /* |
585 | * Set Hardcoded length mentioned above for DP=0 | 571 | * Set Hardcoded length mentioned above for DP=0 |
@@ -602,7 +588,7 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) | |||
602 | * the UNMAP command (see 5.25). A TPU bit set to zero indicates | 588 | * the UNMAP command (see 5.25). A TPU bit set to zero indicates |
603 | * that the device server does not support the UNMAP command. | 589 | * that the device server does not support the UNMAP command. |
604 | */ | 590 | */ |
605 | if (DEV_ATTRIB(dev)->emulate_tpu != 0) | 591 | if (dev->se_sub_dev->se_dev_attrib.emulate_tpu != 0) |
606 | buf[5] = 0x80; | 592 | buf[5] = 0x80; |
607 | 593 | ||
608 | /* | 594 | /* |
@@ -611,18 +597,59 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) | |||
611 | * A TPWS bit set to zero indicates that the device server does not | 597 | * A TPWS bit set to zero indicates that the device server does not |
612 | * support the use of the WRITE SAME (16) command to unmap LBAs. | 598 | * support the use of the WRITE SAME (16) command to unmap LBAs. |
613 | */ | 599 | */ |
614 | if (DEV_ATTRIB(dev)->emulate_tpws != 0) | 600 | if (dev->se_sub_dev->se_dev_attrib.emulate_tpws != 0) |
615 | buf[5] |= 0x40; | 601 | buf[5] |= 0x40; |
616 | 602 | ||
617 | return 0; | 603 | return 0; |
618 | } | 604 | } |
619 | 605 | ||
620 | static int | 606 | static int |
607 | target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf); | ||
608 | |||
609 | static struct { | ||
610 | uint8_t page; | ||
611 | int (*emulate)(struct se_cmd *, unsigned char *); | ||
612 | } evpd_handlers[] = { | ||
613 | { .page = 0x00, .emulate = target_emulate_evpd_00 }, | ||
614 | { .page = 0x80, .emulate = target_emulate_evpd_80 }, | ||
615 | { .page = 0x83, .emulate = target_emulate_evpd_83 }, | ||
616 | { .page = 0x86, .emulate = target_emulate_evpd_86 }, | ||
617 | { .page = 0xb0, .emulate = target_emulate_evpd_b0 }, | ||
618 | { .page = 0xb1, .emulate = target_emulate_evpd_b1 }, | ||
619 | { .page = 0xb2, .emulate = target_emulate_evpd_b2 }, | ||
620 | }; | ||
621 | |||
622 | /* supported vital product data pages */ | ||
623 | static int | ||
624 | target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) | ||
625 | { | ||
626 | int p; | ||
627 | |||
628 | if (cmd->data_length < 8) | ||
629 | return 0; | ||
630 | /* | ||
631 | * Only report the INQUIRY EVPD=1 pages after a valid NAA | ||
632 | * Registered Extended LUN WWN has been set via ConfigFS | ||
633 | * during device creation/restart. | ||
634 | */ | ||
635 | if (cmd->se_dev->se_sub_dev->su_dev_flags & | ||
636 | SDF_EMULATED_VPD_UNIT_SERIAL) { | ||
637 | buf[3] = ARRAY_SIZE(evpd_handlers); | ||
638 | for (p = 0; p < min_t(int, ARRAY_SIZE(evpd_handlers), | ||
639 | cmd->data_length - 4); ++p) | ||
640 | buf[p + 4] = evpd_handlers[p].page; | ||
641 | } | ||
642 | |||
643 | return 0; | ||
644 | } | ||
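Forward-declaring target_emulate_evpd_00() lets page 0x00 sit in the table that describes it, and keeping evpd_handlers[] sorted by page code means the supported-pages payload comes out in ascending order as SPC expects. A reduced sketch of the same table-driven dispatch (handler bodies are stubs, names illustrative):

```c
#include <stdio.h>

typedef int (*evpd_fn)(unsigned char *buf);

static int emulate_80(unsigned char *buf) { (void)buf; return 0; }
static int emulate_83(unsigned char *buf) { (void)buf; return 0; }

/* Keep this sorted ascending so the supported-pages page (0x00)
 * lists page codes in the order SPC expects. */
static struct {
	unsigned char page;
	evpd_fn emulate;
} handlers[] = {
	{ 0x80, emulate_80 },
	{ 0x83, emulate_83 },
};

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

int main(void)
{
	unsigned char requested = 0x83;
	size_t p;

	for (p = 0; p < ARRAY_SIZE(handlers); ++p)
		if (handlers[p].page == requested)
			return handlers[p].emulate(NULL);

	fprintf(stderr, "Unknown VPD Code: 0x%02x\n", requested);
	return 1;
}
```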
645 | |||
646 | static int | ||
621 | target_emulate_inquiry(struct se_cmd *cmd) | 647 | target_emulate_inquiry(struct se_cmd *cmd) |
622 | { | 648 | { |
623 | struct se_device *dev = SE_DEV(cmd); | 649 | struct se_device *dev = cmd->se_dev; |
624 | unsigned char *buf = cmd->t_task->t_task_buf; | 650 | unsigned char *buf; |
625 | unsigned char *cdb = cmd->t_task->t_task_cdb; | 651 | unsigned char *cdb = cmd->t_task_cdb; |
652 | int p, ret; | ||
626 | 653 | ||
627 | if (!(cdb[1] & 0x1)) | 654 | if (!(cdb[1] & 0x1)) |
628 | return target_emulate_inquiry_std(cmd); | 655 | return target_emulate_inquiry_std(cmd); |
@@ -635,38 +662,33 @@ target_emulate_inquiry(struct se_cmd *cmd) | |||
635 | * payload length left for the next outgoing EVPD metadata | 662 | * payload length left for the next outgoing EVPD metadata |
636 | */ | 663 | */ |
637 | if (cmd->data_length < 4) { | 664 | if (cmd->data_length < 4) { |
638 | printk(KERN_ERR "SCSI Inquiry payload length: %u" | 665 | pr_err("SCSI Inquiry payload length: %u" |
639 | " too small for EVPD=1\n", cmd->data_length); | 666 | " too small for EVPD=1\n", cmd->data_length); |
640 | return -1; | 667 | return -EINVAL; |
641 | } | 668 | } |
669 | |||
670 | buf = transport_kmap_first_data_page(cmd); | ||
671 | |||
642 | buf[0] = dev->transport->get_device_type(dev); | 672 | buf[0] = dev->transport->get_device_type(dev); |
643 | 673 | ||
644 | switch (cdb[2]) { | 674 | for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) |
645 | case 0x00: | 675 | if (cdb[2] == evpd_handlers[p].page) { |
646 | return target_emulate_evpd_00(cmd, buf); | 676 | buf[1] = cdb[2]; |
647 | case 0x80: | 677 | ret = evpd_handlers[p].emulate(cmd, buf); |
648 | return target_emulate_evpd_80(cmd, buf); | 678 | transport_kunmap_first_data_page(cmd); |
649 | case 0x83: | 679 | return ret; |
650 | return target_emulate_evpd_83(cmd, buf); | 680 | } |
651 | case 0x86: | ||
652 | return target_emulate_evpd_86(cmd, buf); | ||
653 | case 0xb0: | ||
654 | return target_emulate_evpd_b0(cmd, buf); | ||
655 | case 0xb2: | ||
656 | return target_emulate_evpd_b2(cmd, buf); | ||
657 | default: | ||
658 | printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]); | ||
659 | return -1; | ||
660 | } | ||
661 | 681 | ||
662 | return 0; | 682 | transport_kunmap_first_data_page(cmd); |
683 | pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]); | ||
684 | return -EINVAL; | ||
663 | } | 685 | } |
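With control CDBs now scatter-gather, each emulation handler brackets its payload writes with transport_kmap_first_data_page()/transport_kunmap_first_data_page() and must unmap on every exit path. A self-contained sketch of that discipline; the two helper names are real and from this series, while struct se_cmd and the flat-buffer stand-ins are reduced to the minimum needed to compile:

```c
#include <stdio.h>

struct se_cmd { unsigned char payload[256]; };

/* The real helper kmap()s the first scatterlist page of the command;
 * here a flat buffer plays that role. */
static unsigned char *transport_kmap_first_data_page(struct se_cmd *cmd)
{
	return cmd->payload;
}

static void transport_kunmap_first_data_page(struct se_cmd *cmd)
{
	(void)cmd;
}

/* Map once, unmap on every exit path, never return with the
 * page still mapped. */
static int emulate_something(struct se_cmd *cmd, int bad_input)
{
	unsigned char *buf = transport_kmap_first_data_page(cmd);
	int ret = 0;

	if (bad_input) {
		ret = -22;	/* -EINVAL */
		goto out;
	}
	buf[0] = 0x00;		/* ...fill in the response payload... */
out:
	transport_kunmap_first_data_page(cmd);
	return ret;
}

int main(void)
{
	struct se_cmd cmd = { { 0 } };

	printf("ok=%d bad=%d\n", emulate_something(&cmd, 0),
	       emulate_something(&cmd, 1));
	return 0;
}
```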
664 | 686 | ||
665 | static int | 687 | static int |
666 | target_emulate_readcapacity(struct se_cmd *cmd) | 688 | target_emulate_readcapacity(struct se_cmd *cmd) |
667 | { | 689 | { |
668 | struct se_device *dev = SE_DEV(cmd); | 690 | struct se_device *dev = cmd->se_dev; |
669 | unsigned char *buf = cmd->t_task->t_task_buf; | 691 | unsigned char *buf; |
670 | unsigned long long blocks_long = dev->transport->get_blocks(dev); | 692 | unsigned long long blocks_long = dev->transport->get_blocks(dev); |
671 | u32 blocks; | 693 | u32 blocks; |
672 | 694 | ||
@@ -675,30 +697,36 @@ target_emulate_readcapacity(struct se_cmd *cmd) | |||
675 | else | 697 | else |
676 | blocks = (u32)blocks_long; | 698 | blocks = (u32)blocks_long; |
677 | 699 | ||
700 | buf = transport_kmap_first_data_page(cmd); | ||
701 | |||
678 | buf[0] = (blocks >> 24) & 0xff; | 702 | buf[0] = (blocks >> 24) & 0xff; |
679 | buf[1] = (blocks >> 16) & 0xff; | 703 | buf[1] = (blocks >> 16) & 0xff; |
680 | buf[2] = (blocks >> 8) & 0xff; | 704 | buf[2] = (blocks >> 8) & 0xff; |
681 | buf[3] = blocks & 0xff; | 705 | buf[3] = blocks & 0xff; |
682 | buf[4] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff; | 706 | buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; |
683 | buf[5] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff; | 707 | buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; |
684 | buf[6] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff; | 708 | buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; |
685 | buf[7] = DEV_ATTRIB(dev)->block_size & 0xff; | 709 | buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; |
686 | /* | 710 | /* |
687 | * Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16 | 711 | * Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16 |
688 | */ | 712 | */ |
689 | if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws) | 713 | if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) |
690 | put_unaligned_be32(0xFFFFFFFF, &buf[0]); | 714 | put_unaligned_be32(0xFFFFFFFF, &buf[0]); |
691 | 715 | ||
716 | transport_kunmap_first_data_page(cmd); | ||
717 | |||
692 | return 0; | 718 | return 0; |
693 | } | 719 | } |
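The byte shifts above are just an open-coded big-endian store, and the all-ones LBA (forced when the device holds more than 32 bits of blocks, or when emulate_tpu/emulate_tpws is set) tells the initiator to retry with READ CAPACITY (16). A runnable sketch with illustrative sizes:

```c
#include <stdio.h>

static void put_unaligned_be32(unsigned int val, unsigned char *p)
{
	p[0] = val >> 24; p[1] = val >> 16; p[2] = val >> 8; p[3] = val;
}

int main(void)
{
	unsigned long long blocks_long = 0x1ffffffffULL;	/* > 32-bit device */
	unsigned int blocks = blocks_long >= 0xffffffffULL
			    ? 0xffffffffU : (unsigned int)blocks_long;
	unsigned char buf[8];

	put_unaligned_be32(blocks, &buf[0]);	/* RETURNED LOGICAL BLOCK ADDRESS */
	put_unaligned_be32(512, &buf[4]);	/* BLOCK LENGTH IN BYTES */

	/* All-ones LBA: issue READ CAPACITY (16) for the real size. */
	printf("%02x%02x%02x%02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}
```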
694 | 720 | ||
695 | static int | 721 | static int |
696 | target_emulate_readcapacity_16(struct se_cmd *cmd) | 722 | target_emulate_readcapacity_16(struct se_cmd *cmd) |
697 | { | 723 | { |
698 | struct se_device *dev = SE_DEV(cmd); | 724 | struct se_device *dev = cmd->se_dev; |
699 | unsigned char *buf = cmd->t_task->t_task_buf; | 725 | unsigned char *buf; |
700 | unsigned long long blocks = dev->transport->get_blocks(dev); | 726 | unsigned long long blocks = dev->transport->get_blocks(dev); |
701 | 727 | ||
728 | buf = transport_kmap_first_data_page(cmd); | ||
729 | |||
702 | buf[0] = (blocks >> 56) & 0xff; | 730 | buf[0] = (blocks >> 56) & 0xff; |
703 | buf[1] = (blocks >> 48) & 0xff; | 731 | buf[1] = (blocks >> 48) & 0xff; |
704 | buf[2] = (blocks >> 40) & 0xff; | 732 | buf[2] = (blocks >> 40) & 0xff; |
@@ -707,17 +735,19 @@ target_emulate_readcapacity_16(struct se_cmd *cmd) | |||
707 | buf[5] = (blocks >> 16) & 0xff; | 735 | buf[5] = (blocks >> 16) & 0xff; |
708 | buf[6] = (blocks >> 8) & 0xff; | 736 | buf[6] = (blocks >> 8) & 0xff; |
709 | buf[7] = blocks & 0xff; | 737 | buf[7] = blocks & 0xff; |
710 | buf[8] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff; | 738 | buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; |
711 | buf[9] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff; | 739 | buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; |
712 | buf[10] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff; | 740 | buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; |
713 | buf[11] = DEV_ATTRIB(dev)->block_size & 0xff; | 741 | buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; |
714 | /* | 742 | /* |
715 | * Set Thin Provisioning Enable bit following sbc3r22 in section | 743 | * Set Thin Provisioning Enable bit following sbc3r22 in section |
716 | * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. | 744 | * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. |
717 | */ | 745 | */ |
718 | if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws) | 746 | if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) |
719 | buf[14] = 0x80; | 747 | buf[14] = 0x80; |
720 | 748 | ||
749 | transport_kunmap_first_data_page(cmd); | ||
750 | |||
721 | return 0; | 751 | return 0; |
722 | } | 752 | } |
723 | 753 | ||
@@ -737,6 +767,35 @@ target_modesense_control(struct se_device *dev, unsigned char *p) | |||
737 | p[1] = 0x0a; | 767 | p[1] = 0x0a; |
738 | p[2] = 2; | 768 | p[2] = 2; |
739 | /* | 769 | /* |
770 | * From spc4r23, 7.4.7 Control mode page | ||
771 | * | ||
772 | * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies | ||
773 | * restrictions on the algorithm used for reordering commands | ||
774 | * having the SIMPLE task attribute (see SAM-4). | ||
775 | * | ||
776 | * Table 368 -- QUEUE ALGORITHM MODIFIER field | ||
777 | * Code Description | ||
778 | * 0h Restricted reordering | ||
779 | * 1h Unrestricted reordering allowed | ||
780 | * 2h to 7h Reserved | ||
781 | * 8h to Fh Vendor specific | ||
782 | * | ||
783 | * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that | ||
784 | * the device server shall order the processing sequence of commands | ||
785 | * having the SIMPLE task attribute such that data integrity is maintained | ||
786 | * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol | ||
787 | * requests is halted at any time, the final value of all data observable | ||
788 | * on the medium shall be the same as if all the commands had been processed | ||
789 | * with the ORDERED task attribute). | ||
790 | * | ||
791 | * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the | ||
792 | * device server may reorder the processing sequence of commands having the | ||
793 | * SIMPLE task attribute in any manner. Any data integrity exposures related to | ||
794 | * command sequence order shall be explicitly handled by the application client | ||
795 | * through the selection of appropriate commands and task attributes. | ||
796 | */ | ||
797 | p[3] = (dev->se_sub_dev->se_dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10; | ||
798 | /* | ||
740 | * From spc4r17, section 7.4.6 Control mode Page | 799 | * From spc4r17, section 7.4.6 Control mode Page |
741 | * | 800 | * |
742 | * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b | 801 | * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b |
@@ -765,8 +824,8 @@ target_modesense_control(struct se_device *dev, unsigned char *p) | |||
765 | * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless | 824 | * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless |
766 | * to the number of commands completed with one of those status codes. | 825 | * to the number of commands completed with one of those status codes. |
767 | */ | 826 | */ |
768 | p[4] = (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 2) ? 0x30 : | 827 | p[4] = (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 : |
769 | (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00; | 828 | (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00; |
770 | /* | 829 | /* |
771 | * From spc4r17, section 7.4.6 Control mode Page | 830 | * From spc4r17, section 7.4.6 Control mode Page |
772 | * | 831 | * |
@@ -779,7 +838,7 @@ target_modesense_control(struct se_device *dev, unsigned char *p) | |||
779 | * which the command was received shall be completed with TASK ABORTED | 838 | * which the command was received shall be completed with TASK ABORTED |
780 | * status (see SAM-4). | 839 | * status (see SAM-4). |
781 | */ | 840 | */ |
782 | p[5] = (DEV_ATTRIB(dev)->emulate_tas) ? 0x40 : 0x00; | 841 | p[5] = (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? 0x40 : 0x00; |
783 | p[8] = 0xff; | 842 | p[8] = 0xff; |
784 | p[9] = 0xff; | 843 | p[9] = 0xff; |
785 | p[11] = 30; | 844 | p[11] = 30; |
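The new p[3] assignment packs the QUEUE ALGORITHM MODIFIER into bits 7:4 of control-page byte 3: 0x10 encodes code 1h (unrestricted reordering) and 0x00 encodes code 0h (restricted), matching the spc4r23 table quoted above. A sketch of that mapping with an illustrative attribute value:

```c
#include <stdio.h>

int main(void)
{
	int emulate_rest_reord = 0;	/* illustrative se_dev_attrib value */
	unsigned char p3 = emulate_rest_reord == 1 ? 0x00 : 0x10;

	/* QUEUE ALGORITHM MODIFIER lives in the upper nibble of byte 3. */
	printf("QUEUE ALGORITHM MODIFIER = %xh\n", p3 >> 4);
	return 0;
}
```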
@@ -792,7 +851,7 @@ target_modesense_caching(struct se_device *dev, unsigned char *p) | |||
792 | { | 851 | { |
793 | p[0] = 0x08; | 852 | p[0] = 0x08; |
794 | p[1] = 0x12; | 853 | p[1] = 0x12; |
795 | if (DEV_ATTRIB(dev)->emulate_write_cache > 0) | 854 | if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) |
796 | p[2] = 0x04; /* Write Cache Enable */ | 855 | p[2] = 0x04; /* Write Cache Enable */ |
797 | p[12] = 0x20; /* Disabled Read Ahead */ | 856 | p[12] = 0x20; /* Disabled Read Ahead */ |
798 | 857 | ||
@@ -830,9 +889,9 @@ target_modesense_dpofua(unsigned char *buf, int type) | |||
830 | static int | 889 | static int |
831 | target_emulate_modesense(struct se_cmd *cmd, int ten) | 890 | target_emulate_modesense(struct se_cmd *cmd, int ten) |
832 | { | 891 | { |
833 | struct se_device *dev = SE_DEV(cmd); | 892 | struct se_device *dev = cmd->se_dev; |
834 | char *cdb = cmd->t_task->t_task_cdb; | 893 | char *cdb = cmd->t_task_cdb; |
835 | unsigned char *rbuf = cmd->t_task->t_task_buf; | 894 | unsigned char *rbuf; |
836 | int type = dev->transport->get_device_type(dev); | 895 | int type = dev->transport->get_device_type(dev); |
837 | int offset = (ten) ? 8 : 4; | 896 | int offset = (ten) ? 8 : 4; |
838 | int length = 0; | 897 | int length = 0; |
@@ -856,7 +915,7 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) | |||
856 | length += target_modesense_control(dev, &buf[offset+length]); | 915 | length += target_modesense_control(dev, &buf[offset+length]); |
857 | break; | 916 | break; |
858 | default: | 917 | default: |
859 | printk(KERN_ERR "Got Unknown Mode Page: 0x%02x\n", | 918 | pr_err("Got Unknown Mode Page: 0x%02x\n", |
860 | cdb[2] & 0x3f); | 919 | cdb[2] & 0x3f); |
861 | return PYX_TRANSPORT_UNKNOWN_MODE_PAGE; | 920 | return PYX_TRANSPORT_UNKNOWN_MODE_PAGE; |
862 | } | 921 | } |
@@ -867,13 +926,13 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) | |||
867 | buf[0] = (offset >> 8) & 0xff; | 926 | buf[0] = (offset >> 8) & 0xff; |
868 | buf[1] = offset & 0xff; | 927 | buf[1] = offset & 0xff; |
869 | 928 | ||
870 | if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || | 929 | if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || |
871 | (cmd->se_deve && | 930 | (cmd->se_deve && |
872 | (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) | 931 | (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) |
873 | target_modesense_write_protect(&buf[3], type); | 932 | target_modesense_write_protect(&buf[3], type); |
874 | 933 | ||
875 | if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) && | 934 | if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) && |
876 | (DEV_ATTRIB(dev)->emulate_fua_write > 0)) | 935 | (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0)) |
877 | target_modesense_dpofua(&buf[3], type); | 936 | target_modesense_dpofua(&buf[3], type); |
878 | 937 | ||
879 | if ((offset + 2) > cmd->data_length) | 938 | if ((offset + 2) > cmd->data_length) |
@@ -883,19 +942,22 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) | |||
883 | offset -= 1; | 942 | offset -= 1; |
884 | buf[0] = offset & 0xff; | 943 | buf[0] = offset & 0xff; |
885 | 944 | ||
886 | if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || | 945 | if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || |
887 | (cmd->se_deve && | 946 | (cmd->se_deve && |
888 | (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) | 947 | (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) |
889 | target_modesense_write_protect(&buf[2], type); | 948 | target_modesense_write_protect(&buf[2], type); |
890 | 949 | ||
891 | if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) && | 950 | if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) && |
892 | (DEV_ATTRIB(dev)->emulate_fua_write > 0)) | 951 | (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0)) |
893 | target_modesense_dpofua(&buf[2], type); | 952 | target_modesense_dpofua(&buf[2], type); |
894 | 953 | ||
895 | if ((offset + 1) > cmd->data_length) | 954 | if ((offset + 1) > cmd->data_length) |
896 | offset = cmd->data_length; | 955 | offset = cmd->data_length; |
897 | } | 956 | } |
957 | |||
958 | rbuf = transport_kmap_first_data_page(cmd); | ||
898 | memcpy(rbuf, buf, offset); | 959 | memcpy(rbuf, buf, offset); |
960 | transport_kunmap_first_data_page(cmd); | ||
899 | 961 | ||
900 | return 0; | 962 | return 0; |
901 | } | 963 | } |
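The offset bookkeeping above reflects that MODE DATA LENGTH counts everything after itself: the 10-byte MODE SENSE header subtracts its own 2-byte length field, the 6-byte header subtracts 1. A sketch with a made-up page length:

```c
#include <stdio.h>

int main(void)
{
	int ten = 1;			/* MODE SENSE(10) vs MODE SENSE(6) */
	int offset = ten ? 8 : 4;	/* mode parameter header size */
	int length = 16;		/* hypothetical total page bytes */

	offset += length;
	offset -= ten ? 2 : 1;		/* MODE DATA LENGTH excludes itself */
	printf("MODE DATA LENGTH = %d\n", offset);
	return 0;
}
```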
@@ -903,16 +965,20 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) | |||
903 | static int | 965 | static int |
904 | target_emulate_request_sense(struct se_cmd *cmd) | 966 | target_emulate_request_sense(struct se_cmd *cmd) |
905 | { | 967 | { |
906 | unsigned char *cdb = cmd->t_task->t_task_cdb; | 968 | unsigned char *cdb = cmd->t_task_cdb; |
907 | unsigned char *buf = cmd->t_task->t_task_buf; | 969 | unsigned char *buf; |
908 | u8 ua_asc = 0, ua_ascq = 0; | 970 | u8 ua_asc = 0, ua_ascq = 0; |
971 | int err = 0; | ||
909 | 972 | ||
910 | if (cdb[1] & 0x01) { | 973 | if (cdb[1] & 0x01) { |
911 | printk(KERN_ERR "REQUEST_SENSE description emulation not" | 974 | pr_err("REQUEST_SENSE description emulation not" |
912 | " supported\n"); | 975 | " supported\n"); |
913 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | 976 | return PYX_TRANSPORT_INVALID_CDB_FIELD; |
914 | } | 977 | } |
915 | if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) { | 978 | |
979 | buf = transport_kmap_first_data_page(cmd); | ||
980 | |||
981 | if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) { | ||
916 | /* | 982 | /* |
917 | * CURRENT ERROR, UNIT ATTENTION | 983 | * CURRENT ERROR, UNIT ATTENTION |
918 | */ | 984 | */ |
@@ -924,7 +990,8 @@ target_emulate_request_sense(struct se_cmd *cmd) | |||
924 | */ | 990 | */ |
925 | if (cmd->data_length <= 18) { | 991 | if (cmd->data_length <= 18) { |
926 | buf[7] = 0x00; | 992 | buf[7] = 0x00; |
927 | return 0; | 993 | err = -EINVAL; |
994 | goto end; | ||
928 | } | 995 | } |
929 | /* | 996 | /* |
930 | * The Additional Sense Code (ASC) from the UNIT ATTENTION | 997 | * The Additional Sense Code (ASC) from the UNIT ATTENTION |
@@ -944,7 +1011,8 @@ target_emulate_request_sense(struct se_cmd *cmd) | |||
944 | */ | 1011 | */ |
945 | if (cmd->data_length <= 18) { | 1012 | if (cmd->data_length <= 18) { |
946 | buf[7] = 0x00; | 1013 | buf[7] = 0x00; |
947 | return 0; | 1014 | err = -EINVAL; |
1015 | goto end; | ||
948 | } | 1016 | } |
949 | /* | 1017 | /* |
950 | * NO ADDITIONAL SENSE INFORMATION | 1018 | * NO ADDITIONAL SENSE INFORMATION |
@@ -953,6 +1021,9 @@ target_emulate_request_sense(struct se_cmd *cmd) | |||
953 | buf[7] = 0x0A; | 1021 | buf[7] = 0x0A; |
954 | } | 1022 | } |
955 | 1023 | ||
1024 | end: | ||
1025 | transport_kunmap_first_data_page(cmd); | ||
1026 | |||
956 | return 0; | 1027 | return 0; |
957 | } | 1028 | } |
958 | 1029 | ||
@@ -963,13 +1034,13 @@ target_emulate_request_sense(struct se_cmd *cmd) | |||
963 | static int | 1034 | static int |
964 | target_emulate_unmap(struct se_task *task) | 1035 | target_emulate_unmap(struct se_task *task) |
965 | { | 1036 | { |
966 | struct se_cmd *cmd = TASK_CMD(task); | 1037 | struct se_cmd *cmd = task->task_se_cmd; |
967 | struct se_device *dev = SE_DEV(cmd); | 1038 | struct se_device *dev = cmd->se_dev; |
968 | unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL; | 1039 | unsigned char *buf, *ptr = NULL; |
969 | unsigned char *cdb = &cmd->t_task->t_task_cdb[0]; | 1040 | unsigned char *cdb = &cmd->t_task_cdb[0]; |
970 | sector_t lba; | 1041 | sector_t lba; |
971 | unsigned int size = cmd->data_length, range; | 1042 | unsigned int size = cmd->data_length, range; |
972 | int ret, offset; | 1043 | int ret = 0, offset; |
973 | unsigned short dl, bd_dl; | 1044 | unsigned short dl, bd_dl; |
974 | 1045 | ||
975 | /* First UNMAP block descriptor starts at 8 byte offset */ | 1046 | /* First UNMAP block descriptor starts at 8 byte offset */ |
@@ -977,21 +1048,24 @@ target_emulate_unmap(struct se_task *task) | |||
977 | size -= 8; | 1048 | size -= 8; |
978 | dl = get_unaligned_be16(&cdb[0]); | 1049 | dl = get_unaligned_be16(&cdb[0]); |
979 | bd_dl = get_unaligned_be16(&cdb[2]); | 1050 | bd_dl = get_unaligned_be16(&cdb[2]); |
1051 | |||
1052 | buf = transport_kmap_first_data_page(cmd); | ||
1053 | |||
980 | ptr = &buf[offset]; | 1054 | ptr = &buf[offset]; |
981 | printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu" | 1055 | pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu" |
982 | " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); | 1056 | " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); |
983 | 1057 | ||
984 | while (size) { | 1058 | while (size) { |
985 | lba = get_unaligned_be64(&ptr[0]); | 1059 | lba = get_unaligned_be64(&ptr[0]); |
986 | range = get_unaligned_be32(&ptr[8]); | 1060 | range = get_unaligned_be32(&ptr[8]); |
987 | printk(KERN_INFO "UNMAP: Using lba: %llu and range: %u\n", | 1061 | pr_debug("UNMAP: Using lba: %llu and range: %u\n", |
988 | (unsigned long long)lba, range); | 1062 | (unsigned long long)lba, range); |
989 | 1063 | ||
990 | ret = dev->transport->do_discard(dev, lba, range); | 1064 | ret = dev->transport->do_discard(dev, lba, range); |
991 | if (ret < 0) { | 1065 | if (ret < 0) { |
992 | printk(KERN_ERR "blkdev_issue_discard() failed: %d\n", | 1066 | pr_err("blkdev_issue_discard() failed: %d\n", |
993 | ret); | 1067 | ret); |
994 | return -1; | 1068 | goto err; |
995 | } | 1069 | } |
996 | 1070 | ||
997 | ptr += 16; | 1071 | ptr += 16; |
@@ -1000,7 +1074,10 @@ target_emulate_unmap(struct se_task *task) | |||
1000 | 1074 | ||
1001 | task->task_scsi_status = GOOD; | 1075 | task->task_scsi_status = GOOD; |
1002 | transport_complete_task(task, 1); | 1076 | transport_complete_task(task, 1); |
1003 | return 0; | 1077 | err: |
1078 | transport_kunmap_first_data_page(cmd); | ||
1079 | |||
1080 | return ret; | ||
1004 | } | 1081 | } |
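The UNMAP parameter list walked above is an 8-byte header followed by 16-byte block descriptors: an 8-byte LBA, a 4-byte block count, and 4 reserved bytes. A standalone sketch of the same walk, with local big-endian readers and one hand-built descriptor:

```c
#include <stdio.h>

static unsigned long long get_unaligned_be64(const unsigned char *p)
{
	unsigned long long v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

static unsigned int get_unaligned_be32(const unsigned char *p)
{
	return ((unsigned int)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

int main(void)
{
	/* 8-byte parameter list header + one 16-byte block descriptor. */
	unsigned char param[24] = { 0 };
	const unsigned char *ptr;
	size_t size = sizeof(param) - 8;

	param[15] = 100;	/* descriptor LBA = 100 */
	param[19] = 8;		/* descriptor range = 8 blocks */

	for (ptr = &param[8]; size >= 16; ptr += 16, size -= 16)
		printf("discard lba %llu range %u\n",
		       get_unaligned_be64(&ptr[0]),
		       get_unaligned_be32(&ptr[8]));
	return 0;
}
```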
1005 | 1082 | ||
1006 | /* | 1083 | /* |
@@ -1008,23 +1085,36 @@ target_emulate_unmap(struct se_task *task) | |||
1008 | * Note this is not used for TCM/pSCSI passthrough | 1085 | * Note this is not used for TCM/pSCSI passthrough |
1009 | */ | 1086 | */ |
1010 | static int | 1087 | static int |
1011 | target_emulate_write_same(struct se_task *task) | 1088 | target_emulate_write_same(struct se_task *task, int write_same32) |
1012 | { | 1089 | { |
1013 | struct se_cmd *cmd = TASK_CMD(task); | 1090 | struct se_cmd *cmd = task->task_se_cmd; |
1014 | struct se_device *dev = SE_DEV(cmd); | 1091 | struct se_device *dev = cmd->se_dev; |
1015 | sector_t lba = cmd->t_task->t_task_lba; | 1092 | sector_t range; |
1016 | unsigned int range; | 1093 | sector_t lba = cmd->t_task_lba; |
1094 | unsigned int num_blocks; | ||
1017 | int ret; | 1095 | int ret; |
1096 | /* | ||
1097 | * Extract num_blocks from the WRITE_SAME_* CDB. Then use the explicit | ||
1098 | * range when non-zero is supplied, otherwise calculate the remaining | ||
1099 | * range based on ->get_blocks() - starting LBA. | ||
1100 | */ | ||
1101 | if (write_same32) | ||
1102 | num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); | ||
1103 | else | ||
1104 | num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); | ||
1018 | 1105 | ||
1019 | range = (cmd->data_length / DEV_ATTRIB(dev)->block_size); | 1106 | if (num_blocks != 0) |
1107 | range = num_blocks; | ||
1108 | else | ||
1109 | range = (dev->transport->get_blocks(dev) - lba); | ||
1020 | 1110 | ||
1021 | printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %u\n", | 1111 | pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n", |
1022 | (unsigned long long)lba, range); | 1112 | (unsigned long long)lba, (unsigned long long)range); |
1023 | 1113 | ||
1024 | ret = dev->transport->do_discard(dev, lba, range); | 1114 | ret = dev->transport->do_discard(dev, lba, range); |
1025 | if (ret < 0) { | 1115 | if (ret < 0) { |
1026 | printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n"); | 1116 | pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n"); |
1027 | return -1; | 1117 | return ret; |
1028 | } | 1118 | } |
1029 | 1119 | ||
1030 | task->task_scsi_status = GOOD; | 1120 | task->task_scsi_status = GOOD; |
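The new range logic reads NUMBER OF LOGICAL BLOCKS from byte 10 of WRITE SAME (16) or byte 28 of the 32-byte variant, and treats zero as "from the starting LBA to the end of the device" rather than deriving the range from data_length as the old code did. A sketch with illustrative numbers:

```c
#include <stdio.h>

int main(void)
{
	unsigned long long dev_blocks = 1ULL << 20;	/* get_blocks() stand-in */
	unsigned long long lba = 4096;
	unsigned int num_blocks = 0;			/* as extracted from the CDB */
	unsigned long long range = num_blocks ? num_blocks
					      : dev_blocks - lba;

	printf("discard %llu blocks from lba %llu\n", range, lba);
	return 0;
}
```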
@@ -1035,12 +1125,12 @@ target_emulate_write_same(struct se_task *task) | |||
1035 | int | 1125 | int |
1036 | transport_emulate_control_cdb(struct se_task *task) | 1126 | transport_emulate_control_cdb(struct se_task *task) |
1037 | { | 1127 | { |
1038 | struct se_cmd *cmd = TASK_CMD(task); | 1128 | struct se_cmd *cmd = task->task_se_cmd; |
1039 | struct se_device *dev = SE_DEV(cmd); | 1129 | struct se_device *dev = cmd->se_dev; |
1040 | unsigned short service_action; | 1130 | unsigned short service_action; |
1041 | int ret = 0; | 1131 | int ret = 0; |
1042 | 1132 | ||
1043 | switch (cmd->t_task->t_task_cdb[0]) { | 1133 | switch (cmd->t_task_cdb[0]) { |
1044 | case INQUIRY: | 1134 | case INQUIRY: |
1045 | ret = target_emulate_inquiry(cmd); | 1135 | ret = target_emulate_inquiry(cmd); |
1046 | break; | 1136 | break; |
@@ -1054,13 +1144,13 @@ transport_emulate_control_cdb(struct se_task *task) | |||
1054 | ret = target_emulate_modesense(cmd, 1); | 1144 | ret = target_emulate_modesense(cmd, 1); |
1055 | break; | 1145 | break; |
1056 | case SERVICE_ACTION_IN: | 1146 | case SERVICE_ACTION_IN: |
1057 | switch (cmd->t_task->t_task_cdb[1] & 0x1f) { | 1147 | switch (cmd->t_task_cdb[1] & 0x1f) { |
1058 | case SAI_READ_CAPACITY_16: | 1148 | case SAI_READ_CAPACITY_16: |
1059 | ret = target_emulate_readcapacity_16(cmd); | 1149 | ret = target_emulate_readcapacity_16(cmd); |
1060 | break; | 1150 | break; |
1061 | default: | 1151 | default: |
1062 | printk(KERN_ERR "Unsupported SA: 0x%02x\n", | 1152 | pr_err("Unsupported SA: 0x%02x\n", |
1063 | cmd->t_task->t_task_cdb[1] & 0x1f); | 1153 | cmd->t_task_cdb[1] & 0x1f); |
1064 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | 1154 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; |
1065 | } | 1155 | } |
1066 | break; | 1156 | break; |
@@ -1069,7 +1159,7 @@ transport_emulate_control_cdb(struct se_task *task) | |||
1069 | break; | 1159 | break; |
1070 | case UNMAP: | 1160 | case UNMAP: |
1071 | if (!dev->transport->do_discard) { | 1161 | if (!dev->transport->do_discard) { |
1072 | printk(KERN_ERR "UNMAP emulation not supported for: %s\n", | 1162 | pr_err("UNMAP emulation not supported for: %s\n", |
1073 | dev->transport->name); | 1163 | dev->transport->name); |
1074 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | 1164 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; |
1075 | } | 1165 | } |
@@ -1077,27 +1167,27 @@ transport_emulate_control_cdb(struct se_task *task) | |||
1077 | break; | 1167 | break; |
1078 | case WRITE_SAME_16: | 1168 | case WRITE_SAME_16: |
1079 | if (!dev->transport->do_discard) { | 1169 | if (!dev->transport->do_discard) { |
1080 | printk(KERN_ERR "WRITE_SAME_16 emulation not supported" | 1170 | pr_err("WRITE_SAME_16 emulation not supported" |
1081 | " for: %s\n", dev->transport->name); | 1171 | " for: %s\n", dev->transport->name); |
1082 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | 1172 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; |
1083 | } | 1173 | } |
1084 | ret = target_emulate_write_same(task); | 1174 | ret = target_emulate_write_same(task, 0); |
1085 | break; | 1175 | break; |
1086 | case VARIABLE_LENGTH_CMD: | 1176 | case VARIABLE_LENGTH_CMD: |
1087 | service_action = | 1177 | service_action = |
1088 | get_unaligned_be16(&cmd->t_task->t_task_cdb[8]); | 1178 | get_unaligned_be16(&cmd->t_task_cdb[8]); |
1089 | switch (service_action) { | 1179 | switch (service_action) { |
1090 | case WRITE_SAME_32: | 1180 | case WRITE_SAME_32: |
1091 | if (!dev->transport->do_discard) { | 1181 | if (!dev->transport->do_discard) { |
1092 | printk(KERN_ERR "WRITE_SAME_32 SA emulation not" | 1182 | pr_err("WRITE_SAME_32 SA emulation not" |
1093 | " supported for: %s\n", | 1183 | " supported for: %s\n", |
1094 | dev->transport->name); | 1184 | dev->transport->name); |
1095 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | 1185 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; |
1096 | } | 1186 | } |
1097 | ret = target_emulate_write_same(task); | 1187 | ret = target_emulate_write_same(task, 1); |
1098 | break; | 1188 | break; |
1099 | default: | 1189 | default: |
1100 | printk(KERN_ERR "Unsupported VARIABLE_LENGTH_CMD SA:" | 1190 | pr_err("Unsupported VARIABLE_LENGTH_CMD SA:" |
1101 | " 0x%02x\n", service_action); | 1191 | " 0x%02x\n", service_action); |
1102 | break; | 1192 | break; |
1103 | } | 1193 | } |
@@ -1105,8 +1195,7 @@ transport_emulate_control_cdb(struct se_task *task) | |||
1105 | case SYNCHRONIZE_CACHE: | 1195 | case SYNCHRONIZE_CACHE: |
1106 | case 0x91: /* SYNCHRONIZE_CACHE_16: */ | 1196 | case 0x91: /* SYNCHRONIZE_CACHE_16: */ |
1107 | if (!dev->transport->do_sync_cache) { | 1197 | if (!dev->transport->do_sync_cache) { |
1108 | printk(KERN_ERR | 1198 | pr_err("SYNCHRONIZE_CACHE emulation not supported" |
1109 | "SYNCHRONIZE_CACHE emulation not supported" | ||
1110 | " for: %s\n", dev->transport->name); | 1199 | " for: %s\n", dev->transport->name); |
1111 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | 1200 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; |
1112 | } | 1201 | } |
@@ -1123,8 +1212,8 @@ transport_emulate_control_cdb(struct se_task *task) | |||
1123 | case WRITE_FILEMARKS: | 1212 | case WRITE_FILEMARKS: |
1124 | break; | 1213 | break; |
1125 | default: | 1214 | default: |
1126 | printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n", | 1215 | pr_err("Unsupported SCSI Opcode: 0x%02x for %s\n", |
1127 | cmd->t_task->t_task_cdb[0], dev->transport->name); | 1216 | cmd->t_task_cdb[0], dev->transport->name); |
1128 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | 1217 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; |
1129 | } | 1218 | } |
1130 | 1219 | ||
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 25c1f49a7d8b..b2575d8568cc 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/parser.h> | 37 | #include <linux/parser.h> |
38 | #include <linux/syscalls.h> | 38 | #include <linux/syscalls.h> |
39 | #include <linux/configfs.h> | 39 | #include <linux/configfs.h> |
40 | #include <linux/spinlock.h> | ||
40 | 41 | ||
41 | #include <target/target_core_base.h> | 42 | #include <target/target_core_base.h> |
42 | #include <target/target_core_device.h> | 43 | #include <target/target_core_device.h> |
@@ -52,6 +53,8 @@ | |||
52 | #include "target_core_rd.h" | 53 | #include "target_core_rd.h" |
53 | #include "target_core_stat.h" | 54 | #include "target_core_stat.h" |
54 | 55 | ||
56 | extern struct t10_alua_lu_gp *default_lu_gp; | ||
57 | |||
55 | static struct list_head g_tf_list; | 58 | static struct list_head g_tf_list; |
56 | static struct mutex g_tf_lock; | 59 | static struct mutex g_tf_lock; |
57 | 60 | ||
@@ -61,6 +64,13 @@ struct target_core_configfs_attribute { | |||
61 | ssize_t (*store)(void *, const char *, size_t); | 64 | ssize_t (*store)(void *, const char *, size_t); |
62 | }; | 65 | }; |
63 | 66 | ||
67 | static struct config_group target_core_hbagroup; | ||
68 | static struct config_group alua_group; | ||
69 | static struct config_group alua_lu_gps_group; | ||
70 | |||
71 | static DEFINE_SPINLOCK(se_device_lock); | ||
72 | static LIST_HEAD(se_dev_list); | ||
73 | |||
64 | static inline struct se_hba * | 74 | static inline struct se_hba * |
65 | item_to_hba(struct config_item *item) | 75 | item_to_hba(struct config_item *item) |
66 | { | 76 | { |
@@ -94,12 +104,12 @@ static struct target_fabric_configfs *target_core_get_fabric( | |||
94 | { | 104 | { |
95 | struct target_fabric_configfs *tf; | 105 | struct target_fabric_configfs *tf; |
96 | 106 | ||
97 | if (!(name)) | 107 | if (!name) |
98 | return NULL; | 108 | return NULL; |
99 | 109 | ||
100 | mutex_lock(&g_tf_lock); | 110 | mutex_lock(&g_tf_lock); |
101 | list_for_each_entry(tf, &g_tf_list, tf_list) { | 111 | list_for_each_entry(tf, &g_tf_list, tf_list) { |
102 | if (!(strcmp(tf->tf_name, name))) { | 112 | if (!strcmp(tf->tf_name, name)) { |
103 | atomic_inc(&tf->tf_access_cnt); | 113 | atomic_inc(&tf->tf_access_cnt); |
104 | mutex_unlock(&g_tf_lock); | 114 | mutex_unlock(&g_tf_lock); |
105 | return tf; | 115 | return tf; |
@@ -120,7 +130,7 @@ static struct config_group *target_core_register_fabric( | |||
120 | struct target_fabric_configfs *tf; | 130 | struct target_fabric_configfs *tf; |
121 | int ret; | 131 | int ret; |
122 | 132 | ||
123 | printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> group: %p name:" | 133 | pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:" |
124 | " %s\n", group, name); | 134 | " %s\n", group, name); |
125 | /* | 135 | /* |
126 | * Ensure that TCM subsystem plugins are loaded at this point for | 136 | * Ensure that TCM subsystem plugins are loaded at this point for |
@@ -140,7 +150,7 @@ static struct config_group *target_core_register_fabric( | |||
140 | * registered, but simply provides auto loading logic for modules with | 150 | * registered, but simply provides auto loading logic for modules with |
141 | * mkdir(2) system calls with known TCM fabric modules. | 151 | * mkdir(2) system calls with known TCM fabric modules. |
142 | */ | 152 | */ |
143 | if (!(strncmp(name, "iscsi", 5))) { | 153 | if (!strncmp(name, "iscsi", 5)) { |
144 | /* | 154 | /* |
145 | * Automatically load the LIO Target fabric module when the | 155 | * Automatically load the LIO Target fabric module when the |
146 | * following is called: | 156 | * following is called: |
@@ -149,11 +159,11 @@ static struct config_group *target_core_register_fabric( | |||
149 | */ | 159 | */ |
150 | ret = request_module("iscsi_target_mod"); | 160 | ret = request_module("iscsi_target_mod"); |
151 | if (ret < 0) { | 161 | if (ret < 0) { |
152 | printk(KERN_ERR "request_module() failed for" | 162 | pr_err("request_module() failed for" |
153 | " iscsi_target_mod.ko: %d\n", ret); | 163 | " iscsi_target_mod.ko: %d\n", ret); |
154 | return ERR_PTR(-EINVAL); | 164 | return ERR_PTR(-EINVAL); |
155 | } | 165 | } |
156 | } else if (!(strncmp(name, "loopback", 8))) { | 166 | } else if (!strncmp(name, "loopback", 8)) { |
157 | /* | 167 | /* |
158 | * Automatically load the tcm_loop fabric module when the | 168 | * Automatically load the tcm_loop fabric module when the |
159 | * following is called: | 169 | * following is called: |
@@ -162,25 +172,25 @@ static struct config_group *target_core_register_fabric( | |||
162 | */ | 172 | */ |
163 | ret = request_module("tcm_loop"); | 173 | ret = request_module("tcm_loop"); |
164 | if (ret < 0) { | 174 | if (ret < 0) { |
165 | printk(KERN_ERR "request_module() failed for" | 175 | pr_err("request_module() failed for" |
166 | " tcm_loop.ko: %d\n", ret); | 176 | " tcm_loop.ko: %d\n", ret); |
167 | return ERR_PTR(-EINVAL); | 177 | return ERR_PTR(-EINVAL); |
168 | } | 178 | } |
169 | } | 179 | } |
170 | 180 | ||
171 | tf = target_core_get_fabric(name); | 181 | tf = target_core_get_fabric(name); |
172 | if (!(tf)) { | 182 | if (!tf) { |
173 | printk(KERN_ERR "target_core_get_fabric() failed for %s\n", | 183 | pr_err("target_core_get_fabric() failed for %s\n", |
174 | name); | 184 | name); |
175 | return ERR_PTR(-EINVAL); | 185 | return ERR_PTR(-EINVAL); |
176 | } | 186 | } |
177 | printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Located fabric:" | 187 | pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:" |
178 | " %s\n", tf->tf_name); | 188 | " %s\n", tf->tf_name); |
179 | /* | 189 | /* |
180 | * On a successful target_core_get_fabric() lookup, the returned | 190 | * On a successful target_core_get_fabric() lookup, the returned |
181 | * struct target_fabric_configfs *tf will contain a usage reference. | 191 | * struct target_fabric_configfs *tf will contain a usage reference. |
182 | */ | 192 | */ |
183 | printk(KERN_INFO "Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", | 193 | pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", |
184 | &TF_CIT_TMPL(tf)->tfc_wwn_cit); | 194 | &TF_CIT_TMPL(tf)->tfc_wwn_cit); |
185 | 195 | ||
186 | tf->tf_group.default_groups = tf->tf_default_groups; | 196 | tf->tf_group.default_groups = tf->tf_default_groups; |
@@ -192,14 +202,14 @@ static struct config_group *target_core_register_fabric( | |||
192 | config_group_init_type_name(&tf->tf_disc_group, "discovery_auth", | 202 | config_group_init_type_name(&tf->tf_disc_group, "discovery_auth", |
193 | &TF_CIT_TMPL(tf)->tfc_discovery_cit); | 203 | &TF_CIT_TMPL(tf)->tfc_discovery_cit); |
194 | 204 | ||
195 | printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" | 205 | pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" |
196 | " %s\n", tf->tf_group.cg_item.ci_name); | 206 | " %s\n", tf->tf_group.cg_item.ci_name); |
197 | /* | 207 | /* |
198 | * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item() | 208 | * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item() |
199 | */ | 209 | */ |
200 | tf->tf_ops.tf_subsys = tf->tf_subsys; | 210 | tf->tf_ops.tf_subsys = tf->tf_subsys; |
201 | tf->tf_fabric = &tf->tf_group.cg_item; | 211 | tf->tf_fabric = &tf->tf_group.cg_item; |
202 | printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric" | 212 | pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric" |
203 | " for %s\n", name); | 213 | " for %s\n", name); |
204 | 214 | ||
205 | return &tf->tf_group; | 215 | return &tf->tf_group; |
@@ -218,18 +228,18 @@ static void target_core_deregister_fabric( | |||
218 | struct config_item *df_item; | 228 | struct config_item *df_item; |
219 | int i; | 229 | int i; |
220 | 230 | ||
221 | printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Looking up %s in" | 231 | pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in" |
222 | " tf list\n", config_item_name(item)); | 232 | " tf list\n", config_item_name(item)); |
223 | 233 | ||
224 | printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> located fabric:" | 234 | pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:" |
225 | " %s\n", tf->tf_name); | 235 | " %s\n", tf->tf_name); |
226 | atomic_dec(&tf->tf_access_cnt); | 236 | atomic_dec(&tf->tf_access_cnt); |
227 | 237 | ||
228 | printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing" | 238 | pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing" |
229 | " tf->tf_fabric for %s\n", tf->tf_name); | 239 | " tf->tf_fabric for %s\n", tf->tf_name); |
230 | tf->tf_fabric = NULL; | 240 | tf->tf_fabric = NULL; |
231 | 241 | ||
232 | printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing ci" | 242 | pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci" |
233 | " %s\n", config_item_name(item)); | 243 | " %s\n", config_item_name(item)); |
234 | 244 | ||
235 | tf_group = &tf->tf_group; | 245 | tf_group = &tf->tf_group; |
@@ -296,23 +306,19 @@ struct target_fabric_configfs *target_fabric_configfs_init( | |||
296 | { | 306 | { |
297 | struct target_fabric_configfs *tf; | 307 | struct target_fabric_configfs *tf; |
298 | 308 | ||
299 | if (!(fabric_mod)) { | ||
300 | printk(KERN_ERR "Missing struct module *fabric_mod pointer\n"); | ||
301 | return NULL; | ||
302 | } | ||
303 | if (!(name)) { | 309 | if (!(name)) { |
304 | printk(KERN_ERR "Unable to locate passed fabric name\n"); | 310 | pr_err("Unable to locate passed fabric name\n"); |
305 | return NULL; | 311 | return ERR_PTR(-EINVAL); |
306 | } | 312 | } |
307 | if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) { | 313 | if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) { |
308 | printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC" | 314 | pr_err("Passed name: %s exceeds TARGET_FABRIC" |
309 | "_NAME_SIZE\n", name); | 315 | "_NAME_SIZE\n", name); |
310 | return NULL; | 316 | return ERR_PTR(-EINVAL); |
311 | } | 317 | } |
312 | 318 | ||
313 | tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); | 319 | tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); |
314 | if (!(tf)) | 320 | if (!tf) |
315 | return NULL; | 321 | return ERR_PTR(-ENOMEM); |
316 | 322 | ||
317 | INIT_LIST_HEAD(&tf->tf_list); | 323 | INIT_LIST_HEAD(&tf->tf_list); |
318 | atomic_set(&tf->tf_access_cnt, 0); | 324 | atomic_set(&tf->tf_access_cnt, 0); |
@@ -330,9 +336,9 @@ struct target_fabric_configfs *target_fabric_configfs_init( | |||
330 | list_add_tail(&tf->tf_list, &g_tf_list); | 336 | list_add_tail(&tf->tf_list, &g_tf_list); |
331 | mutex_unlock(&g_tf_lock); | 337 | mutex_unlock(&g_tf_lock); |
332 | 338 | ||
333 | printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>" | 339 | pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>" |
334 | ">>>>>>>>>>>>>>\n"); | 340 | ">>>>>>>>>>>>>>\n"); |
335 | printk(KERN_INFO "Initialized struct target_fabric_configfs: %p for" | 341 | pr_debug("Initialized struct target_fabric_configfs: %p for" |
336 | " %s\n", tf, tf->tf_name); | 342 | " %s\n", tf, tf->tf_name); |
337 | return tf; | 343 | return tf; |
338 | } | 344 | } |
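In the hunk above, target_fabric_configfs_init() stops returning bare NULL on failure and encodes the errno with ERR_PTR(), so callers can tell -EINVAL (bad or oversized name) from -ENOMEM (allocation failure). A sketch of the caller-side pattern this enables; the parameter list is an assumption based on the removed fabric_mod check, and the fabric name is illustrative:

    #include <linux/err.h>

    /* Illustrative caller: decode the errno carried in the pointer. */
    static int example_register_fabric(void)
    {
            struct target_fabric_configfs *tf;

            tf = target_fabric_configfs_init(THIS_MODULE, "example");
            if (IS_ERR(tf))
                    return PTR_ERR(tf); /* -EINVAL or -ENOMEM, per the conversion above */

            /* ... fill tf->tf_ops, then call target_fabric_configfs_register(tf) ... */
            return 0;
    }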
@@ -361,140 +367,132 @@ static int target_fabric_tf_ops_check( | |||
361 | { | 367 | { |
362 | struct target_core_fabric_ops *tfo = &tf->tf_ops; | 368 | struct target_core_fabric_ops *tfo = &tf->tf_ops; |
363 | 369 | ||
364 | if (!(tfo->get_fabric_name)) { | 370 | if (!tfo->get_fabric_name) { |
365 | printk(KERN_ERR "Missing tfo->get_fabric_name()\n"); | 371 | pr_err("Missing tfo->get_fabric_name()\n"); |
366 | return -EINVAL; | ||
367 | } | ||
368 | if (!(tfo->get_fabric_proto_ident)) { | ||
369 | printk(KERN_ERR "Missing tfo->get_fabric_proto_ident()\n"); | ||
370 | return -EINVAL; | ||
371 | } | ||
372 | if (!(tfo->tpg_get_wwn)) { | ||
373 | printk(KERN_ERR "Missing tfo->tpg_get_wwn()\n"); | ||
374 | return -EINVAL; | 372 | return -EINVAL; |
375 | } | 373 | } |
376 | if (!(tfo->tpg_get_tag)) { | 374 | if (!tfo->get_fabric_proto_ident) { |
377 | printk(KERN_ERR "Missing tfo->tpg_get_tag()\n"); | 375 | pr_err("Missing tfo->get_fabric_proto_ident()\n"); |
378 | return -EINVAL; | 376 | return -EINVAL; |
379 | } | 377 | } |
380 | if (!(tfo->tpg_get_default_depth)) { | 378 | if (!tfo->tpg_get_wwn) { |
381 | printk(KERN_ERR "Missing tfo->tpg_get_default_depth()\n"); | 379 | pr_err("Missing tfo->tpg_get_wwn()\n"); |
382 | return -EINVAL; | 380 | return -EINVAL; |
383 | } | 381 | } |
384 | if (!(tfo->tpg_get_pr_transport_id)) { | 382 | if (!tfo->tpg_get_tag) { |
385 | printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id()\n"); | 383 | pr_err("Missing tfo->tpg_get_tag()\n"); |
386 | return -EINVAL; | 384 | return -EINVAL; |
387 | } | 385 | } |
388 | if (!(tfo->tpg_get_pr_transport_id_len)) { | 386 | if (!tfo->tpg_get_default_depth) { |
389 | printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id_len()\n"); | 387 | pr_err("Missing tfo->tpg_get_default_depth()\n"); |
390 | return -EINVAL; | 388 | return -EINVAL; |
391 | } | 389 | } |
392 | if (!(tfo->tpg_check_demo_mode)) { | 390 | if (!tfo->tpg_get_pr_transport_id) { |
393 | printk(KERN_ERR "Missing tfo->tpg_check_demo_mode()\n"); | 391 | pr_err("Missing tfo->tpg_get_pr_transport_id()\n"); |
394 | return -EINVAL; | 392 | return -EINVAL; |
395 | } | 393 | } |
396 | if (!(tfo->tpg_check_demo_mode_cache)) { | 394 | if (!tfo->tpg_get_pr_transport_id_len) { |
397 | printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_cache()\n"); | 395 | pr_err("Missing tfo->tpg_get_pr_transport_id_len()\n"); |
398 | return -EINVAL; | 396 | return -EINVAL; |
399 | } | 397 | } |
400 | if (!(tfo->tpg_check_demo_mode_write_protect)) { | 398 | if (!tfo->tpg_check_demo_mode) { |
401 | printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_write_protect()\n"); | 399 | pr_err("Missing tfo->tpg_check_demo_mode()\n"); |
402 | return -EINVAL; | 400 | return -EINVAL; |
403 | } | 401 | } |
404 | if (!(tfo->tpg_check_prod_mode_write_protect)) { | 402 | if (!tfo->tpg_check_demo_mode_cache) { |
405 | printk(KERN_ERR "Missing tfo->tpg_check_prod_mode_write_protect()\n"); | 403 | pr_err("Missing tfo->tpg_check_demo_mode_cache()\n"); |
406 | return -EINVAL; | 404 | return -EINVAL; |
407 | } | 405 | } |
408 | if (!(tfo->tpg_alloc_fabric_acl)) { | 406 | if (!tfo->tpg_check_demo_mode_write_protect) { |
409 | printk(KERN_ERR "Missing tfo->tpg_alloc_fabric_acl()\n"); | 407 | pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n"); |
410 | return -EINVAL; | 408 | return -EINVAL; |
411 | } | 409 | } |
412 | if (!(tfo->tpg_release_fabric_acl)) { | 410 | if (!tfo->tpg_check_prod_mode_write_protect) { |
413 | printk(KERN_ERR "Missing tfo->tpg_release_fabric_acl()\n"); | 411 | pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n"); |
414 | return -EINVAL; | 412 | return -EINVAL; |
415 | } | 413 | } |
416 | if (!(tfo->tpg_get_inst_index)) { | 414 | if (!tfo->tpg_alloc_fabric_acl) { |
417 | printk(KERN_ERR "Missing tfo->tpg_get_inst_index()\n"); | 415 | pr_err("Missing tfo->tpg_alloc_fabric_acl()\n"); |
418 | return -EINVAL; | 416 | return -EINVAL; |
419 | } | 417 | } |
420 | if (!(tfo->release_cmd_to_pool)) { | 418 | if (!tfo->tpg_release_fabric_acl) { |
421 | printk(KERN_ERR "Missing tfo->release_cmd_to_pool()\n"); | 419 | pr_err("Missing tfo->tpg_release_fabric_acl()\n"); |
422 | return -EINVAL; | 420 | return -EINVAL; |
423 | } | 421 | } |
424 | if (!(tfo->release_cmd_direct)) { | 422 | if (!tfo->tpg_get_inst_index) { |
425 | printk(KERN_ERR "Missing tfo->release_cmd_direct()\n"); | 423 | pr_err("Missing tfo->tpg_get_inst_index()\n"); |
426 | return -EINVAL; | 424 | return -EINVAL; |
427 | } | 425 | } |
428 | if (!(tfo->shutdown_session)) { | 426 | if (!tfo->release_cmd) { |
429 | printk(KERN_ERR "Missing tfo->shutdown_session()\n"); | 427 | pr_err("Missing tfo->release_cmd()\n"); |
430 | return -EINVAL; | 428 | return -EINVAL; |
431 | } | 429 | } |
432 | if (!(tfo->close_session)) { | 430 | if (!tfo->shutdown_session) { |
433 | printk(KERN_ERR "Missing tfo->close_session()\n"); | 431 | pr_err("Missing tfo->shutdown_session()\n"); |
434 | return -EINVAL; | 432 | return -EINVAL; |
435 | } | 433 | } |
436 | if (!(tfo->stop_session)) { | 434 | if (!tfo->close_session) { |
437 | printk(KERN_ERR "Missing tfo->stop_session()\n"); | 435 | pr_err("Missing tfo->close_session()\n"); |
438 | return -EINVAL; | 436 | return -EINVAL; |
439 | } | 437 | } |
440 | if (!(tfo->fall_back_to_erl0)) { | 438 | if (!tfo->stop_session) { |
441 | printk(KERN_ERR "Missing tfo->fall_back_to_erl0()\n"); | 439 | pr_err("Missing tfo->stop_session()\n"); |
442 | return -EINVAL; | 440 | return -EINVAL; |
443 | } | 441 | } |
444 | if (!(tfo->sess_logged_in)) { | 442 | if (!tfo->fall_back_to_erl0) { |
445 | printk(KERN_ERR "Missing tfo->sess_logged_in()\n"); | 443 | pr_err("Missing tfo->fall_back_to_erl0()\n"); |
446 | return -EINVAL; | 444 | return -EINVAL; |
447 | } | 445 | } |
448 | if (!(tfo->sess_get_index)) { | 446 | if (!tfo->sess_logged_in) { |
449 | printk(KERN_ERR "Missing tfo->sess_get_index()\n"); | 447 | pr_err("Missing tfo->sess_logged_in()\n"); |
450 | return -EINVAL; | 448 | return -EINVAL; |
451 | } | 449 | } |
452 | if (!(tfo->write_pending)) { | 450 | if (!tfo->sess_get_index) { |
453 | printk(KERN_ERR "Missing tfo->write_pending()\n"); | 451 | pr_err("Missing tfo->sess_get_index()\n"); |
454 | return -EINVAL; | 452 | return -EINVAL; |
455 | } | 453 | } |
456 | if (!(tfo->write_pending_status)) { | 454 | if (!tfo->write_pending) { |
457 | printk(KERN_ERR "Missing tfo->write_pending_status()\n"); | 455 | pr_err("Missing tfo->write_pending()\n"); |
458 | return -EINVAL; | 456 | return -EINVAL; |
459 | } | 457 | } |
460 | if (!(tfo->set_default_node_attributes)) { | 458 | if (!tfo->write_pending_status) { |
461 | printk(KERN_ERR "Missing tfo->set_default_node_attributes()\n"); | 459 | pr_err("Missing tfo->write_pending_status()\n"); |
462 | return -EINVAL; | 460 | return -EINVAL; |
463 | } | 461 | } |
464 | if (!(tfo->get_task_tag)) { | 462 | if (!tfo->set_default_node_attributes) { |
465 | printk(KERN_ERR "Missing tfo->get_task_tag()\n"); | 463 | pr_err("Missing tfo->set_default_node_attributes()\n"); |
466 | return -EINVAL; | 464 | return -EINVAL; |
467 | } | 465 | } |
468 | if (!(tfo->get_cmd_state)) { | 466 | if (!tfo->get_task_tag) { |
469 | printk(KERN_ERR "Missing tfo->get_cmd_state()\n"); | 467 | pr_err("Missing tfo->get_task_tag()\n"); |
470 | return -EINVAL; | 468 | return -EINVAL; |
471 | } | 469 | } |
472 | if (!(tfo->new_cmd_failure)) { | 470 | if (!tfo->get_cmd_state) { |
473 | printk(KERN_ERR "Missing tfo->new_cmd_failure()\n"); | 471 | pr_err("Missing tfo->get_cmd_state()\n"); |
474 | return -EINVAL; | 472 | return -EINVAL; |
475 | } | 473 | } |
476 | if (!(tfo->queue_data_in)) { | 474 | if (!tfo->queue_data_in) { |
477 | printk(KERN_ERR "Missing tfo->queue_data_in()\n"); | 475 | pr_err("Missing tfo->queue_data_in()\n"); |
478 | return -EINVAL; | 476 | return -EINVAL; |
479 | } | 477 | } |
480 | if (!(tfo->queue_status)) { | 478 | if (!tfo->queue_status) { |
481 | printk(KERN_ERR "Missing tfo->queue_status()\n"); | 479 | pr_err("Missing tfo->queue_status()\n"); |
482 | return -EINVAL; | 480 | return -EINVAL; |
483 | } | 481 | } |
484 | if (!(tfo->queue_tm_rsp)) { | 482 | if (!tfo->queue_tm_rsp) { |
485 | printk(KERN_ERR "Missing tfo->queue_tm_rsp()\n"); | 483 | pr_err("Missing tfo->queue_tm_rsp()\n"); |
486 | return -EINVAL; | 484 | return -EINVAL; |
487 | } | 485 | } |
488 | if (!(tfo->set_fabric_sense_len)) { | 486 | if (!tfo->set_fabric_sense_len) { |
489 | printk(KERN_ERR "Missing tfo->set_fabric_sense_len()\n"); | 487 | pr_err("Missing tfo->set_fabric_sense_len()\n"); |
490 | return -EINVAL; | 488 | return -EINVAL; |
491 | } | 489 | } |
492 | if (!(tfo->get_fabric_sense_len)) { | 490 | if (!tfo->get_fabric_sense_len) { |
493 | printk(KERN_ERR "Missing tfo->get_fabric_sense_len()\n"); | 491 | pr_err("Missing tfo->get_fabric_sense_len()\n"); |
494 | return -EINVAL; | 492 | return -EINVAL; |
495 | } | 493 | } |
496 | if (!(tfo->is_state_remove)) { | 494 | if (!tfo->is_state_remove) { |
497 | printk(KERN_ERR "Missing tfo->is_state_remove()\n"); | 495 | pr_err("Missing tfo->is_state_remove()\n"); |
498 | return -EINVAL; | 496 | return -EINVAL; |
499 | } | 497 | } |
500 | /* | 498 | /* |
@@ -502,20 +500,20 @@ static int target_fabric_tf_ops_check( | |||
502 | * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in | 500 | * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in |
503 | * target_core_fabric_configfs.c WWN+TPG group context code. | 501 | * target_core_fabric_configfs.c WWN+TPG group context code. |
504 | */ | 502 | */ |
505 | if (!(tfo->fabric_make_wwn)) { | 503 | if (!tfo->fabric_make_wwn) { |
506 | printk(KERN_ERR "Missing tfo->fabric_make_wwn()\n"); | 504 | pr_err("Missing tfo->fabric_make_wwn()\n"); |
507 | return -EINVAL; | 505 | return -EINVAL; |
508 | } | 506 | } |
509 | if (!(tfo->fabric_drop_wwn)) { | 507 | if (!tfo->fabric_drop_wwn) { |
510 | printk(KERN_ERR "Missing tfo->fabric_drop_wwn()\n"); | 508 | pr_err("Missing tfo->fabric_drop_wwn()\n"); |
511 | return -EINVAL; | 509 | return -EINVAL; |
512 | } | 510 | } |
513 | if (!(tfo->fabric_make_tpg)) { | 511 | if (!tfo->fabric_make_tpg) { |
514 | printk(KERN_ERR "Missing tfo->fabric_make_tpg()\n"); | 512 | pr_err("Missing tfo->fabric_make_tpg()\n"); |
515 | return -EINVAL; | 513 | return -EINVAL; |
516 | } | 514 | } |
517 | if (!(tfo->fabric_drop_tpg)) { | 515 | if (!tfo->fabric_drop_tpg) { |
518 | printk(KERN_ERR "Missing tfo->fabric_drop_tpg()\n"); | 516 | pr_err("Missing tfo->fabric_drop_tpg()\n"); |
519 | return -EINVAL; | 517 | return -EINVAL; |
520 | } | 518 | } |
521 | 519 | ||
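target_fabric_tf_ops_check() is a pure completeness gate: registration fails unless every callback it probes is non-NULL. Note the API rename buried in the churn — the old release_cmd_to_pool()/release_cmd_direct() pair collapses into a single release_cmd() check. A hypothetical fabric ops table that would pass the gate might begin like this (the member names are exactly those checked above; the example_* handlers are placeholders):

    /* Hypothetical fragment: every member probed above must be wired up. */
    static struct target_core_fabric_ops example_ops = {
            .get_fabric_name        = example_get_fabric_name,
            .get_fabric_proto_ident = example_get_proto_ident,
            .tpg_get_wwn            = example_tpg_get_wwn,
            .tpg_get_tag            = example_tpg_get_tag,
            .release_cmd            = example_release_cmd,
            /* ... session, I/O, and sense-handling callbacks ... */
            .fabric_make_wwn        = example_make_wwn,
            .fabric_drop_wwn        = example_drop_wwn,
            .fabric_make_tpg        = example_make_tpg,
            .fabric_drop_tpg        = example_drop_tpg,
    };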
@@ -533,22 +531,15 @@ static int target_fabric_tf_ops_check( | |||
533 | int target_fabric_configfs_register( | 531 | int target_fabric_configfs_register( |
534 | struct target_fabric_configfs *tf) | 532 | struct target_fabric_configfs *tf) |
535 | { | 533 | { |
536 | struct config_group *su_group; | ||
537 | int ret; | 534 | int ret; |
538 | 535 | ||
539 | if (!(tf)) { | 536 | if (!tf) { |
540 | printk(KERN_ERR "Unable to locate target_fabric_configfs" | 537 | pr_err("Unable to locate target_fabric_configfs" |
541 | " pointer\n"); | 538 | " pointer\n"); |
542 | return -EINVAL; | 539 | return -EINVAL; |
543 | } | 540 | } |
544 | if (!(tf->tf_subsys)) { | 541 | if (!tf->tf_subsys) { |
545 | printk(KERN_ERR "Unable to target struct config_subsystem" | 542 | pr_err("Unable to target struct config_subsystem" |
546 | " pointer\n"); | ||
547 | return -EINVAL; | ||
548 | } | ||
549 | su_group = &tf->tf_subsys->su_group; | ||
550 | if (!(su_group)) { | ||
551 | printk(KERN_ERR "Unable to locate target struct config_group" | ||
552 | " pointer\n"); | 543 | " pointer\n"); |
553 | return -EINVAL; | 544 | return -EINVAL; |
554 | } | 545 | } |
@@ -556,7 +547,7 @@ int target_fabric_configfs_register( | |||
556 | if (ret < 0) | 547 | if (ret < 0) |
557 | return ret; | 548 | return ret; |
558 | 549 | ||
559 | printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>" | 550 | pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>" |
560 | ">>>>>>>>>>\n"); | 551 | ">>>>>>>>>>\n"); |
561 | return 0; | 552 | return 0; |
562 | } | 553 | } |
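The su_group checks deleted above (and again in the deregister path below) were dead code: the pointer was formed by taking the address of a member embedded in tf_subsys, which cannot be NULL once tf_subsys itself has been validated. In miniature:

    struct configfs_subsystem *su = tf->tf_subsys; /* may be NULL: worth checking */
    struct config_group *su_group = &su->su_group; /* never NULL once su is valid */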
@@ -565,48 +556,39 @@ EXPORT_SYMBOL(target_fabric_configfs_register); | |||
565 | void target_fabric_configfs_deregister( | 556 | void target_fabric_configfs_deregister( |
566 | struct target_fabric_configfs *tf) | 557 | struct target_fabric_configfs *tf) |
567 | { | 558 | { |
568 | struct config_group *su_group; | ||
569 | struct configfs_subsystem *su; | 559 | struct configfs_subsystem *su; |
570 | 560 | ||
571 | if (!(tf)) { | 561 | if (!tf) { |
572 | printk(KERN_ERR "Unable to locate passed target_fabric_" | 562 | pr_err("Unable to locate passed target_fabric_" |
573 | "configfs\n"); | 563 | "configfs\n"); |
574 | return; | 564 | return; |
575 | } | 565 | } |
576 | su = tf->tf_subsys; | 566 | su = tf->tf_subsys; |
577 | if (!(su)) { | 567 | if (!su) { |
578 | printk(KERN_ERR "Unable to locate passed tf->tf_subsys" | 568 | pr_err("Unable to locate passed tf->tf_subsys" |
579 | " pointer\n"); | 569 | " pointer\n"); |
580 | return; | 570 | return; |
581 | } | 571 | } |
582 | su_group = &tf->tf_subsys->su_group; | 572 | pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>" |
583 | if (!(su_group)) { | ||
584 | printk(KERN_ERR "Unable to locate target struct config_group" | ||
585 | " pointer\n"); | ||
586 | return; | ||
587 | } | ||
588 | |||
589 | printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>" | ||
590 | ">>>>>>>>>>>>\n"); | 573 | ">>>>>>>>>>>>\n"); |
591 | mutex_lock(&g_tf_lock); | 574 | mutex_lock(&g_tf_lock); |
592 | if (atomic_read(&tf->tf_access_cnt)) { | 575 | if (atomic_read(&tf->tf_access_cnt)) { |
593 | mutex_unlock(&g_tf_lock); | 576 | mutex_unlock(&g_tf_lock); |
594 | printk(KERN_ERR "Non zero tf->tf_access_cnt for fabric %s\n", | 577 | pr_err("Non zero tf->tf_access_cnt for fabric %s\n", |
595 | tf->tf_name); | 578 | tf->tf_name); |
596 | BUG(); | 579 | BUG(); |
597 | } | 580 | } |
598 | list_del(&tf->tf_list); | 581 | list_del(&tf->tf_list); |
599 | mutex_unlock(&g_tf_lock); | 582 | mutex_unlock(&g_tf_lock); |
600 | 583 | ||
601 | printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing tf:" | 584 | pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing tf:" |
602 | " %s\n", tf->tf_name); | 585 | " %s\n", tf->tf_name); |
603 | tf->tf_module = NULL; | 586 | tf->tf_module = NULL; |
604 | tf->tf_subsys = NULL; | 587 | tf->tf_subsys = NULL; |
605 | kfree(tf); | 588 | kfree(tf); |
606 | 589 | ||
607 | printk("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>" | 590 | pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>" |
608 | ">>>>>\n"); | 591 | ">>>>>\n"); |
609 | return; | ||
610 | } | 592 | } |
611 | EXPORT_SYMBOL(target_fabric_configfs_deregister); | 593 | EXPORT_SYMBOL(target_fabric_configfs_deregister); |
612 | 594 | ||
@@ -627,11 +609,12 @@ static ssize_t target_core_dev_show_attr_##_name( \ | |||
627 | \ | 609 | \ |
628 | spin_lock(&se_dev->se_dev_lock); \ | 610 | spin_lock(&se_dev->se_dev_lock); \ |
629 | dev = se_dev->se_dev_ptr; \ | 611 | dev = se_dev->se_dev_ptr; \ |
630 | if (!(dev)) { \ | 612 | if (!dev) { \ |
631 | spin_unlock(&se_dev->se_dev_lock); \ | 613 | spin_unlock(&se_dev->se_dev_lock); \ |
632 | return -ENODEV; \ | 614 | return -ENODEV; \ |
633 | } \ | 615 | } \ |
634 | rb = snprintf(page, PAGE_SIZE, "%u\n", (u32)DEV_ATTRIB(dev)->_name); \ | 616 | rb = snprintf(page, PAGE_SIZE, "%u\n", \ |
617 | (u32)dev->se_sub_dev->se_dev_attrib._name); \ | ||
635 | spin_unlock(&se_dev->se_dev_lock); \ | 618 | spin_unlock(&se_dev->se_dev_lock); \ |
636 | \ | 619 | \ |
637 | return rb; \ | 620 | return rb; \ |
@@ -650,14 +633,14 @@ static ssize_t target_core_dev_store_attr_##_name( \ | |||
650 | \ | 633 | \ |
651 | spin_lock(&se_dev->se_dev_lock); \ | 634 | spin_lock(&se_dev->se_dev_lock); \ |
652 | dev = se_dev->se_dev_ptr; \ | 635 | dev = se_dev->se_dev_ptr; \ |
653 | if (!(dev)) { \ | 636 | if (!dev) { \ |
654 | spin_unlock(&se_dev->se_dev_lock); \ | 637 | spin_unlock(&se_dev->se_dev_lock); \ |
655 | return -ENODEV; \ | 638 | return -ENODEV; \ |
656 | } \ | 639 | } \ |
657 | ret = strict_strtoul(page, 0, &val); \ | 640 | ret = strict_strtoul(page, 0, &val); \ |
658 | if (ret < 0) { \ | 641 | if (ret < 0) { \ |
659 | spin_unlock(&se_dev->se_dev_lock); \ | 642 | spin_unlock(&se_dev->se_dev_lock); \ |
660 | printk(KERN_ERR "strict_strtoul() failed with" \ | 643 | pr_err("strict_strtoul() failed with" \ |
661 | " ret: %d\n", ret); \ | 644 | " ret: %d\n", ret); \ |
662 | return -EINVAL; \ | 645 | return -EINVAL; \ |
663 | } \ | 646 | } \ |
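The store half of the attribute macro leans on strict_strtoul() (the ancestor of today's kstrtoul()), which rejects trailing garbage rather than silently parsing a prefix the way simple_strtoul() does; that is what turns any malformed configfs write into a clean -EINVAL. The call contract, assuming the 2011-era API:

    unsigned long val;
    int ret;

    /* base 0: accepts decimal, 0x-prefixed hex, or 0-prefixed octal */
    ret = strict_strtoul(page, 0, &val);
    if (ret < 0)
            return -EINVAL; /* the whole string must be a valid number */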
@@ -715,6 +698,12 @@ SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR); | |||
715 | DEF_DEV_ATTRIB(enforce_pr_isids); | 698 | DEF_DEV_ATTRIB(enforce_pr_isids); |
716 | SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR); | 699 | SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR); |
717 | 700 | ||
701 | DEF_DEV_ATTRIB(is_nonrot); | ||
702 | SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR); | ||
703 | |||
704 | DEF_DEV_ATTRIB(emulate_rest_reord); | ||
705 | SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR); | ||
706 | |||
718 | DEF_DEV_ATTRIB_RO(hw_block_size); | 707 | DEF_DEV_ATTRIB_RO(hw_block_size); |
719 | SE_DEV_ATTR_RO(hw_block_size); | 708 | SE_DEV_ATTR_RO(hw_block_size); |
720 | 709 | ||
@@ -763,6 +752,8 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = { | |||
763 | &target_core_dev_attrib_emulate_tpu.attr, | 752 | &target_core_dev_attrib_emulate_tpu.attr, |
764 | &target_core_dev_attrib_emulate_tpws.attr, | 753 | &target_core_dev_attrib_emulate_tpws.attr, |
765 | &target_core_dev_attrib_enforce_pr_isids.attr, | 754 | &target_core_dev_attrib_enforce_pr_isids.attr, |
755 | &target_core_dev_attrib_is_nonrot.attr, | ||
756 | &target_core_dev_attrib_emulate_rest_reord.attr, | ||
766 | &target_core_dev_attrib_hw_block_size.attr, | 757 | &target_core_dev_attrib_hw_block_size.attr, |
767 | &target_core_dev_attrib_block_size.attr, | 758 | &target_core_dev_attrib_block_size.attr, |
768 | &target_core_dev_attrib_hw_max_sectors.attr, | 759 | &target_core_dev_attrib_hw_max_sectors.attr, |
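The two attributes added here, is_nonrot and emulate_rest_reord, are stamped out by the same DEF_DEV_ATTRIB machinery shown above. Expanded by hand, the show half of DEF_DEV_ATTRIB(is_nonrot) comes out roughly as follows (the parameter list is assumed from the neighboring handlers, since the macro header sits outside this hunk):

    /* Hand-expanded sketch of DEF_DEV_ATTRIB(is_nonrot), show side. */
    static ssize_t target_core_dev_show_attr_is_nonrot(
            struct se_subsystem_dev *se_dev, char *page)
    {
            struct se_device *dev;
            ssize_t rb;

            spin_lock(&se_dev->se_dev_lock);
            dev = se_dev->se_dev_ptr;
            if (!dev) {
                    spin_unlock(&se_dev->se_dev_lock);
                    return -ENODEV; /* storage object not enabled yet */
            }
            rb = snprintf(page, PAGE_SIZE, "%u\n",
                          (u32)dev->se_sub_dev->se_dev_attrib.is_nonrot);
            spin_unlock(&se_dev->se_dev_lock);
            return rb;
    }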
@@ -819,7 +810,7 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial( | |||
819 | struct se_device *dev; | 810 | struct se_device *dev; |
820 | 811 | ||
821 | dev = se_dev->se_dev_ptr; | 812 | dev = se_dev->se_dev_ptr; |
822 | if (!(dev)) | 813 | if (!dev) |
823 | return -ENODEV; | 814 | return -ENODEV; |
824 | 815 | ||
825 | return sprintf(page, "T10 VPD Unit Serial Number: %s\n", | 816 | return sprintf(page, "T10 VPD Unit Serial Number: %s\n", |
@@ -846,13 +837,13 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial( | |||
846 | * VPD Unit Serial Number that OS dependent multipath can depend on. | 837 | * VPD Unit Serial Number that OS dependent multipath can depend on. |
847 | */ | 838 | */ |
848 | if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) { | 839 | if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) { |
849 | printk(KERN_ERR "Underlying SCSI device firmware provided VPD" | 840 | pr_err("Underlying SCSI device firmware provided VPD" |
850 | " Unit Serial, ignoring request\n"); | 841 | " Unit Serial, ignoring request\n"); |
851 | return -EOPNOTSUPP; | 842 | return -EOPNOTSUPP; |
852 | } | 843 | } |
853 | 844 | ||
854 | if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) { | 845 | if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) { |
855 | printk(KERN_ERR "Emulated VPD Unit Serial exceeds" | 846 | pr_err("Emulated VPD Unit Serial exceeds" |
856 | " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN); | 847 | " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN); |
857 | return -EOVERFLOW; | 848 | return -EOVERFLOW; |
858 | } | 849 | } |
@@ -863,9 +854,9 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial( | |||
863 | * could cause negative effects. | 854 | * could cause negative effects. |
864 | */ | 855 | */ |
865 | dev = su_dev->se_dev_ptr; | 856 | dev = su_dev->se_dev_ptr; |
866 | if ((dev)) { | 857 | if (dev) { |
867 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 858 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
868 | printk(KERN_ERR "Unable to set VPD Unit Serial while" | 859 | pr_err("Unable to set VPD Unit Serial while" |
869 | " active %d $FABRIC_MOD exports exist\n", | 860 | " active %d $FABRIC_MOD exports exist\n", |
870 | atomic_read(&dev->dev_export_obj.obj_access_count)); | 861 | atomic_read(&dev->dev_export_obj.obj_access_count)); |
871 | return -EINVAL; | 862 | return -EINVAL; |
@@ -883,7 +874,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial( | |||
883 | "%s", strstrip(buf)); | 874 | "%s", strstrip(buf)); |
884 | su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL; | 875 | su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL; |
885 | 876 | ||
886 | printk(KERN_INFO "Target_Core_ConfigFS: Set emulated VPD Unit Serial:" | 877 | pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:" |
887 | " %s\n", su_dev->t10_wwn.unit_serial); | 878 | " %s\n", su_dev->t10_wwn.unit_serial); |
888 | 879 | ||
889 | return count; | 880 | return count; |
@@ -905,19 +896,19 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier( | |||
905 | ssize_t len = 0; | 896 | ssize_t len = 0; |
906 | 897 | ||
907 | dev = se_dev->se_dev_ptr; | 898 | dev = se_dev->se_dev_ptr; |
908 | if (!(dev)) | 899 | if (!dev) |
909 | return -ENODEV; | 900 | return -ENODEV; |
910 | 901 | ||
911 | memset(buf, 0, VPD_TMP_BUF_SIZE); | 902 | memset(buf, 0, VPD_TMP_BUF_SIZE); |
912 | 903 | ||
913 | spin_lock(&t10_wwn->t10_vpd_lock); | 904 | spin_lock(&t10_wwn->t10_vpd_lock); |
914 | list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { | 905 | list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { |
915 | if (!(vpd->protocol_identifier_set)) | 906 | if (!vpd->protocol_identifier_set) |
916 | continue; | 907 | continue; |
917 | 908 | ||
918 | transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE); | 909 | transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE); |
919 | 910 | ||
920 | if ((len + strlen(buf) >= PAGE_SIZE)) | 911 | if (len + strlen(buf) >= PAGE_SIZE) |
921 | break; | 912 | break; |
922 | 913 | ||
923 | len += sprintf(page+len, "%s", buf); | 914 | len += sprintf(page+len, "%s", buf); |
@@ -952,7 +943,7 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \ | |||
952 | ssize_t len = 0; \ | 943 | ssize_t len = 0; \ |
953 | \ | 944 | \ |
954 | dev = se_dev->se_dev_ptr; \ | 945 | dev = se_dev->se_dev_ptr; \ |
955 | if (!(dev)) \ | 946 | if (!dev) \ |
956 | return -ENODEV; \ | 947 | return -ENODEV; \ |
957 | \ | 948 | \ |
958 | spin_lock(&t10_wwn->t10_vpd_lock); \ | 949 | spin_lock(&t10_wwn->t10_vpd_lock); \ |
@@ -962,19 +953,19 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \ | |||
962 | \ | 953 | \ |
963 | memset(buf, 0, VPD_TMP_BUF_SIZE); \ | 954 | memset(buf, 0, VPD_TMP_BUF_SIZE); \ |
964 | transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \ | 955 | transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \ |
965 | if ((len + strlen(buf) >= PAGE_SIZE)) \ | 956 | if (len + strlen(buf) >= PAGE_SIZE) \ |
966 | break; \ | 957 | break; \ |
967 | len += sprintf(page+len, "%s", buf); \ | 958 | len += sprintf(page+len, "%s", buf); \ |
968 | \ | 959 | \ |
969 | memset(buf, 0, VPD_TMP_BUF_SIZE); \ | 960 | memset(buf, 0, VPD_TMP_BUF_SIZE); \ |
970 | transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \ | 961 | transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \ |
971 | if ((len + strlen(buf) >= PAGE_SIZE)) \ | 962 | if (len + strlen(buf) >= PAGE_SIZE) \ |
972 | break; \ | 963 | break; \ |
973 | len += sprintf(page+len, "%s", buf); \ | 964 | len += sprintf(page+len, "%s", buf); \ |
974 | \ | 965 | \ |
975 | memset(buf, 0, VPD_TMP_BUF_SIZE); \ | 966 | memset(buf, 0, VPD_TMP_BUF_SIZE); \ |
976 | transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \ | 967 | transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \ |
977 | if ((len + strlen(buf) >= PAGE_SIZE)) \ | 968 | if (len + strlen(buf) >= PAGE_SIZE) \ |
978 | break; \ | 969 | break; \ |
979 | len += sprintf(page+len, "%s", buf); \ | 970 | len += sprintf(page+len, "%s", buf); \ |
980 | } \ | 971 | } \ |
@@ -984,7 +975,7 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \ | |||
984 | } | 975 | } |
985 | 976 | ||
986 | /* | 977 | /* |
987 | * VPD page 0x83 Assoication: Logical Unit | 978 | * VPD page 0x83 Association: Logical Unit |
988 | */ | 979 | */ |
989 | DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00); | 980 | DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00); |
990 | 981 | ||
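Dropping the doubled parentheses around len + strlen(buf) >= PAGE_SIZE is cosmetic, but the guard itself is load-bearing: configfs show handlers write into a single page, so each sprintf() append must be bounds-checked first. The idiom, isolated from the macro above:

    /* Append-with-bounds idiom used throughout these show handlers. */
    transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE);
    if (len + strlen(buf) >= PAGE_SIZE) /* next chunk would overflow the page */
            break;                      /* return the portion that fits */
    len += sprintf(page + len, "%s", buf);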
@@ -1083,7 +1074,7 @@ static ssize_t target_core_dev_pr_show_spc3_res( | |||
1083 | 1074 | ||
1084 | spin_lock(&dev->dev_reservation_lock); | 1075 | spin_lock(&dev->dev_reservation_lock); |
1085 | pr_reg = dev->dev_pr_res_holder; | 1076 | pr_reg = dev->dev_pr_res_holder; |
1086 | if (!(pr_reg)) { | 1077 | if (!pr_reg) { |
1087 | *len += sprintf(page + *len, "No SPC-3 Reservation holder\n"); | 1078 | *len += sprintf(page + *len, "No SPC-3 Reservation holder\n"); |
1088 | spin_unlock(&dev->dev_reservation_lock); | 1079 | spin_unlock(&dev->dev_reservation_lock); |
1089 | return *len; | 1080 | return *len; |
@@ -1093,7 +1084,7 @@ static ssize_t target_core_dev_pr_show_spc3_res( | |||
1093 | PR_REG_ISID_ID_LEN); | 1084 | PR_REG_ISID_ID_LEN); |
1094 | 1085 | ||
1095 | *len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n", | 1086 | *len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n", |
1096 | TPG_TFO(se_nacl->se_tpg)->get_fabric_name(), | 1087 | se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), |
1097 | se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : ""); | 1088 | se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : ""); |
1098 | spin_unlock(&dev->dev_reservation_lock); | 1089 | spin_unlock(&dev->dev_reservation_lock); |
1099 | 1090 | ||
@@ -1109,13 +1100,13 @@ static ssize_t target_core_dev_pr_show_spc2_res( | |||
1109 | 1100 | ||
1110 | spin_lock(&dev->dev_reservation_lock); | 1101 | spin_lock(&dev->dev_reservation_lock); |
1111 | se_nacl = dev->dev_reserved_node_acl; | 1102 | se_nacl = dev->dev_reserved_node_acl; |
1112 | if (!(se_nacl)) { | 1103 | if (!se_nacl) { |
1113 | *len += sprintf(page + *len, "No SPC-2 Reservation holder\n"); | 1104 | *len += sprintf(page + *len, "No SPC-2 Reservation holder\n"); |
1114 | spin_unlock(&dev->dev_reservation_lock); | 1105 | spin_unlock(&dev->dev_reservation_lock); |
1115 | return *len; | 1106 | return *len; |
1116 | } | 1107 | } |
1117 | *len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n", | 1108 | *len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n", |
1118 | TPG_TFO(se_nacl->se_tpg)->get_fabric_name(), | 1109 | se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), |
1119 | se_nacl->initiatorname); | 1110 | se_nacl->initiatorname); |
1120 | spin_unlock(&dev->dev_reservation_lock); | 1111 | spin_unlock(&dev->dev_reservation_lock); |
1121 | 1112 | ||
@@ -1128,10 +1119,10 @@ static ssize_t target_core_dev_pr_show_attr_res_holder( | |||
1128 | { | 1119 | { |
1129 | ssize_t len = 0; | 1120 | ssize_t len = 0; |
1130 | 1121 | ||
1131 | if (!(su_dev->se_dev_ptr)) | 1122 | if (!su_dev->se_dev_ptr) |
1132 | return -ENODEV; | 1123 | return -ENODEV; |
1133 | 1124 | ||
1134 | switch (T10_RES(su_dev)->res_type) { | 1125 | switch (su_dev->t10_pr.res_type) { |
1135 | case SPC3_PERSISTENT_RESERVATIONS: | 1126 | case SPC3_PERSISTENT_RESERVATIONS: |
1136 | target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr, | 1127 | target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr, |
1137 | page, &len); | 1128 | page, &len); |
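From here on the diff retires a family of accessor macros in favor of open-coded member access. Reconstructed from the substitutions in this file (the originals lived in target_core_base.h; this is an inference from the diff, not a quote):

    #define T10_RES(su_dev)   (&(su_dev)->t10_pr)
    #define T10_ALUA(su_dev)  (&(su_dev)->t10_alua)
    #define TPG_TFO(se_tpg)   ((se_tpg)->se_tpg_tfo)
    #define DEV_ATTRIB(dev)   (&(dev)->se_sub_dev->se_dev_attrib)

The open-coded form is longer but self-documenting, and it removes a layer of indirection that hid which structure actually owned each field.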
@@ -1165,15 +1156,15 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts( | |||
1165 | ssize_t len = 0; | 1156 | ssize_t len = 0; |
1166 | 1157 | ||
1167 | dev = su_dev->se_dev_ptr; | 1158 | dev = su_dev->se_dev_ptr; |
1168 | if (!(dev)) | 1159 | if (!dev) |
1169 | return -ENODEV; | 1160 | return -ENODEV; |
1170 | 1161 | ||
1171 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | 1162 | if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) |
1172 | return len; | 1163 | return len; |
1173 | 1164 | ||
1174 | spin_lock(&dev->dev_reservation_lock); | 1165 | spin_lock(&dev->dev_reservation_lock); |
1175 | pr_reg = dev->dev_pr_res_holder; | 1166 | pr_reg = dev->dev_pr_res_holder; |
1176 | if (!(pr_reg)) { | 1167 | if (!pr_reg) { |
1177 | len = sprintf(page, "No SPC-3 Reservation holder\n"); | 1168 | len = sprintf(page, "No SPC-3 Reservation holder\n"); |
1178 | spin_unlock(&dev->dev_reservation_lock); | 1169 | spin_unlock(&dev->dev_reservation_lock); |
1179 | return len; | 1170 | return len; |
@@ -1202,13 +1193,13 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_generation( | |||
1202 | struct se_subsystem_dev *su_dev, | 1193 | struct se_subsystem_dev *su_dev, |
1203 | char *page) | 1194 | char *page) |
1204 | { | 1195 | { |
1205 | if (!(su_dev->se_dev_ptr)) | 1196 | if (!su_dev->se_dev_ptr) |
1206 | return -ENODEV; | 1197 | return -ENODEV; |
1207 | 1198 | ||
1208 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | 1199 | if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) |
1209 | return 0; | 1200 | return 0; |
1210 | 1201 | ||
1211 | return sprintf(page, "0x%08x\n", T10_RES(su_dev)->pr_generation); | 1202 | return sprintf(page, "0x%08x\n", su_dev->t10_pr.pr_generation); |
1212 | } | 1203 | } |
1213 | 1204 | ||
1214 | SE_DEV_PR_ATTR_RO(res_pr_generation); | 1205 | SE_DEV_PR_ATTR_RO(res_pr_generation); |
@@ -1229,15 +1220,15 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port( | |||
1229 | ssize_t len = 0; | 1220 | ssize_t len = 0; |
1230 | 1221 | ||
1231 | dev = su_dev->se_dev_ptr; | 1222 | dev = su_dev->se_dev_ptr; |
1232 | if (!(dev)) | 1223 | if (!dev) |
1233 | return -ENODEV; | 1224 | return -ENODEV; |
1234 | 1225 | ||
1235 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | 1226 | if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) |
1236 | return len; | 1227 | return len; |
1237 | 1228 | ||
1238 | spin_lock(&dev->dev_reservation_lock); | 1229 | spin_lock(&dev->dev_reservation_lock); |
1239 | pr_reg = dev->dev_pr_res_holder; | 1230 | pr_reg = dev->dev_pr_res_holder; |
1240 | if (!(pr_reg)) { | 1231 | if (!pr_reg) { |
1241 | len = sprintf(page, "No SPC-3 Reservation holder\n"); | 1232 | len = sprintf(page, "No SPC-3 Reservation holder\n"); |
1242 | spin_unlock(&dev->dev_reservation_lock); | 1233 | spin_unlock(&dev->dev_reservation_lock); |
1243 | return len; | 1234 | return len; |
@@ -1245,7 +1236,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port( | |||
1245 | se_nacl = pr_reg->pr_reg_nacl; | 1236 | se_nacl = pr_reg->pr_reg_nacl; |
1246 | se_tpg = se_nacl->se_tpg; | 1237 | se_tpg = se_nacl->se_tpg; |
1247 | lun = pr_reg->pr_reg_tg_pt_lun; | 1238 | lun = pr_reg->pr_reg_tg_pt_lun; |
1248 | tfo = TPG_TFO(se_tpg); | 1239 | tfo = se_tpg->se_tpg_tfo; |
1249 | 1240 | ||
1250 | len += sprintf(page+len, "SPC-3 Reservation: %s" | 1241 | len += sprintf(page+len, "SPC-3 Reservation: %s" |
1251 | " Target Node Endpoint: %s\n", tfo->get_fabric_name(), | 1242 | " Target Node Endpoint: %s\n", tfo->get_fabric_name(), |
@@ -1276,16 +1267,16 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( | |||
1276 | ssize_t len = 0; | 1267 | ssize_t len = 0; |
1277 | int reg_count = 0, prf_isid; | 1268 | int reg_count = 0, prf_isid; |
1278 | 1269 | ||
1279 | if (!(su_dev->se_dev_ptr)) | 1270 | if (!su_dev->se_dev_ptr) |
1280 | return -ENODEV; | 1271 | return -ENODEV; |
1281 | 1272 | ||
1282 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | 1273 | if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) |
1283 | return len; | 1274 | return len; |
1284 | 1275 | ||
1285 | len += sprintf(page+len, "SPC-3 PR Registrations:\n"); | 1276 | len += sprintf(page+len, "SPC-3 PR Registrations:\n"); |
1286 | 1277 | ||
1287 | spin_lock(&T10_RES(su_dev)->registration_lock); | 1278 | spin_lock(&su_dev->t10_pr.registration_lock); |
1288 | list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list, | 1279 | list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list, |
1289 | pr_reg_list) { | 1280 | pr_reg_list) { |
1290 | 1281 | ||
1291 | memset(buf, 0, 384); | 1282 | memset(buf, 0, 384); |
@@ -1299,15 +1290,15 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( | |||
1299 | &i_buf[0] : "", pr_reg->pr_res_key, | 1290 | &i_buf[0] : "", pr_reg->pr_res_key, |
1300 | pr_reg->pr_res_generation); | 1291 | pr_reg->pr_res_generation); |
1301 | 1292 | ||
1302 | if ((len + strlen(buf) >= PAGE_SIZE)) | 1293 | if (len + strlen(buf) >= PAGE_SIZE) |
1303 | break; | 1294 | break; |
1304 | 1295 | ||
1305 | len += sprintf(page+len, "%s", buf); | 1296 | len += sprintf(page+len, "%s", buf); |
1306 | reg_count++; | 1297 | reg_count++; |
1307 | } | 1298 | } |
1308 | spin_unlock(&T10_RES(su_dev)->registration_lock); | 1299 | spin_unlock(&su_dev->t10_pr.registration_lock); |
1309 | 1300 | ||
1310 | if (!(reg_count)) | 1301 | if (!reg_count) |
1311 | len += sprintf(page+len, "None\n"); | 1302 | len += sprintf(page+len, "None\n"); |
1312 | 1303 | ||
1313 | return len; | 1304 | return len; |
@@ -1327,15 +1318,15 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_type( | |||
1327 | ssize_t len = 0; | 1318 | ssize_t len = 0; |
1328 | 1319 | ||
1329 | dev = su_dev->se_dev_ptr; | 1320 | dev = su_dev->se_dev_ptr; |
1330 | if (!(dev)) | 1321 | if (!dev) |
1331 | return -ENODEV; | 1322 | return -ENODEV; |
1332 | 1323 | ||
1333 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | 1324 | if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) |
1334 | return len; | 1325 | return len; |
1335 | 1326 | ||
1336 | spin_lock(&dev->dev_reservation_lock); | 1327 | spin_lock(&dev->dev_reservation_lock); |
1337 | pr_reg = dev->dev_pr_res_holder; | 1328 | pr_reg = dev->dev_pr_res_holder; |
1338 | if (!(pr_reg)) { | 1329 | if (!pr_reg) { |
1339 | len = sprintf(page, "No SPC-3 Reservation holder\n"); | 1330 | len = sprintf(page, "No SPC-3 Reservation holder\n"); |
1340 | spin_unlock(&dev->dev_reservation_lock); | 1331 | spin_unlock(&dev->dev_reservation_lock); |
1341 | return len; | 1332 | return len; |
@@ -1358,10 +1349,10 @@ static ssize_t target_core_dev_pr_show_attr_res_type( | |||
1358 | { | 1349 | { |
1359 | ssize_t len = 0; | 1350 | ssize_t len = 0; |
1360 | 1351 | ||
1361 | if (!(su_dev->se_dev_ptr)) | 1352 | if (!su_dev->se_dev_ptr) |
1362 | return -ENODEV; | 1353 | return -ENODEV; |
1363 | 1354 | ||
1364 | switch (T10_RES(su_dev)->res_type) { | 1355 | switch (su_dev->t10_pr.res_type) { |
1365 | case SPC3_PERSISTENT_RESERVATIONS: | 1356 | case SPC3_PERSISTENT_RESERVATIONS: |
1366 | len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n"); | 1357 | len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n"); |
1367 | break; | 1358 | break; |
@@ -1389,14 +1380,14 @@ static ssize_t target_core_dev_pr_show_attr_res_aptpl_active( | |||
1389 | struct se_subsystem_dev *su_dev, | 1380 | struct se_subsystem_dev *su_dev, |
1390 | char *page) | 1381 | char *page) |
1391 | { | 1382 | { |
1392 | if (!(su_dev->se_dev_ptr)) | 1383 | if (!su_dev->se_dev_ptr) |
1393 | return -ENODEV; | 1384 | return -ENODEV; |
1394 | 1385 | ||
1395 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | 1386 | if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) |
1396 | return 0; | 1387 | return 0; |
1397 | 1388 | ||
1398 | return sprintf(page, "APTPL Bit Status: %s\n", | 1389 | return sprintf(page, "APTPL Bit Status: %s\n", |
1399 | (T10_RES(su_dev)->pr_aptpl_active) ? "Activated" : "Disabled"); | 1390 | (su_dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled"); |
1400 | } | 1391 | } |
1401 | 1392 | ||
1402 | SE_DEV_PR_ATTR_RO(res_aptpl_active); | 1393 | SE_DEV_PR_ATTR_RO(res_aptpl_active); |
@@ -1408,10 +1399,10 @@ static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata( | |||
1408 | struct se_subsystem_dev *su_dev, | 1399 | struct se_subsystem_dev *su_dev, |
1409 | char *page) | 1400 | char *page) |
1410 | { | 1401 | { |
1411 | if (!(su_dev->se_dev_ptr)) | 1402 | if (!su_dev->se_dev_ptr) |
1412 | return -ENODEV; | 1403 | return -ENODEV; |
1413 | 1404 | ||
1414 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | 1405 | if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) |
1415 | return 0; | 1406 | return 0; |
1416 | 1407 | ||
1417 | return sprintf(page, "Ready to process PR APTPL metadata..\n"); | 1408 | return sprintf(page, "Ready to process PR APTPL metadata..\n"); |
@@ -1460,14 +1451,14 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1460 | u8 type = 0, scope; | 1451 | u8 type = 0, scope; |
1461 | 1452 | ||
1462 | dev = su_dev->se_dev_ptr; | 1453 | dev = su_dev->se_dev_ptr; |
1463 | if (!(dev)) | 1454 | if (!dev) |
1464 | return -ENODEV; | 1455 | return -ENODEV; |
1465 | 1456 | ||
1466 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | 1457 | if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) |
1467 | return 0; | 1458 | return 0; |
1468 | 1459 | ||
1469 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 1460 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
1470 | printk(KERN_INFO "Unable to process APTPL metadata while" | 1461 | pr_debug("Unable to process APTPL metadata while" |
1471 | " active fabric exports exist\n"); | 1462 | " active fabric exports exist\n"); |
1472 | return -EINVAL; | 1463 | return -EINVAL; |
1473 | } | 1464 | } |
@@ -1497,7 +1488,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1497 | goto out; | 1488 | goto out; |
1498 | } | 1489 | } |
1499 | if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) { | 1490 | if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) { |
1500 | printk(KERN_ERR "APTPL metadata initiator_node=" | 1491 | pr_err("APTPL metadata initiator_node=" |
1501 | " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", | 1492 | " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", |
1502 | PR_APTPL_MAX_IPORT_LEN); | 1493 | PR_APTPL_MAX_IPORT_LEN); |
1503 | ret = -EINVAL; | 1494 | ret = -EINVAL; |
@@ -1511,7 +1502,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1511 | goto out; | 1502 | goto out; |
1512 | } | 1503 | } |
1513 | if (strlen(isid) >= PR_REG_ISID_LEN) { | 1504 | if (strlen(isid) >= PR_REG_ISID_LEN) { |
1514 | printk(KERN_ERR "APTPL metadata initiator_isid" | 1505 | pr_err("APTPL metadata initiator_isid" |
1515 | "= exceeds PR_REG_ISID_LEN: %d\n", | 1506 | "= exceeds PR_REG_ISID_LEN: %d\n", |
1516 | PR_REG_ISID_LEN); | 1507 | PR_REG_ISID_LEN); |
1517 | ret = -EINVAL; | 1508 | ret = -EINVAL; |
@@ -1526,7 +1517,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1526 | } | 1517 | } |
1527 | ret = strict_strtoull(arg_p, 0, &tmp_ll); | 1518 | ret = strict_strtoull(arg_p, 0, &tmp_ll); |
1528 | if (ret < 0) { | 1519 | if (ret < 0) { |
1529 | printk(KERN_ERR "strict_strtoull() failed for" | 1520 | pr_err("strict_strtoull() failed for" |
1530 | " sa_res_key=\n"); | 1521 | " sa_res_key=\n"); |
1531 | goto out; | 1522 | goto out; |
1532 | } | 1523 | } |
@@ -1572,7 +1563,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1572 | goto out; | 1563 | goto out; |
1573 | } | 1564 | } |
1574 | if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) { | 1565 | if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) { |
1575 | printk(KERN_ERR "APTPL metadata target_node=" | 1566 | pr_err("APTPL metadata target_node=" |
1576 | " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n", | 1567 | " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n", |
1577 | PR_APTPL_MAX_TPORT_LEN); | 1568 | PR_APTPL_MAX_TPORT_LEN); |
1578 | ret = -EINVAL; | 1569 | ret = -EINVAL; |
@@ -1596,20 +1587,20 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1596 | } | 1587 | } |
1597 | } | 1588 | } |
1598 | 1589 | ||
1599 | if (!(i_port) || !(t_port) || !(sa_res_key)) { | 1590 | if (!i_port || !t_port || !sa_res_key) { |
1600 | printk(KERN_ERR "Illegal parameters for APTPL registration\n"); | 1591 | pr_err("Illegal parameters for APTPL registration\n"); |
1601 | ret = -EINVAL; | 1592 | ret = -EINVAL; |
1602 | goto out; | 1593 | goto out; |
1603 | } | 1594 | } |
1604 | 1595 | ||
1605 | if (res_holder && !(type)) { | 1596 | if (res_holder && !(type)) { |
1606 | printk(KERN_ERR "Illegal PR type: 0x%02x for reservation" | 1597 | pr_err("Illegal PR type: 0x%02x for reservation" |
1607 | " holder\n", type); | 1598 | " holder\n", type); |
1608 | ret = -EINVAL; | 1599 | ret = -EINVAL; |
1609 | goto out; | 1600 | goto out; |
1610 | } | 1601 | } |
1611 | 1602 | ||
1612 | ret = core_scsi3_alloc_aptpl_registration(T10_RES(su_dev), sa_res_key, | 1603 | ret = core_scsi3_alloc_aptpl_registration(&su_dev->t10_pr, sa_res_key, |
1613 | i_port, isid, mapped_lun, t_port, tpgt, target_lun, | 1604 | i_port, isid, mapped_lun, t_port, tpgt, target_lun, |
1614 | res_holder, all_tg_pt, type); | 1605 | res_holder, all_tg_pt, type); |
1615 | out: | 1606 | out: |
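res_aptpl_metadata is a write-only control file: userspace replays reservation state saved for Activate Persist Through Power Loss (APTPL) by writing key=value tokens, which this handler parses into the arguments of core_scsi3_alloc_aptpl_registration(). A hypothetical payload, using only the token names evidenced by the validation messages above; the full grammar (fabric, scope, and type tokens) lives in parser code this hunk only samples:

    /*
     * Hypothetical write to res_aptpl_metadata (values illustrative):
     *
     *   initiator_node=iqn.1994-05.com.example:init initiator_isid=...
     *   sa_res_key=0x1234 target_node=iqn.2003-01.com.example:tgt
     *
     * plus tokens supplying mapped_lun, tpgt, target_lun, res_holder,
     * all_tg_pt, and type, per the allocation call above.
     */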
@@ -1662,7 +1653,7 @@ static ssize_t target_core_show_dev_info(void *p, char *page) | |||
1662 | int bl = 0; | 1653 | int bl = 0; |
1663 | ssize_t read_bytes = 0; | 1654 | ssize_t read_bytes = 0; |
1664 | 1655 | ||
1665 | if (!(se_dev->se_dev_ptr)) | 1656 | if (!se_dev->se_dev_ptr) |
1666 | return -ENODEV; | 1657 | return -ENODEV; |
1667 | 1658 | ||
1668 | transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl); | 1659 | transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl); |
@@ -1688,8 +1679,8 @@ static ssize_t target_core_store_dev_control( | |||
1688 | struct se_hba *hba = se_dev->se_dev_hba; | 1679 | struct se_hba *hba = se_dev->se_dev_hba; |
1689 | struct se_subsystem_api *t = hba->transport; | 1680 | struct se_subsystem_api *t = hba->transport; |
1690 | 1681 | ||
1691 | if (!(se_dev->se_dev_su_ptr)) { | 1682 | if (!se_dev->se_dev_su_ptr) { |
1692 | printk(KERN_ERR "Unable to locate struct se_subsystem_dev>se" | 1683 | pr_err("Unable to locate struct se_subsystem_dev>se" |
1693 | "_dev_su_ptr\n"); | 1684 | "_dev_su_ptr\n"); |
1694 | return -EINVAL; | 1685 | return -EINVAL; |
1695 | } | 1686 | } |
@@ -1725,7 +1716,7 @@ static ssize_t target_core_store_dev_alias( | |||
1725 | ssize_t read_bytes; | 1716 | ssize_t read_bytes; |
1726 | 1717 | ||
1727 | if (count > (SE_DEV_ALIAS_LEN-1)) { | 1718 | if (count > (SE_DEV_ALIAS_LEN-1)) { |
1728 | printk(KERN_ERR "alias count: %d exceeds" | 1719 | pr_err("alias count: %d exceeds" |
1729 | " SE_DEV_ALIAS_LEN-1: %u\n", (int)count, | 1720 | " SE_DEV_ALIAS_LEN-1: %u\n", (int)count, |
1730 | SE_DEV_ALIAS_LEN-1); | 1721 | SE_DEV_ALIAS_LEN-1); |
1731 | return -EINVAL; | 1722 | return -EINVAL; |
@@ -1735,7 +1726,7 @@ static ssize_t target_core_store_dev_alias( | |||
1735 | read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN, | 1726 | read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN, |
1736 | "%s", page); | 1727 | "%s", page); |
1737 | 1728 | ||
1738 | printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set alias: %s\n", | 1729 | pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n", |
1739 | config_item_name(&hba->hba_group.cg_item), | 1730 | config_item_name(&hba->hba_group.cg_item), |
1740 | config_item_name(&se_dev->se_dev_group.cg_item), | 1731 | config_item_name(&se_dev->se_dev_group.cg_item), |
1741 | se_dev->se_dev_alias); | 1732 | se_dev->se_dev_alias); |
@@ -1771,7 +1762,7 @@ static ssize_t target_core_store_dev_udev_path( | |||
1771 | ssize_t read_bytes; | 1762 | ssize_t read_bytes; |
1772 | 1763 | ||
1773 | if (count > (SE_UDEV_PATH_LEN-1)) { | 1764 | if (count > (SE_UDEV_PATH_LEN-1)) { |
1774 | printk(KERN_ERR "udev_path count: %d exceeds" | 1765 | pr_err("udev_path count: %d exceeds" |
1775 | " SE_UDEV_PATH_LEN-1: %u\n", (int)count, | 1766 | " SE_UDEV_PATH_LEN-1: %u\n", (int)count, |
1776 | SE_UDEV_PATH_LEN-1); | 1767 | SE_UDEV_PATH_LEN-1); |
1777 | return -EINVAL; | 1768 | return -EINVAL; |
@@ -1781,7 +1772,7 @@ static ssize_t target_core_store_dev_udev_path( | |||
1781 | read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN, | 1772 | read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN, |
1782 | "%s", page); | 1773 | "%s", page); |
1783 | 1774 | ||
1784 | printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set udev_path: %s\n", | 1775 | pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n", |
1785 | config_item_name(&hba->hba_group.cg_item), | 1776 | config_item_name(&hba->hba_group.cg_item), |
1786 | config_item_name(&se_dev->se_dev_group.cg_item), | 1777 | config_item_name(&se_dev->se_dev_group.cg_item), |
1787 | se_dev->se_dev_udev_path); | 1778 | se_dev->se_dev_udev_path); |
@@ -1809,13 +1800,13 @@ static ssize_t target_core_store_dev_enable( | |||
1809 | char *ptr; | 1800 | char *ptr; |
1810 | 1801 | ||
1811 | ptr = strstr(page, "1"); | 1802 | ptr = strstr(page, "1"); |
1812 | if (!(ptr)) { | 1803 | if (!ptr) { |
1813 | printk(KERN_ERR "For dev_enable ops, only valid value" | 1804 | pr_err("For dev_enable ops, only valid value" |
1814 | " is \"1\"\n"); | 1805 | " is \"1\"\n"); |
1815 | return -EINVAL; | 1806 | return -EINVAL; |
1816 | } | 1807 | } |
1817 | if ((se_dev->se_dev_ptr)) { | 1808 | if (se_dev->se_dev_ptr) { |
1818 | printk(KERN_ERR "se_dev->se_dev_ptr already set for storage" | 1809 | pr_err("se_dev->se_dev_ptr already set for storage" |
1819 | " object\n"); | 1810 | " object\n"); |
1820 | return -EEXIST; | 1811 | return -EEXIST; |
1821 | } | 1812 | } |
@@ -1830,7 +1821,7 @@ static ssize_t target_core_store_dev_enable( | |||
1830 | return -EINVAL; | 1821 | return -EINVAL; |
1831 | 1822 | ||
1832 | se_dev->se_dev_ptr = dev; | 1823 | se_dev->se_dev_ptr = dev; |
1833 | printk(KERN_INFO "Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:" | 1824 | pr_debug("Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:" |
1834 | " %p\n", se_dev->se_dev_ptr); | 1825 | " %p\n", se_dev->se_dev_ptr); |
1835 | 1826 | ||
1836 | return count; | 1827 | return count; |
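dev_enable accepts only the literal "1"; writing it latches se_dev_ptr, so a second enable fails with -EEXIST instead of re-creating the backend. The guard logic, condensed from the handler above:

    /* Condensed enable-path guards. */
    if (!strstr(page, "1"))
            return -EINVAL; /* only "1" enables the device */
    if (se_dev->se_dev_ptr)
            return -EEXIST; /* storage object already enabled */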
@@ -1854,22 +1845,22 @@ static ssize_t target_core_show_alua_lu_gp(void *p, char *page) | |||
1854 | ssize_t len = 0; | 1845 | ssize_t len = 0; |
1855 | 1846 | ||
1856 | dev = su_dev->se_dev_ptr; | 1847 | dev = su_dev->se_dev_ptr; |
1857 | if (!(dev)) | 1848 | if (!dev) |
1858 | return -ENODEV; | 1849 | return -ENODEV; |
1859 | 1850 | ||
1860 | if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) | 1851 | if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) |
1861 | return len; | 1852 | return len; |
1862 | 1853 | ||
1863 | lu_gp_mem = dev->dev_alua_lu_gp_mem; | 1854 | lu_gp_mem = dev->dev_alua_lu_gp_mem; |
1864 | if (!(lu_gp_mem)) { | 1855 | if (!lu_gp_mem) { |
1865 | printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem" | 1856 | pr_err("NULL struct se_device->dev_alua_lu_gp_mem" |
1866 | " pointer\n"); | 1857 | " pointer\n"); |
1867 | return -EINVAL; | 1858 | return -EINVAL; |
1868 | } | 1859 | } |
1869 | 1860 | ||
1870 | spin_lock(&lu_gp_mem->lu_gp_mem_lock); | 1861 | spin_lock(&lu_gp_mem->lu_gp_mem_lock); |
1871 | lu_gp = lu_gp_mem->lu_gp; | 1862 | lu_gp = lu_gp_mem->lu_gp; |
1872 | if ((lu_gp)) { | 1863 | if (lu_gp) { |
1873 | lu_ci = &lu_gp->lu_gp_group.cg_item; | 1864 | lu_ci = &lu_gp->lu_gp_group.cg_item; |
1874 | len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n", | 1865 | len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n", |
1875 | config_item_name(lu_ci), lu_gp->lu_gp_id); | 1866 | config_item_name(lu_ci), lu_gp->lu_gp_id); |
@@ -1893,17 +1884,17 @@ static ssize_t target_core_store_alua_lu_gp( | |||
1893 | int move = 0; | 1884 | int move = 0; |
1894 | 1885 | ||
1895 | dev = su_dev->se_dev_ptr; | 1886 | dev = su_dev->se_dev_ptr; |
1896 | if (!(dev)) | 1887 | if (!dev) |
1897 | return -ENODEV; | 1888 | return -ENODEV; |
1898 | 1889 | ||
1899 | if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) { | 1890 | if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) { |
1900 | printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for %s/%s\n", | 1891 | pr_warn("SPC3_ALUA_EMULATED not enabled for %s/%s\n", |
1901 | config_item_name(&hba->hba_group.cg_item), | 1892 | config_item_name(&hba->hba_group.cg_item), |
1902 | config_item_name(&su_dev->se_dev_group.cg_item)); | 1893 | config_item_name(&su_dev->se_dev_group.cg_item)); |
1903 | return -EINVAL; | 1894 | return -EINVAL; |
1904 | } | 1895 | } |
1905 | if (count > LU_GROUP_NAME_BUF) { | 1896 | if (count > LU_GROUP_NAME_BUF) { |
1906 | printk(KERN_ERR "ALUA LU Group Alias too large!\n"); | 1897 | pr_err("ALUA LU Group Alias too large!\n"); |
1907 | return -EINVAL; | 1898 | return -EINVAL; |
1908 | } | 1899 | } |
1909 | memset(buf, 0, LU_GROUP_NAME_BUF); | 1900 | memset(buf, 0, LU_GROUP_NAME_BUF); |
@@ -1919,27 +1910,27 @@ static ssize_t target_core_store_alua_lu_gp( | |||
1919 | * core_alua_get_lu_gp_by_name below(). | 1910 | * core_alua_get_lu_gp_by_name below(). |
1920 | */ | 1911 | */ |
1921 | lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf)); | 1912 | lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf)); |
1922 | if (!(lu_gp_new)) | 1913 | if (!lu_gp_new) |
1923 | return -ENODEV; | 1914 | return -ENODEV; |
1924 | } | 1915 | } |
1925 | lu_gp_mem = dev->dev_alua_lu_gp_mem; | 1916 | lu_gp_mem = dev->dev_alua_lu_gp_mem; |
1926 | if (!(lu_gp_mem)) { | 1917 | if (!lu_gp_mem) { |
1927 | if (lu_gp_new) | 1918 | if (lu_gp_new) |
1928 | core_alua_put_lu_gp_from_name(lu_gp_new); | 1919 | core_alua_put_lu_gp_from_name(lu_gp_new); |
1929 | printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem" | 1920 | pr_err("NULL struct se_device->dev_alua_lu_gp_mem" |
1930 | " pointer\n"); | 1921 | " pointer\n"); |
1931 | return -EINVAL; | 1922 | return -EINVAL; |
1932 | } | 1923 | } |
1933 | 1924 | ||
1934 | spin_lock(&lu_gp_mem->lu_gp_mem_lock); | 1925 | spin_lock(&lu_gp_mem->lu_gp_mem_lock); |
1935 | lu_gp = lu_gp_mem->lu_gp; | 1926 | lu_gp = lu_gp_mem->lu_gp; |
1936 | if ((lu_gp)) { | 1927 | if (lu_gp) { |
1937 | /* | 1928 | /* |
1938 | * Clearing an existing lu_gp association, and replacing | 1929 | * Clearing an existing lu_gp association, and replacing |
1939 | * with NULL | 1930 | * with NULL |
1940 | */ | 1931 | */ |
1941 | if (!(lu_gp_new)) { | 1932 | if (!lu_gp_new) { |
1942 | printk(KERN_INFO "Target_Core_ConfigFS: Releasing %s/%s" | 1933 | pr_debug("Target_Core_ConfigFS: Releasing %s/%s" |
1943 | " from ALUA LU Group: core/alua/lu_gps/%s, ID:" | 1934 | " from ALUA LU Group: core/alua/lu_gps/%s, ID:" |
1944 | " %hu\n", | 1935 | " %hu\n", |
1945 | config_item_name(&hba->hba_group.cg_item), | 1936 | config_item_name(&hba->hba_group.cg_item), |
@@ -1964,7 +1955,7 @@ static ssize_t target_core_store_alua_lu_gp( | |||
1964 | __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new); | 1955 | __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new); |
1965 | spin_unlock(&lu_gp_mem->lu_gp_mem_lock); | 1956 | spin_unlock(&lu_gp_mem->lu_gp_mem_lock); |
1966 | 1957 | ||
1967 | printk(KERN_INFO "Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:" | 1958 | pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:" |
1968 | " core/alua/lu_gps/%s, ID: %hu\n", | 1959 | " core/alua/lu_gps/%s, ID: %hu\n", |
1969 | (move) ? "Moving" : "Adding", | 1960 | (move) ? "Moving" : "Adding", |
1970 | config_item_name(&hba->hba_group.cg_item), | 1961 | config_item_name(&hba->hba_group.cg_item), |
@@ -2008,7 +1999,7 @@ static void target_core_dev_release(struct config_item *item) | |||
2008 | *`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable` | 1999 | *`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable` |
2009 | */ | 2000 | */ |
2010 | if (se_dev->se_dev_ptr) { | 2001 | if (se_dev->se_dev_ptr) { |
2011 | printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_" | 2002 | pr_debug("Target_Core_ConfigFS: Calling se_free_" |
2012 | "virtual_device() for se_dev_ptr: %p\n", | 2003 | "virtual_device() for se_dev_ptr: %p\n", |
2013 | se_dev->se_dev_ptr); | 2004 | se_dev->se_dev_ptr); |
2014 | 2005 | ||
@@ -2017,14 +2008,14 @@ static void target_core_dev_release(struct config_item *item) | |||
2017 | /* | 2008 | /* |
2018 | * Release struct se_subsystem_dev->se_dev_su_ptr.. | 2009 | * Release struct se_subsystem_dev->se_dev_su_ptr.. |
2019 | */ | 2010 | */ |
2020 | printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_" | 2011 | pr_debug("Target_Core_ConfigFS: Calling t->free_" |
2021 | "device() for se_dev_su_ptr: %p\n", | 2012 | "device() for se_dev_su_ptr: %p\n", |
2022 | se_dev->se_dev_su_ptr); | 2013 | se_dev->se_dev_su_ptr); |
2023 | 2014 | ||
2024 | t->free_device(se_dev->se_dev_su_ptr); | 2015 | t->free_device(se_dev->se_dev_su_ptr); |
2025 | } | 2016 | } |
2026 | 2017 | ||
2027 | printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem" | 2018 | pr_debug("Target_Core_ConfigFS: Deallocating se_subsystem" |
2028 | "_dev_t: %p\n", se_dev); | 2019 | "_dev_t: %p\n", se_dev); |
2029 | kfree(se_dev); | 2020 | kfree(se_dev); |
2030 | } | 2021 | } |
@@ -2039,10 +2030,10 @@ static ssize_t target_core_dev_show(struct config_item *item, | |||
2039 | struct target_core_configfs_attribute *tc_attr = container_of( | 2030 | struct target_core_configfs_attribute *tc_attr = container_of( |
2040 | attr, struct target_core_configfs_attribute, attr); | 2031 | attr, struct target_core_configfs_attribute, attr); |
2041 | 2032 | ||
2042 | if (!(tc_attr->show)) | 2033 | if (!tc_attr->show) |
2043 | return -EINVAL; | 2034 | return -EINVAL; |
2044 | 2035 | ||
2045 | return tc_attr->show((void *)se_dev, page); | 2036 | return tc_attr->show(se_dev, page); |
2046 | } | 2037 | } |
2047 | 2038 | ||
2048 | static ssize_t target_core_dev_store(struct config_item *item, | 2039 | static ssize_t target_core_dev_store(struct config_item *item, |
@@ -2055,10 +2046,10 @@ static ssize_t target_core_dev_store(struct config_item *item, | |||
2055 | struct target_core_configfs_attribute *tc_attr = container_of( | 2046 | struct target_core_configfs_attribute *tc_attr = container_of( |
2056 | attr, struct target_core_configfs_attribute, attr); | 2047 | attr, struct target_core_configfs_attribute, attr); |
2057 | 2048 | ||
2058 | if (!(tc_attr->store)) | 2049 | if (!tc_attr->store) |
2059 | return -EINVAL; | 2050 | return -EINVAL; |
2060 | 2051 | ||
2061 | return tc_attr->store((void *)se_dev, page, count); | 2052 | return tc_attr->store(se_dev, page, count); |
2062 | } | 2053 | } |
2063 | 2054 | ||
2064 | static struct configfs_item_operations target_core_dev_item_ops = { | 2055 | static struct configfs_item_operations target_core_dev_item_ops = { |
@@ -2098,7 +2089,7 @@ static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id( | |||
2098 | struct t10_alua_lu_gp *lu_gp, | 2089 | struct t10_alua_lu_gp *lu_gp, |
2099 | char *page) | 2090 | char *page) |
2100 | { | 2091 | { |
2101 | if (!(lu_gp->lu_gp_valid_id)) | 2092 | if (!lu_gp->lu_gp_valid_id) |
2102 | return 0; | 2093 | return 0; |
2103 | 2094 | ||
2104 | return sprintf(page, "%hu\n", lu_gp->lu_gp_id); | 2095 | return sprintf(page, "%hu\n", lu_gp->lu_gp_id); |
@@ -2115,12 +2106,12 @@ static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id( | |||
2115 | 2106 | ||
2116 | ret = strict_strtoul(page, 0, &lu_gp_id); | 2107 | ret = strict_strtoul(page, 0, &lu_gp_id); |
2117 | if (ret < 0) { | 2108 | if (ret < 0) { |
2118 | printk(KERN_ERR "strict_strtoul() returned %d for" | 2109 | pr_err("strict_strtoul() returned %d for" |
2119 | " lu_gp_id\n", ret); | 2110 | " lu_gp_id\n", ret); |
2120 | return -EINVAL; | 2111 | return -EINVAL; |
2121 | } | 2112 | } |
2122 | if (lu_gp_id > 0x0000ffff) { | 2113 | if (lu_gp_id > 0x0000ffff) { |
2123 | printk(KERN_ERR "ALUA lu_gp_id: %lu exceeds maximum:" | 2114 | pr_err("ALUA lu_gp_id: %lu exceeds maximum:" |
2124 | " 0x0000ffff\n", lu_gp_id); | 2115 | " 0x0000ffff\n", lu_gp_id); |
2125 | return -EINVAL; | 2116 | return -EINVAL; |
2126 | } | 2117 | } |
@@ -2129,7 +2120,7 @@ static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id( | |||
2129 | if (ret < 0) | 2120 | if (ret < 0) |
2130 | return -EINVAL; | 2121 | return -EINVAL; |
2131 | 2122 | ||
2132 | printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Logical Unit" | 2123 | pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit" |
2133 | " Group: core/alua/lu_gps/%s to ID: %hu\n", | 2124 | " Group: core/alua/lu_gps/%s to ID: %hu\n", |
2134 | config_item_name(&alua_lu_gp_cg->cg_item), | 2125 | config_item_name(&alua_lu_gp_cg->cg_item), |
2135 | lu_gp->lu_gp_id); | 2126 | lu_gp->lu_gp_id); |
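lu_gp_id lands in a u16, hence the explicit range check after parsing into an unsigned long; strict_strtoul() alone cannot enforce the narrower width. Condensed from the handler above:

    unsigned long lu_gp_id;

    if (strict_strtoul(page, 0, &lu_gp_id) < 0)
            return -EINVAL;
    if (lu_gp_id > 0x0000ffff) /* must fit the u16 lu_gp_id member */
            return -EINVAL;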
@@ -2167,7 +2158,7 @@ static ssize_t target_core_alua_lu_gp_show_attr_members( | |||
2167 | cur_len++; /* Extra byte for NULL terminator */ | 2158 | cur_len++; /* Extra byte for NULL terminator */ |
2168 | 2159 | ||
2169 | if ((cur_len + len) > PAGE_SIZE) { | 2160 | if ((cur_len + len) > PAGE_SIZE) { |
2170 | printk(KERN_WARNING "Ran out of lu_gp_show_attr" | 2161 | pr_warn("Ran out of lu_gp_show_attr" |
2171 | "_members buffer\n"); | 2162 | "_members buffer\n"); |
2172 | break; | 2163 | break; |
2173 | } | 2164 | } |
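[Note: the members listing is bounded by PAGE_SIZE because a configfs show buffer is exactly one page; each candidate line is measured before being appended, and the loop breaks (with the warning above) rather than overflow. Reduced to a userspace sketch of the accumulate-with-bound idiom:]

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Append "<item>\n" entries into a one-page buffer, never overflowing. */
static size_t fill_page(char *page, const char *items[], int n)
{
	size_t len = 0;
	int i;

	for (i = 0; i < n; i++) {
		/* +2: trailing newline plus the NUL snprintf writes */
		size_t cur_len = strlen(items[i]) + 2;

		if (cur_len + len > PAGE_SIZE)	/* same bound check as above */
			break;
		len += snprintf(page + len, PAGE_SIZE - len, "%s\n", items[i]);
	}
	return len;
}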
@@ -2231,7 +2222,7 @@ static struct config_group *target_core_alua_create_lu_gp( | |||
2231 | config_group_init_type_name(alua_lu_gp_cg, name, | 2222 | config_group_init_type_name(alua_lu_gp_cg, name, |
2232 | &target_core_alua_lu_gp_cit); | 2223 | &target_core_alua_lu_gp_cit); |
2233 | 2224 | ||
2234 | printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Logical Unit" | 2225 | pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit" |
2235 | " Group: core/alua/lu_gps/%s\n", | 2226 | " Group: core/alua/lu_gps/%s\n", |
2236 | config_item_name(alua_lu_gp_ci)); | 2227 | config_item_name(alua_lu_gp_ci)); |
2237 | 2228 | ||
@@ -2246,7 +2237,7 @@ static void target_core_alua_drop_lu_gp( | |||
2246 | struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), | 2237 | struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), |
2247 | struct t10_alua_lu_gp, lu_gp_group); | 2238 | struct t10_alua_lu_gp, lu_gp_group); |
2248 | 2239 | ||
2249 | printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit" | 2240 | pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit" |
2250 | " Group: core/alua/lu_gps/%s, ID: %hu\n", | 2241 | " Group: core/alua/lu_gps/%s, ID: %hu\n", |
2251 | config_item_name(item), lu_gp->lu_gp_id); | 2242 | config_item_name(item), lu_gp->lu_gp_id); |
2252 | /* | 2243 | /* |
@@ -2305,22 +2296,22 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state( | |||
2305 | unsigned long tmp; | 2296 | unsigned long tmp; |
2306 | int new_state, ret; | 2297 | int new_state, ret; |
2307 | 2298 | ||
2308 | if (!(tg_pt_gp->tg_pt_gp_valid_id)) { | 2299 | if (!tg_pt_gp->tg_pt_gp_valid_id) { |
2309 | printk(KERN_ERR "Unable to do implict ALUA on non valid" | 2300 | pr_err("Unable to do implict ALUA on non valid" |
2310 | " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); | 2301 | " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); |
2311 | return -EINVAL; | 2302 | return -EINVAL; |
2312 | } | 2303 | } |
2313 | 2304 | ||
2314 | ret = strict_strtoul(page, 0, &tmp); | 2305 | ret = strict_strtoul(page, 0, &tmp); |
2315 | if (ret < 0) { | 2306 | if (ret < 0) { |
2316 | printk("Unable to extract new ALUA access state from" | 2307 | pr_err("Unable to extract new ALUA access state from" |
2317 | " %s\n", page); | 2308 | " %s\n", page); |
2318 | return -EINVAL; | 2309 | return -EINVAL; |
2319 | } | 2310 | } |
2320 | new_state = (int)tmp; | 2311 | new_state = (int)tmp; |
2321 | 2312 | ||
2322 | if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) { | 2313 | if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) { |
2323 | printk(KERN_ERR "Unable to process implict configfs ALUA" | 2314 | pr_err("Unable to process implict configfs ALUA" |
2324 | " transition while TPGS_IMPLICT_ALUA is diabled\n"); | 2315 | " transition while TPGS_IMPLICT_ALUA is diabled\n"); |
2325 | return -EINVAL; | 2316 | return -EINVAL; |
2326 | } | 2317 | } |
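[Note: the access_state store path is gated twice: the group must carry a valid ID, and its access type must advertise implicit ALUA before a configfs write may trigger a transition. The "implict"/"diabled" spellings are in the source at this commit and were only corrected upstream later. A sketch of the gate, with illustrative flag values rather than the real target_core_alua.h constants:]

/* Illustrative values only; the real constants live in target_core_alua.h. */
#define TPGS_EXPLICT_ALUA	0x01	/* spelling as in this tree */
#define TPGS_IMPLICT_ALUA	0x02

/* Allow a configfs-driven state change only for implicit-ALUA groups. */
static int may_transition(int valid_id, int access_type)
{
	if (!valid_id)
		return -1;	/* -EINVAL in the kernel code */
	if (!(access_type & TPGS_IMPLICT_ALUA))
		return -1;
	return 0;
}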
@@ -2351,8 +2342,8 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status( | |||
2351 | unsigned long tmp; | 2342 | unsigned long tmp; |
2352 | int new_status, ret; | 2343 | int new_status, ret; |
2353 | 2344 | ||
2354 | if (!(tg_pt_gp->tg_pt_gp_valid_id)) { | 2345 | if (!tg_pt_gp->tg_pt_gp_valid_id) { |
2355 | printk(KERN_ERR "Unable to do set ALUA access status on non" | 2346 | pr_err("Unable to do set ALUA access status on non" |
2356 | " valid tg_pt_gp ID: %hu\n", | 2347 | " valid tg_pt_gp ID: %hu\n", |
2357 | tg_pt_gp->tg_pt_gp_valid_id); | 2348 | tg_pt_gp->tg_pt_gp_valid_id); |
2358 | return -EINVAL; | 2349 | return -EINVAL; |
@@ -2360,7 +2351,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status( | |||
2360 | 2351 | ||
2361 | ret = strict_strtoul(page, 0, &tmp); | 2352 | ret = strict_strtoul(page, 0, &tmp); |
2362 | if (ret < 0) { | 2353 | if (ret < 0) { |
2363 | printk(KERN_ERR "Unable to extract new ALUA access status" | 2354 | pr_err("Unable to extract new ALUA access status" |
2364 | " from %s\n", page); | 2355 | " from %s\n", page); |
2365 | return -EINVAL; | 2356 | return -EINVAL; |
2366 | } | 2357 | } |
@@ -2369,7 +2360,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status( | |||
2369 | if ((new_status != ALUA_STATUS_NONE) && | 2360 | if ((new_status != ALUA_STATUS_NONE) && |
2370 | (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && | 2361 | (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && |
2371 | (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { | 2362 | (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { |
2372 | printk(KERN_ERR "Illegal ALUA access status: 0x%02x\n", | 2363 | pr_err("Illegal ALUA access status: 0x%02x\n", |
2373 | new_status); | 2364 | new_status); |
2374 | return -EINVAL; | 2365 | return -EINVAL; |
2375 | } | 2366 | } |
@@ -2420,12 +2411,12 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata( | |||
2420 | 2411 | ||
2421 | ret = strict_strtoul(page, 0, &tmp); | 2412 | ret = strict_strtoul(page, 0, &tmp); |
2422 | if (ret < 0) { | 2413 | if (ret < 0) { |
2423 | printk(KERN_ERR "Unable to extract alua_write_metadata\n"); | 2414 | pr_err("Unable to extract alua_write_metadata\n"); |
2424 | return -EINVAL; | 2415 | return -EINVAL; |
2425 | } | 2416 | } |
2426 | 2417 | ||
2427 | if ((tmp != 0) && (tmp != 1)) { | 2418 | if ((tmp != 0) && (tmp != 1)) { |
2428 | printk(KERN_ERR "Illegal value for alua_write_metadata:" | 2419 | pr_err("Illegal value for alua_write_metadata:" |
2429 | " %lu\n", tmp); | 2420 | " %lu\n", tmp); |
2430 | return -EINVAL; | 2421 | return -EINVAL; |
2431 | } | 2422 | } |
@@ -2507,7 +2498,7 @@ static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id( | |||
2507 | struct t10_alua_tg_pt_gp *tg_pt_gp, | 2498 | struct t10_alua_tg_pt_gp *tg_pt_gp, |
2508 | char *page) | 2499 | char *page) |
2509 | { | 2500 | { |
2510 | if (!(tg_pt_gp->tg_pt_gp_valid_id)) | 2501 | if (!tg_pt_gp->tg_pt_gp_valid_id) |
2511 | return 0; | 2502 | return 0; |
2512 | 2503 | ||
2513 | return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id); | 2504 | return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id); |
@@ -2524,12 +2515,12 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id( | |||
2524 | 2515 | ||
2525 | ret = strict_strtoul(page, 0, &tg_pt_gp_id); | 2516 | ret = strict_strtoul(page, 0, &tg_pt_gp_id); |
2526 | if (ret < 0) { | 2517 | if (ret < 0) { |
2527 | printk(KERN_ERR "strict_strtoul() returned %d for" | 2518 | pr_err("strict_strtoul() returned %d for" |
2528 | " tg_pt_gp_id\n", ret); | 2519 | " tg_pt_gp_id\n", ret); |
2529 | return -EINVAL; | 2520 | return -EINVAL; |
2530 | } | 2521 | } |
2531 | if (tg_pt_gp_id > 0x0000ffff) { | 2522 | if (tg_pt_gp_id > 0x0000ffff) { |
2532 | printk(KERN_ERR "ALUA tg_pt_gp_id: %lu exceeds maximum:" | 2523 | pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:" |
2533 | " 0x0000ffff\n", tg_pt_gp_id); | 2524 | " 0x0000ffff\n", tg_pt_gp_id); |
2534 | return -EINVAL; | 2525 | return -EINVAL; |
2535 | } | 2526 | } |
@@ -2538,7 +2529,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id( | |||
2538 | if (ret < 0) | 2529 | if (ret < 0) |
2539 | return -EINVAL; | 2530 | return -EINVAL; |
2540 | 2531 | ||
2541 | printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Target Port Group: " | 2532 | pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: " |
2542 | "core/alua/tg_pt_gps/%s to ID: %hu\n", | 2533 | "core/alua/tg_pt_gps/%s to ID: %hu\n", |
2543 | config_item_name(&alua_tg_pt_gp_cg->cg_item), | 2534 | config_item_name(&alua_tg_pt_gp_cg->cg_item), |
2544 | tg_pt_gp->tg_pt_gp_id); | 2535 | tg_pt_gp->tg_pt_gp_id); |
@@ -2572,14 +2563,14 @@ static ssize_t target_core_alua_tg_pt_gp_show_attr_members( | |||
2572 | lun = port->sep_lun; | 2563 | lun = port->sep_lun; |
2573 | 2564 | ||
2574 | cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu" | 2565 | cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu" |
2575 | "/%s\n", TPG_TFO(tpg)->get_fabric_name(), | 2566 | "/%s\n", tpg->se_tpg_tfo->get_fabric_name(), |
2576 | TPG_TFO(tpg)->tpg_get_wwn(tpg), | 2567 | tpg->se_tpg_tfo->tpg_get_wwn(tpg), |
2577 | TPG_TFO(tpg)->tpg_get_tag(tpg), | 2568 | tpg->se_tpg_tfo->tpg_get_tag(tpg), |
2578 | config_item_name(&lun->lun_group.cg_item)); | 2569 | config_item_name(&lun->lun_group.cg_item)); |
2579 | cur_len++; /* Extra byte for NULL terminator */ | 2570 | cur_len++; /* Extra byte for NULL terminator */ |
2580 | 2571 | ||
2581 | if ((cur_len + len) > PAGE_SIZE) { | 2572 | if ((cur_len + len) > PAGE_SIZE) { |
2582 | printk(KERN_WARNING "Ran out of lu_gp_show_attr" | 2573 | pr_warn("Ran out of lu_gp_show_attr" |
2583 | "_members buffer\n"); | 2574 | "_members buffer\n"); |
2584 | break; | 2575 | break; |
2585 | } | 2576 | } |
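[Note: this hunk belongs to the series' tree-wide removal of accessor macros: TPG_TFO(), T10_ALUA(), SU_DEV(), TRANSPORT(), DEV_STAT_GRP() and friends are replaced by the member dereferences they expanded to, which read the same but no longer hide the data layout. Reconstructed shape of the removed wrapper, not verbatim from the old header:]

/* Old accessor, approximately: */
#define TPG_TFO(se_tpg)	((se_tpg)->se_tpg_tfo)

/* so the conversion is purely textual:
 *	TPG_TFO(tpg)->get_fabric_name()		(before)
 *	tpg->se_tpg_tfo->get_fabric_name()	(after)
 */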
@@ -2645,7 +2636,7 @@ static struct config_group *target_core_alua_create_tg_pt_gp( | |||
2645 | struct config_item *alua_tg_pt_gp_ci = NULL; | 2636 | struct config_item *alua_tg_pt_gp_ci = NULL; |
2646 | 2637 | ||
2647 | tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0); | 2638 | tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0); |
2648 | if (!(tg_pt_gp)) | 2639 | if (!tg_pt_gp) |
2649 | return NULL; | 2640 | return NULL; |
2650 | 2641 | ||
2651 | alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group; | 2642 | alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group; |
@@ -2654,7 +2645,7 @@ static struct config_group *target_core_alua_create_tg_pt_gp( | |||
2654 | config_group_init_type_name(alua_tg_pt_gp_cg, name, | 2645 | config_group_init_type_name(alua_tg_pt_gp_cg, name, |
2655 | &target_core_alua_tg_pt_gp_cit); | 2646 | &target_core_alua_tg_pt_gp_cit); |
2656 | 2647 | ||
2657 | printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Target Port" | 2648 | pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port" |
2658 | " Group: alua/tg_pt_gps/%s\n", | 2649 | " Group: alua/tg_pt_gps/%s\n", |
2659 | config_item_name(alua_tg_pt_gp_ci)); | 2650 | config_item_name(alua_tg_pt_gp_ci)); |
2660 | 2651 | ||
@@ -2668,7 +2659,7 @@ static void target_core_alua_drop_tg_pt_gp( | |||
2668 | struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item), | 2659 | struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item), |
2669 | struct t10_alua_tg_pt_gp, tg_pt_gp_group); | 2660 | struct t10_alua_tg_pt_gp, tg_pt_gp_group); |
2670 | 2661 | ||
2671 | printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port" | 2662 | pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port" |
2672 | " Group: alua/tg_pt_gps/%s, ID: %hu\n", | 2663 | " Group: alua/tg_pt_gps/%s, ID: %hu\n", |
2673 | config_item_name(item), tg_pt_gp->tg_pt_gp_id); | 2664 | config_item_name(item), tg_pt_gp->tg_pt_gp_id); |
2674 | /* | 2665 | /* |
@@ -2759,21 +2750,21 @@ static struct config_group *target_core_make_subdev( | |||
2759 | 2750 | ||
2760 | se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); | 2751 | se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); |
2761 | if (!se_dev) { | 2752 | if (!se_dev) { |
2762 | printk(KERN_ERR "Unable to allocate memory for" | 2753 | pr_err("Unable to allocate memory for" |
2763 | " struct se_subsystem_dev\n"); | 2754 | " struct se_subsystem_dev\n"); |
2764 | goto unlock; | 2755 | goto unlock; |
2765 | } | 2756 | } |
2766 | INIT_LIST_HEAD(&se_dev->g_se_dev_list); | 2757 | INIT_LIST_HEAD(&se_dev->se_dev_node); |
2767 | INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); | 2758 | INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); |
2768 | spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); | 2759 | spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); |
2769 | INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list); | 2760 | INIT_LIST_HEAD(&se_dev->t10_pr.registration_list); |
2770 | INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list); | 2761 | INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list); |
2771 | spin_lock_init(&se_dev->t10_reservation.registration_lock); | 2762 | spin_lock_init(&se_dev->t10_pr.registration_lock); |
2772 | spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock); | 2763 | spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock); |
2773 | INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); | 2764 | INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); |
2774 | spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); | 2765 | spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); |
2775 | spin_lock_init(&se_dev->se_dev_lock); | 2766 | spin_lock_init(&se_dev->se_dev_lock); |
2776 | se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; | 2767 | se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; |
2777 | se_dev->t10_wwn.t10_sub_dev = se_dev; | 2768 | se_dev->t10_wwn.t10_sub_dev = se_dev; |
2778 | se_dev->t10_alua.t10_sub_dev = se_dev; | 2769 | se_dev->t10_alua.t10_sub_dev = se_dev; |
2779 | se_dev->se_dev_attrib.da_sub_dev = se_dev; | 2770 | se_dev->se_dev_attrib.da_sub_dev = se_dev; |
@@ -2783,7 +2774,7 @@ static struct config_group *target_core_make_subdev( | |||
2783 | 2774 | ||
2784 | dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7, | 2775 | dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7, |
2785 | GFP_KERNEL); | 2776 | GFP_KERNEL); |
2786 | if (!(dev_cg->default_groups)) | 2777 | if (!dev_cg->default_groups) |
2787 | goto out; | 2778 | goto out; |
2788 | /* | 2779 | /* |
2789 | * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr | 2780 | * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr |
@@ -2794,14 +2785,14 @@ static struct config_group *target_core_make_subdev( | |||
2794 | * configfs tree for device object's struct config_group. | 2785 | * configfs tree for device object's struct config_group. |
2795 | */ | 2786 | */ |
2796 | se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name); | 2787 | se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name); |
2797 | if (!(se_dev->se_dev_su_ptr)) { | 2788 | if (!se_dev->se_dev_su_ptr) { |
2798 | printk(KERN_ERR "Unable to locate subsystem dependent pointer" | 2789 | pr_err("Unable to locate subsystem dependent pointer" |
2799 | " from allocate_virtdevice()\n"); | 2790 | " from allocate_virtdevice()\n"); |
2800 | goto out; | 2791 | goto out; |
2801 | } | 2792 | } |
2802 | spin_lock(&se_global->g_device_lock); | 2793 | spin_lock(&se_device_lock); |
2803 | list_add_tail(&se_dev->g_se_dev_list, &se_global->g_se_dev_list); | 2794 | list_add_tail(&se_dev->se_dev_node, &se_dev_list); |
2804 | spin_unlock(&se_global->g_device_lock); | 2795 | spin_unlock(&se_device_lock); |
2805 | 2796 | ||
2806 | config_group_init_type_name(&se_dev->se_dev_group, name, | 2797 | config_group_init_type_name(&se_dev->se_dev_group, name, |
2807 | &target_core_dev_cit); | 2798 | &target_core_dev_cit); |
@@ -2826,14 +2817,14 @@ static struct config_group *target_core_make_subdev( | |||
2826 | * Add core/$HBA/$DEV/alua/default_tg_pt_gp | 2817 | * Add core/$HBA/$DEV/alua/default_tg_pt_gp |
2827 | */ | 2818 | */ |
2828 | tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1); | 2819 | tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1); |
2829 | if (!(tg_pt_gp)) | 2820 | if (!tg_pt_gp) |
2830 | goto out; | 2821 | goto out; |
2831 | 2822 | ||
2832 | tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group; | 2823 | tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group; |
2833 | tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, | 2824 | tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, |
2834 | GFP_KERNEL); | 2825 | GFP_KERNEL); |
2835 | if (!(tg_pt_gp_cg->default_groups)) { | 2826 | if (!tg_pt_gp_cg->default_groups) { |
2836 | printk(KERN_ERR "Unable to allocate tg_pt_gp_cg->" | 2827 | pr_err("Unable to allocate tg_pt_gp_cg->" |
2837 | "default_groups\n"); | 2828 | "default_groups\n"); |
2838 | goto out; | 2829 | goto out; |
2839 | } | 2830 | } |
@@ -2842,28 +2833,28 @@ static struct config_group *target_core_make_subdev( | |||
2842 | "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit); | 2833 | "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit); |
2843 | tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group; | 2834 | tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group; |
2844 | tg_pt_gp_cg->default_groups[1] = NULL; | 2835 | tg_pt_gp_cg->default_groups[1] = NULL; |
2845 | T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp; | 2836 | se_dev->t10_alua.default_tg_pt_gp = tg_pt_gp; |
2846 | /* | 2837 | /* |
2847 | * Add core/$HBA/$DEV/statistics/ default groups | 2838 | * Add core/$HBA/$DEV/statistics/ default groups |
2848 | */ | 2839 | */ |
2849 | dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group; | 2840 | dev_stat_grp = &se_dev->dev_stat_grps.stat_group; |
2850 | dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4, | 2841 | dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4, |
2851 | GFP_KERNEL); | 2842 | GFP_KERNEL); |
2852 | if (!dev_stat_grp->default_groups) { | 2843 | if (!dev_stat_grp->default_groups) { |
2853 | printk(KERN_ERR "Unable to allocate dev_stat_grp->default_groups\n"); | 2844 | pr_err("Unable to allocate dev_stat_grp->default_groups\n"); |
2854 | goto out; | 2845 | goto out; |
2855 | } | 2846 | } |
2856 | target_stat_setup_dev_default_groups(se_dev); | 2847 | target_stat_setup_dev_default_groups(se_dev); |
2857 | 2848 | ||
2858 | printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:" | 2849 | pr_debug("Target_Core_ConfigFS: Allocated struct se_subsystem_dev:" |
2859 | " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr); | 2850 | " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr); |
2860 | 2851 | ||
2861 | mutex_unlock(&hba->hba_access_mutex); | 2852 | mutex_unlock(&hba->hba_access_mutex); |
2862 | return &se_dev->se_dev_group; | 2853 | return &se_dev->se_dev_group; |
2863 | out: | 2854 | out: |
2864 | if (T10_ALUA(se_dev)->default_tg_pt_gp) { | 2855 | if (se_dev->t10_alua.default_tg_pt_gp) { |
2865 | core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp); | 2856 | core_alua_free_tg_pt_gp(se_dev->t10_alua.default_tg_pt_gp); |
2866 | T10_ALUA(se_dev)->default_tg_pt_gp = NULL; | 2857 | se_dev->t10_alua.default_tg_pt_gp = NULL; |
2867 | } | 2858 | } |
2868 | if (dev_stat_grp) | 2859 | if (dev_stat_grp) |
2869 | kfree(dev_stat_grp->default_groups); | 2860 | kfree(dev_stat_grp->default_groups); |
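[Note: target_core_make_subdev() keeps the kernel's single-exit error style through all of the above: every failed allocation jumps to out:, which unwinds the partially-built state in reverse order, guarding each dereference, so one label serves every failure point. Compressed to a userspace sketch:]

#include <stdlib.h>

struct obj { char *a; char *b; };

/* One error label unwinds every partially-built state, newest first. */
static struct obj *make_object(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;
	o->a = malloc(16);
	if (!o->a)
		goto out;
	o->b = malloc(16);
	if (!o->b)
		goto out;
	return o;
out:
	free(o->b);	/* free(NULL) is a no-op, like the kfree() calls above */
	free(o->a);
	free(o);
	return NULL;
}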
@@ -2896,11 +2887,11 @@ static void target_core_drop_subdev( | |||
2896 | mutex_lock(&hba->hba_access_mutex); | 2887 | mutex_lock(&hba->hba_access_mutex); |
2897 | t = hba->transport; | 2888 | t = hba->transport; |
2898 | 2889 | ||
2899 | spin_lock(&se_global->g_device_lock); | 2890 | spin_lock(&se_device_lock); |
2900 | list_del(&se_dev->g_se_dev_list); | 2891 | list_del(&se_dev->se_dev_node); |
2901 | spin_unlock(&se_global->g_device_lock); | 2892 | spin_unlock(&se_device_lock); |
2902 | 2893 | ||
2903 | dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group; | 2894 | dev_stat_grp = &se_dev->dev_stat_grps.stat_group; |
2904 | for (i = 0; dev_stat_grp->default_groups[i]; i++) { | 2895 | for (i = 0; dev_stat_grp->default_groups[i]; i++) { |
2905 | df_item = &dev_stat_grp->default_groups[i]->cg_item; | 2896 | df_item = &dev_stat_grp->default_groups[i]->cg_item; |
2906 | dev_stat_grp->default_groups[i] = NULL; | 2897 | dev_stat_grp->default_groups[i] = NULL; |
@@ -2908,7 +2899,7 @@ static void target_core_drop_subdev( | |||
2908 | } | 2899 | } |
2909 | kfree(dev_stat_grp->default_groups); | 2900 | kfree(dev_stat_grp->default_groups); |
2910 | 2901 | ||
2911 | tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group; | 2902 | tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group; |
2912 | for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) { | 2903 | for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) { |
2913 | df_item = &tg_pt_gp_cg->default_groups[i]->cg_item; | 2904 | df_item = &tg_pt_gp_cg->default_groups[i]->cg_item; |
2914 | tg_pt_gp_cg->default_groups[i] = NULL; | 2905 | tg_pt_gp_cg->default_groups[i] = NULL; |
@@ -2919,7 +2910,7 @@ static void target_core_drop_subdev( | |||
2919 | * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp | 2910 | * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp |
2920 | * directly from target_core_alua_tg_pt_gp_release(). | 2911 | * directly from target_core_alua_tg_pt_gp_release(). |
2921 | */ | 2912 | */ |
2922 | T10_ALUA(se_dev)->default_tg_pt_gp = NULL; | 2913 | se_dev->t10_alua.default_tg_pt_gp = NULL; |
2923 | 2914 | ||
2924 | dev_cg = &se_dev->se_dev_group; | 2915 | dev_cg = &se_dev->se_dev_group; |
2925 | for (i = 0; dev_cg->default_groups[i]; i++) { | 2916 | for (i = 0; dev_cg->default_groups[i]; i++) { |
@@ -2988,13 +2979,13 @@ static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba, | |||
2988 | 2979 | ||
2989 | ret = strict_strtoul(page, 0, &mode_flag); | 2980 | ret = strict_strtoul(page, 0, &mode_flag); |
2990 | if (ret < 0) { | 2981 | if (ret < 0) { |
2991 | printk(KERN_ERR "Unable to extract hba mode flag: %d\n", ret); | 2982 | pr_err("Unable to extract hba mode flag: %d\n", ret); |
2992 | return -EINVAL; | 2983 | return -EINVAL; |
2993 | } | 2984 | } |
2994 | 2985 | ||
2995 | spin_lock(&hba->device_lock); | 2986 | spin_lock(&hba->device_lock); |
2996 | if (!(list_empty(&hba->hba_dev_list))) { | 2987 | if (!list_empty(&hba->hba_dev_list)) { |
2997 | printk(KERN_ERR "Unable to set hba_mode with active devices\n"); | 2988 | pr_err("Unable to set hba_mode with active devices\n"); |
2998 | spin_unlock(&hba->device_lock); | 2989 | spin_unlock(&hba->device_lock); |
2999 | return -EINVAL; | 2990 | return -EINVAL; |
3000 | } | 2991 | } |
@@ -3053,7 +3044,7 @@ static struct config_group *target_core_call_addhbatotarget( | |||
3053 | 3044 | ||
3054 | memset(buf, 0, TARGET_CORE_NAME_MAX_LEN); | 3045 | memset(buf, 0, TARGET_CORE_NAME_MAX_LEN); |
3055 | if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) { | 3046 | if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) { |
3056 | printk(KERN_ERR "Passed *name strlen(): %d exceeds" | 3047 | pr_err("Passed *name strlen(): %d exceeds" |
3057 | " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name), | 3048 | " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name), |
3058 | TARGET_CORE_NAME_MAX_LEN); | 3049 | TARGET_CORE_NAME_MAX_LEN); |
3059 | return ERR_PTR(-ENAMETOOLONG); | 3050 | return ERR_PTR(-ENAMETOOLONG); |
@@ -3061,8 +3052,8 @@ static struct config_group *target_core_call_addhbatotarget( | |||
3061 | snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name); | 3052 | snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name); |
3062 | 3053 | ||
3063 | str = strstr(buf, "_"); | 3054 | str = strstr(buf, "_"); |
3064 | if (!(str)) { | 3055 | if (!str) { |
3065 | printk(KERN_ERR "Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n"); | 3056 | pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n"); |
3066 | return ERR_PTR(-EINVAL); | 3057 | return ERR_PTR(-EINVAL); |
3067 | } | 3058 | } |
3068 | se_plugin_str = buf; | 3059 | se_plugin_str = buf; |
@@ -3071,7 +3062,7 @@ static struct config_group *target_core_call_addhbatotarget( | |||
3071 | * Namely rd_direct and rd_mcp.. | 3062 | * Namely rd_direct and rd_mcp.. |
3072 | */ | 3063 | */ |
3073 | str2 = strstr(str+1, "_"); | 3064 | str2 = strstr(str+1, "_"); |
3074 | if ((str2)) { | 3065 | if (str2) { |
3075 | *str2 = '\0'; /* Terminate for *se_plugin_str */ | 3066 | *str2 = '\0'; /* Terminate for *se_plugin_str */ |
3076 | str2++; /* Skip to start of plugin dependent ID */ | 3067 | str2++; /* Skip to start of plugin dependent ID */ |
3077 | str = str2; | 3068 | str = str2; |
@@ -3082,7 +3073,7 @@ static struct config_group *target_core_call_addhbatotarget( | |||
3082 | 3073 | ||
3083 | ret = strict_strtoul(str, 0, &plugin_dep_id); | 3074 | ret = strict_strtoul(str, 0, &plugin_dep_id); |
3084 | if (ret < 0) { | 3075 | if (ret < 0) { |
3085 | printk(KERN_ERR "strict_strtoul() returned %d for" | 3076 | pr_err("strict_strtoul() returned %d for" |
3086 | " plugin_dep_id\n", ret); | 3077 | " plugin_dep_id\n", ret); |
3087 | return ERR_PTR(-EINVAL); | 3078 | return ERR_PTR(-EINVAL); |
3088 | } | 3079 | } |
@@ -3135,7 +3126,7 @@ static int __init target_core_init_configfs(void) | |||
3135 | struct t10_alua_lu_gp *lu_gp; | 3126 | struct t10_alua_lu_gp *lu_gp; |
3136 | int ret; | 3127 | int ret; |
3137 | 3128 | ||
3138 | printk(KERN_INFO "TARGET_CORE[0]: Loading Generic Kernel Storage" | 3129 | pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage" |
3139 | " Engine: %s on %s/%s on "UTS_RELEASE"\n", | 3130 | " Engine: %s on %s/%s on "UTS_RELEASE"\n", |
3140 | TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine); | 3131 | TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine); |
3141 | 3132 | ||
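[Note: one conversion in this file is not behaviour-preserving: printk(KERN_INFO ...) becomes pr_debug(...), and pr_debug() compiles to nothing unless the file defines DEBUG or CONFIG_DYNAMIC_DEBUG enables it at runtime, so these load/teardown messages go from always-on to opt-in, while pr_err()/pr_warn() remain unconditional. A userspace sketch of that compile-time gating:]

#include <stdio.h>

/* Call sites stay in the source but cost nothing in non-debug builds
 * (##__VA_ARGS__ is a gcc/clang extension, as used by the kernel). */
#ifdef DEBUG
#define pr_debug(fmt, ...)	fprintf(stderr, fmt, ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...)	do { } while (0)
#endif

int main(void)
{
	pr_debug("only printed when built with -DDEBUG\n");
	return 0;
}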
@@ -3145,10 +3136,9 @@ static int __init target_core_init_configfs(void) | |||
3145 | 3136 | ||
3146 | INIT_LIST_HEAD(&g_tf_list); | 3137 | INIT_LIST_HEAD(&g_tf_list); |
3147 | mutex_init(&g_tf_lock); | 3138 | mutex_init(&g_tf_lock); |
3148 | init_scsi_index_table(); | 3139 | ret = init_se_kmem_caches(); |
3149 | ret = init_se_global(); | ||
3150 | if (ret < 0) | 3140 | if (ret < 0) |
3151 | return -1; | 3141 | return ret; |
3152 | /* | 3142 | /* |
3153 | * Create $CONFIGFS/target/core default group for HBA <-> Storage Object | 3143 | * Create $CONFIGFS/target/core default group for HBA <-> Storage Object |
3154 | * and ALUA Logical Unit Group and Target Port Group infrastructure. | 3144 | * and ALUA Logical Unit Group and Target Port Group infrastructure. |
@@ -3156,44 +3146,44 @@ static int __init target_core_init_configfs(void) | |||
3156 | target_cg = &subsys->su_group; | 3146 | target_cg = &subsys->su_group; |
3157 | target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, | 3147 | target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, |
3158 | GFP_KERNEL); | 3148 | GFP_KERNEL); |
3159 | if (!(target_cg->default_groups)) { | 3149 | if (!target_cg->default_groups) { |
3160 | printk(KERN_ERR "Unable to allocate target_cg->default_groups\n"); | 3150 | pr_err("Unable to allocate target_cg->default_groups\n"); |
3161 | goto out_global; | 3151 | goto out_global; |
3162 | } | 3152 | } |
3163 | 3153 | ||
3164 | config_group_init_type_name(&se_global->target_core_hbagroup, | 3154 | config_group_init_type_name(&target_core_hbagroup, |
3165 | "core", &target_core_cit); | 3155 | "core", &target_core_cit); |
3166 | target_cg->default_groups[0] = &se_global->target_core_hbagroup; | 3156 | target_cg->default_groups[0] = &target_core_hbagroup; |
3167 | target_cg->default_groups[1] = NULL; | 3157 | target_cg->default_groups[1] = NULL; |
3168 | /* | 3158 | /* |
3169 | * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/ | 3159 | * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/ |
3170 | */ | 3160 | */ |
3171 | hba_cg = &se_global->target_core_hbagroup; | 3161 | hba_cg = &target_core_hbagroup; |
3172 | hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, | 3162 | hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, |
3173 | GFP_KERNEL); | 3163 | GFP_KERNEL); |
3174 | if (!(hba_cg->default_groups)) { | 3164 | if (!hba_cg->default_groups) { |
3175 | printk(KERN_ERR "Unable to allocate hba_cg->default_groups\n"); | 3165 | pr_err("Unable to allocate hba_cg->default_groups\n"); |
3176 | goto out_global; | 3166 | goto out_global; |
3177 | } | 3167 | } |
3178 | config_group_init_type_name(&se_global->alua_group, | 3168 | config_group_init_type_name(&alua_group, |
3179 | "alua", &target_core_alua_cit); | 3169 | "alua", &target_core_alua_cit); |
3180 | hba_cg->default_groups[0] = &se_global->alua_group; | 3170 | hba_cg->default_groups[0] = &alua_group; |
3181 | hba_cg->default_groups[1] = NULL; | 3171 | hba_cg->default_groups[1] = NULL; |
3182 | /* | 3172 | /* |
3183 | * Add ALUA Logical Unit Group and Target Port Group ConfigFS | 3173 | * Add ALUA Logical Unit Group and Target Port Group ConfigFS |
3184 | * groups under /sys/kernel/config/target/core/alua/ | 3174 | * groups under /sys/kernel/config/target/core/alua/ |
3185 | */ | 3175 | */ |
3186 | alua_cg = &se_global->alua_group; | 3176 | alua_cg = &alua_group; |
3187 | alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, | 3177 | alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, |
3188 | GFP_KERNEL); | 3178 | GFP_KERNEL); |
3189 | if (!(alua_cg->default_groups)) { | 3179 | if (!alua_cg->default_groups) { |
3190 | printk(KERN_ERR "Unable to allocate alua_cg->default_groups\n"); | 3180 | pr_err("Unable to allocate alua_cg->default_groups\n"); |
3191 | goto out_global; | 3181 | goto out_global; |
3192 | } | 3182 | } |
3193 | 3183 | ||
3194 | config_group_init_type_name(&se_global->alua_lu_gps_group, | 3184 | config_group_init_type_name(&alua_lu_gps_group, |
3195 | "lu_gps", &target_core_alua_lu_gps_cit); | 3185 | "lu_gps", &target_core_alua_lu_gps_cit); |
3196 | alua_cg->default_groups[0] = &se_global->alua_lu_gps_group; | 3186 | alua_cg->default_groups[0] = &alua_lu_gps_group; |
3197 | alua_cg->default_groups[1] = NULL; | 3187 | alua_cg->default_groups[1] = NULL; |
3198 | /* | 3188 | /* |
3199 | * Add core/alua/lu_gps/default_lu_gp | 3189 | * Add core/alua/lu_gps/default_lu_gp |
@@ -3202,11 +3192,11 @@ static int __init target_core_init_configfs(void) | |||
3202 | if (IS_ERR(lu_gp)) | 3192 | if (IS_ERR(lu_gp)) |
3203 | goto out_global; | 3193 | goto out_global; |
3204 | 3194 | ||
3205 | lu_gp_cg = &se_global->alua_lu_gps_group; | 3195 | lu_gp_cg = &alua_lu_gps_group; |
3206 | lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, | 3196 | lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, |
3207 | GFP_KERNEL); | 3197 | GFP_KERNEL); |
3208 | if (!(lu_gp_cg->default_groups)) { | 3198 | if (!lu_gp_cg->default_groups) { |
3209 | printk(KERN_ERR "Unable to allocate lu_gp_cg->default_groups\n"); | 3199 | pr_err("Unable to allocate lu_gp_cg->default_groups\n"); |
3210 | goto out_global; | 3200 | goto out_global; |
3211 | } | 3201 | } |
3212 | 3202 | ||
@@ -3214,17 +3204,17 @@ static int __init target_core_init_configfs(void) | |||
3214 | &target_core_alua_lu_gp_cit); | 3204 | &target_core_alua_lu_gp_cit); |
3215 | lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group; | 3205 | lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group; |
3216 | lu_gp_cg->default_groups[1] = NULL; | 3206 | lu_gp_cg->default_groups[1] = NULL; |
3217 | se_global->default_lu_gp = lu_gp; | 3207 | default_lu_gp = lu_gp; |
3218 | /* | 3208 | /* |
3219 | * Register the target_core_mod subsystem with configfs. | 3209 | * Register the target_core_mod subsystem with configfs. |
3220 | */ | 3210 | */ |
3221 | ret = configfs_register_subsystem(subsys); | 3211 | ret = configfs_register_subsystem(subsys); |
3222 | if (ret < 0) { | 3212 | if (ret < 0) { |
3223 | printk(KERN_ERR "Error %d while registering subsystem %s\n", | 3213 | pr_err("Error %d while registering subsystem %s\n", |
3224 | ret, subsys->su_group.cg_item.ci_namebuf); | 3214 | ret, subsys->su_group.cg_item.ci_namebuf); |
3225 | goto out_global; | 3215 | goto out_global; |
3226 | } | 3216 | } |
3227 | printk(KERN_INFO "TARGET_CORE[0]: Initialized ConfigFS Fabric" | 3217 | pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric" |
3228 | " Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s" | 3218 | " Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s" |
3229 | " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine); | 3219 | " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine); |
3230 | /* | 3220 | /* |
@@ -3244,9 +3234,9 @@ out: | |||
3244 | core_dev_release_virtual_lun0(); | 3234 | core_dev_release_virtual_lun0(); |
3245 | rd_module_exit(); | 3235 | rd_module_exit(); |
3246 | out_global: | 3236 | out_global: |
3247 | if (se_global->default_lu_gp) { | 3237 | if (default_lu_gp) { |
3248 | core_alua_free_lu_gp(se_global->default_lu_gp); | 3238 | core_alua_free_lu_gp(default_lu_gp); |
3249 | se_global->default_lu_gp = NULL; | 3239 | default_lu_gp = NULL; |
3250 | } | 3240 | } |
3251 | if (lu_gp_cg) | 3241 | if (lu_gp_cg) |
3252 | kfree(lu_gp_cg->default_groups); | 3242 | kfree(lu_gp_cg->default_groups); |
@@ -3255,8 +3245,8 @@ out_global: | |||
3255 | if (hba_cg) | 3245 | if (hba_cg) |
3256 | kfree(hba_cg->default_groups); | 3246 | kfree(hba_cg->default_groups); |
3257 | kfree(target_cg->default_groups); | 3247 | kfree(target_cg->default_groups); |
3258 | release_se_global(); | 3248 | release_se_kmem_caches(); |
3259 | return -1; | 3249 | return ret; |
3260 | } | 3250 | } |
3261 | 3251 | ||
3262 | static void __exit target_core_exit_configfs(void) | 3252 | static void __exit target_core_exit_configfs(void) |
@@ -3266,10 +3256,9 @@ static void __exit target_core_exit_configfs(void) | |||
3266 | struct config_item *item; | 3256 | struct config_item *item; |
3267 | int i; | 3257 | int i; |
3268 | 3258 | ||
3269 | se_global->in_shutdown = 1; | ||
3270 | subsys = target_core_subsystem[0]; | 3259 | subsys = target_core_subsystem[0]; |
3271 | 3260 | ||
3272 | lu_gp_cg = &se_global->alua_lu_gps_group; | 3261 | lu_gp_cg = &alua_lu_gps_group; |
3273 | for (i = 0; lu_gp_cg->default_groups[i]; i++) { | 3262 | for (i = 0; lu_gp_cg->default_groups[i]; i++) { |
3274 | item = &lu_gp_cg->default_groups[i]->cg_item; | 3263 | item = &lu_gp_cg->default_groups[i]->cg_item; |
3275 | lu_gp_cg->default_groups[i] = NULL; | 3264 | lu_gp_cg->default_groups[i] = NULL; |
@@ -3278,7 +3267,7 @@ static void __exit target_core_exit_configfs(void) | |||
3278 | kfree(lu_gp_cg->default_groups); | 3267 | kfree(lu_gp_cg->default_groups); |
3279 | lu_gp_cg->default_groups = NULL; | 3268 | lu_gp_cg->default_groups = NULL; |
3280 | 3269 | ||
3281 | alua_cg = &se_global->alua_group; | 3270 | alua_cg = &alua_group; |
3282 | for (i = 0; alua_cg->default_groups[i]; i++) { | 3271 | for (i = 0; alua_cg->default_groups[i]; i++) { |
3283 | item = &alua_cg->default_groups[i]->cg_item; | 3272 | item = &alua_cg->default_groups[i]->cg_item; |
3284 | alua_cg->default_groups[i] = NULL; | 3273 | alua_cg->default_groups[i] = NULL; |
@@ -3287,7 +3276,7 @@ static void __exit target_core_exit_configfs(void) | |||
3287 | kfree(alua_cg->default_groups); | 3276 | kfree(alua_cg->default_groups); |
3288 | alua_cg->default_groups = NULL; | 3277 | alua_cg->default_groups = NULL; |
3289 | 3278 | ||
3290 | hba_cg = &se_global->target_core_hbagroup; | 3279 | hba_cg = &target_core_hbagroup; |
3291 | for (i = 0; hba_cg->default_groups[i]; i++) { | 3280 | for (i = 0; hba_cg->default_groups[i]; i++) { |
3292 | item = &hba_cg->default_groups[i]->cg_item; | 3281 | item = &hba_cg->default_groups[i]->cg_item; |
3293 | hba_cg->default_groups[i] = NULL; | 3282 | hba_cg->default_groups[i] = NULL; |
@@ -3302,17 +3291,15 @@ static void __exit target_core_exit_configfs(void) | |||
3302 | configfs_unregister_subsystem(subsys); | 3291 | configfs_unregister_subsystem(subsys); |
3303 | kfree(subsys->su_group.default_groups); | 3292 | kfree(subsys->su_group.default_groups); |
3304 | 3293 | ||
3305 | core_alua_free_lu_gp(se_global->default_lu_gp); | 3294 | core_alua_free_lu_gp(default_lu_gp); |
3306 | se_global->default_lu_gp = NULL; | 3295 | default_lu_gp = NULL; |
3307 | 3296 | ||
3308 | printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric" | 3297 | pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric" |
3309 | " Infrastructure\n"); | 3298 | " Infrastructure\n"); |
3310 | 3299 | ||
3311 | core_dev_release_virtual_lun0(); | 3300 | core_dev_release_virtual_lun0(); |
3312 | rd_module_exit(); | 3301 | rd_module_exit(); |
3313 | release_se_global(); | 3302 | release_se_kmem_caches(); |
3314 | |||
3315 | return; | ||
3316 | } | 3303 | } |
3317 | 3304 | ||
3318 | MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS"); | 3305 | MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS"); |
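[Note: end of the target_core_configfs.c diff. Besides the printk conversion, its recurring theme is the removal of the struct se_global singleton: the configfs groups, the default LU group, and the device list/lock become plain file- and core-local symbols (target_core_hbagroup, alua_group, alua_lu_gps_group, default_lu_gp, se_device_lock, se_dev_list), the in_shutdown flag disappears with it, and init_se_global()/release_se_global() give way to init_se_kmem_caches()/release_se_kmem_caches(). Approximate before/after of the data, reconstructed and assuming the usual kernel headers, not verbatim:]

/* before: one heap-allocated aggregate, reached through a pointer */
struct se_global {
	struct config_group	target_core_hbagroup;
	struct config_group	alua_group;
	struct config_group	alua_lu_gps_group;
	struct t10_alua_lu_gp	*default_lu_gp;
	spinlock_t		g_device_lock;
	struct list_head	g_se_dev_list;
};
static struct se_global *se_global;

/* after: plain statics, one less dereference and no init-time allocation */
static struct config_group	target_core_hbagroup;
static struct config_group	alua_group;
static struct config_group	alua_lu_gps_group;
static struct t10_alua_lu_gp	*default_lu_gp;
static DEFINE_SPINLOCK(se_device_lock);
static LIST_HEAD(se_dev_list);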
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index ba698ea62bb2..b38b6c993e65 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * Filename: target_core_device.c (based on iscsi_target_device.c) | 2 | * Filename: target_core_device.c (based on iscsi_target_device.c) |
3 | * | 3 | * |
4 | * This file contains the iSCSI Virtual Device and Disk Transport | 4 | * This file contains the TCM Virtual Device and Disk Transport |
5 | * agnostic related functions. | 5 | * agnostic related functions. |
6 | * | 6 | * |
7 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. | 7 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. |
@@ -54,177 +54,183 @@ | |||
54 | static void se_dev_start(struct se_device *dev); | 54 | static void se_dev_start(struct se_device *dev); |
55 | static void se_dev_stop(struct se_device *dev); | 55 | static void se_dev_stop(struct se_device *dev); |
56 | 56 | ||
57 | int transport_get_lun_for_cmd( | 57 | static struct se_hba *lun0_hba; |
58 | struct se_cmd *se_cmd, | 58 | static struct se_subsystem_dev *lun0_su_dev; |
59 | unsigned char *cdb, | 59 | /* not static, needed by tpg.c */ |
60 | u32 unpacked_lun) | 60 | struct se_device *g_lun0_dev; |
61 | |||
62 | int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) | ||
61 | { | 63 | { |
62 | struct se_dev_entry *deve; | ||
63 | struct se_lun *se_lun = NULL; | 64 | struct se_lun *se_lun = NULL; |
64 | struct se_session *se_sess = SE_SESS(se_cmd); | 65 | struct se_session *se_sess = se_cmd->se_sess; |
66 | struct se_device *dev; | ||
65 | unsigned long flags; | 67 | unsigned long flags; |
66 | int read_only = 0; | ||
67 | 68 | ||
68 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | 69 | if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) { |
69 | deve = se_cmd->se_deve = | 70 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; |
70 | &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; | 71 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
71 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | 72 | return -ENODEV; |
72 | if (se_cmd) { | 73 | } |
73 | deve->total_cmds++; | 74 | |
74 | deve->total_bytes += se_cmd->data_length; | 75 | spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags); |
75 | 76 | se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun]; | |
76 | if (se_cmd->data_direction == DMA_TO_DEVICE) { | 77 | if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { |
77 | if (deve->lun_flags & | 78 | struct se_dev_entry *deve = se_cmd->se_deve; |
78 | TRANSPORT_LUNFLAGS_READ_ONLY) { | 79 | |
79 | read_only = 1; | 80 | deve->total_cmds++; |
80 | goto out; | 81 | deve->total_bytes += se_cmd->data_length; |
81 | } | 82 | |
82 | deve->write_bytes += se_cmd->data_length; | 83 | if ((se_cmd->data_direction == DMA_TO_DEVICE) && |
83 | } else if (se_cmd->data_direction == | 84 | (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { |
84 | DMA_FROM_DEVICE) { | 85 | se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; |
85 | deve->read_bytes += se_cmd->data_length; | 86 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
86 | } | 87 | pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" |
88 | " Access for 0x%08x\n", | ||
89 | se_cmd->se_tfo->get_fabric_name(), | ||
90 | unpacked_lun); | ||
91 | spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); | ||
92 | return -EACCES; | ||
87 | } | 93 | } |
94 | |||
95 | if (se_cmd->data_direction == DMA_TO_DEVICE) | ||
96 | deve->write_bytes += se_cmd->data_length; | ||
97 | else if (se_cmd->data_direction == DMA_FROM_DEVICE) | ||
98 | deve->read_bytes += se_cmd->data_length; | ||
99 | |||
88 | deve->deve_cmds++; | 100 | deve->deve_cmds++; |
89 | 101 | ||
90 | se_lun = se_cmd->se_lun = deve->se_lun; | 102 | se_lun = deve->se_lun; |
103 | se_cmd->se_lun = deve->se_lun; | ||
91 | se_cmd->pr_res_key = deve->pr_res_key; | 104 | se_cmd->pr_res_key = deve->pr_res_key; |
92 | se_cmd->orig_fe_lun = unpacked_lun; | 105 | se_cmd->orig_fe_lun = unpacked_lun; |
93 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; | 106 | se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; |
94 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; | 107 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; |
95 | } | 108 | } |
96 | out: | 109 | spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); |
97 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | ||
98 | 110 | ||
99 | if (!se_lun) { | 111 | if (!se_lun) { |
100 | if (read_only) { | 112 | /* |
101 | se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; | 113 | * Use the se_portal_group->tpg_virt_lun0 to allow for |
114 | * REPORT_LUNS, et al to be returned when no active | ||
115 | * MappedLUN=0 exists for this Initiator Port. | ||
116 | */ | ||
117 | if (unpacked_lun != 0) { | ||
118 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; | ||
102 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 119 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
103 | printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" | 120 | pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" |
104 | " Access for 0x%08x\n", | 121 | " Access for 0x%08x\n", |
105 | CMD_TFO(se_cmd)->get_fabric_name(), | 122 | se_cmd->se_tfo->get_fabric_name(), |
106 | unpacked_lun); | 123 | unpacked_lun); |
107 | return -1; | 124 | return -ENODEV; |
108 | } else { | ||
109 | /* | ||
110 | * Use the se_portal_group->tpg_virt_lun0 to allow for | ||
111 | * REPORT_LUNS, et al to be returned when no active | ||
112 | * MappedLUN=0 exists for this Initiator Port. | ||
113 | */ | ||
114 | if (unpacked_lun != 0) { | ||
115 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; | ||
116 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
117 | printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" | ||
118 | " Access for 0x%08x\n", | ||
119 | CMD_TFO(se_cmd)->get_fabric_name(), | ||
120 | unpacked_lun); | ||
121 | return -1; | ||
122 | } | ||
123 | /* | ||
124 | * Force WRITE PROTECT for virtual LUN 0 | ||
125 | */ | ||
126 | if ((se_cmd->data_direction != DMA_FROM_DEVICE) && | ||
127 | (se_cmd->data_direction != DMA_NONE)) { | ||
128 | se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; | ||
129 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
130 | return -1; | ||
131 | } | ||
132 | #if 0 | ||
133 | printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n", | ||
134 | CMD_TFO(se_cmd)->get_fabric_name()); | ||
135 | #endif | ||
136 | se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; | ||
137 | se_cmd->orig_fe_lun = 0; | ||
138 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; | ||
139 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; | ||
140 | } | 125 | } |
126 | /* | ||
127 | * Force WRITE PROTECT for virtual LUN 0 | ||
128 | */ | ||
129 | if ((se_cmd->data_direction != DMA_FROM_DEVICE) && | ||
130 | (se_cmd->data_direction != DMA_NONE)) { | ||
131 | se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; | ||
132 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
133 | return -EACCES; | ||
134 | } | ||
135 | |||
136 | se_lun = &se_sess->se_tpg->tpg_virt_lun0; | ||
137 | se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; | ||
138 | se_cmd->orig_fe_lun = 0; | ||
139 | se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; | ||
140 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; | ||
141 | } | 141 | } |
142 | /* | 142 | /* |
143 | * Determine if the struct se_lun is online. | 143 | * Determine if the struct se_lun is online. |
144 | * FIXME: Check for LUN_RESET + UNIT Attention | ||
144 | */ | 145 | */ |
145 | /* #warning FIXME: Check for LUN_RESET + UNIT Attention */ | ||
146 | if (se_dev_check_online(se_lun->lun_se_dev) != 0) { | 146 | if (se_dev_check_online(se_lun->lun_se_dev) != 0) { |
147 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; | 147 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; |
148 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 148 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
149 | return -1; | 149 | return -ENODEV; |
150 | } | 150 | } |
151 | 151 | ||
152 | { | 152 | /* Directly associate cmd with se_dev */ |
153 | struct se_device *dev = se_lun->lun_se_dev; | 153 | se_cmd->se_dev = se_lun->lun_se_dev; |
154 | spin_lock_irq(&dev->stats_lock); | 154 | |
155 | /* TODO: get rid of this and use atomics for stats */ | ||
156 | dev = se_lun->lun_se_dev; | ||
157 | spin_lock_irqsave(&dev->stats_lock, flags); | ||
155 | dev->num_cmds++; | 158 | dev->num_cmds++; |
156 | if (se_cmd->data_direction == DMA_TO_DEVICE) | 159 | if (se_cmd->data_direction == DMA_TO_DEVICE) |
157 | dev->write_bytes += se_cmd->data_length; | 160 | dev->write_bytes += se_cmd->data_length; |
158 | else if (se_cmd->data_direction == DMA_FROM_DEVICE) | 161 | else if (se_cmd->data_direction == DMA_FROM_DEVICE) |
159 | dev->read_bytes += se_cmd->data_length; | 162 | dev->read_bytes += se_cmd->data_length; |
160 | spin_unlock_irq(&dev->stats_lock); | 163 | spin_unlock_irqrestore(&dev->stats_lock, flags); |
161 | } | ||
162 | 164 | ||
163 | /* | 165 | /* |
164 | * Add the iscsi_cmd_t to the struct se_lun's cmd list. This list is used | 166 | * Add the iscsi_cmd_t to the struct se_lun's cmd list. This list is used |
165 | * for tracking state of struct se_cmds during LUN shutdown events. | 167 | * for tracking state of struct se_cmds during LUN shutdown events. |
166 | */ | 168 | */ |
167 | spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); | 169 | spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); |
168 | list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list); | 170 | list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list); |
169 | atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1); | 171 | atomic_set(&se_cmd->transport_lun_active, 1); |
170 | #if 0 | ||
171 | printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n", | ||
172 | CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun); | ||
173 | #endif | ||
174 | spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); | 172 | spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); |
175 | 173 | ||
176 | return 0; | 174 | return 0; |
177 | } | 175 | } |
178 | EXPORT_SYMBOL(transport_get_lun_for_cmd); | 176 | EXPORT_SYMBOL(transport_lookup_cmd_lun); |
179 | 177 | ||
180 | int transport_get_lun_for_tmr( | 178 | int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun) |
181 | struct se_cmd *se_cmd, | ||
182 | u32 unpacked_lun) | ||
183 | { | 179 | { |
184 | struct se_device *dev = NULL; | ||
185 | struct se_dev_entry *deve; | 180 | struct se_dev_entry *deve; |
186 | struct se_lun *se_lun = NULL; | 181 | struct se_lun *se_lun = NULL; |
187 | struct se_session *se_sess = SE_SESS(se_cmd); | 182 | struct se_session *se_sess = se_cmd->se_sess; |
188 | struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; | 183 | struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; |
184 | unsigned long flags; | ||
185 | |||
186 | if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) { | ||
187 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; | ||
188 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
189 | return -ENODEV; | ||
190 | } | ||
191 | |||
192 | spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags); | ||
193 | se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun]; | ||
194 | deve = se_cmd->se_deve; | ||
189 | 195 | ||
190 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | ||
191 | deve = se_cmd->se_deve = | ||
192 | &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; | ||
193 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | 196 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { |
194 | se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun; | 197 | se_tmr->tmr_lun = deve->se_lun; |
195 | dev = se_lun->lun_se_dev; | 198 | se_cmd->se_lun = deve->se_lun; |
199 | se_lun = deve->se_lun; | ||
196 | se_cmd->pr_res_key = deve->pr_res_key; | 200 | se_cmd->pr_res_key = deve->pr_res_key; |
197 | se_cmd->orig_fe_lun = unpacked_lun; | 201 | se_cmd->orig_fe_lun = unpacked_lun; |
198 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; | 202 | se_cmd->se_orig_obj_ptr = se_cmd->se_dev; |
199 | /* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */ | ||
200 | } | 203 | } |
201 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | 204 | spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); |
202 | 205 | ||
203 | if (!se_lun) { | 206 | if (!se_lun) { |
204 | printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" | 207 | pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" |
205 | " Access for 0x%08x\n", | 208 | " Access for 0x%08x\n", |
206 | CMD_TFO(se_cmd)->get_fabric_name(), | 209 | se_cmd->se_tfo->get_fabric_name(), |
207 | unpacked_lun); | 210 | unpacked_lun); |
208 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 211 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
209 | return -1; | 212 | return -ENODEV; |
210 | } | 213 | } |
211 | /* | 214 | /* |
212 | * Determine if the struct se_lun is online. | 215 | * Determine if the struct se_lun is online. |
216 | * FIXME: Check for LUN_RESET + UNIT Attention | ||
213 | */ | 217 | */ |
214 | /* #warning FIXME: Check for LUN_RESET + UNIT Attention */ | ||
215 | if (se_dev_check_online(se_lun->lun_se_dev) != 0) { | 218 | if (se_dev_check_online(se_lun->lun_se_dev) != 0) { |
216 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 219 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
217 | return -1; | 220 | return -ENODEV; |
218 | } | 221 | } |
219 | se_tmr->tmr_dev = dev; | ||
220 | 222 | ||
221 | spin_lock(&dev->se_tmr_lock); | 223 | /* Directly associate cmd with se_dev */ |
222 | list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list); | 224 | se_cmd->se_dev = se_lun->lun_se_dev; |
223 | spin_unlock(&dev->se_tmr_lock); | 225 | se_tmr->tmr_dev = se_lun->lun_se_dev; |
226 | |||
227 | spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags); | ||
228 | list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list); | ||
229 | spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags); | ||
224 | 230 | ||
225 | return 0; | 231 | return 0; |
226 | } | 232 | } |
227 | EXPORT_SYMBOL(transport_get_lun_for_tmr); | 233 | EXPORT_SYMBOL(transport_lookup_tmr_lun); |
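[Note: the two lookup rewrites above share one contract change visible to callers: transport_get_lun_for_cmd()/_tmr() returned a bare -1 on every failure, while transport_lookup_cmd_lun()/transport_lookup_tmr_lun() return distinct errnos (-ENODEV for an out-of-range, unmapped or offline LUN, -EACCES for a write to a read-only or virtual-LUN-0 mapping), with the sense reason still recorded on se_cmd. They also bound-check unpacked_lun up front and take device_list_lock with the irqsave variants. A sketch of what the errnos buy a hypothetical caller:]

#include <errno.h>
#include <stdio.h>

/* Hypothetical caller: distinct errnos replace decoding a bare -1. */
static void handle_lookup(int ret)
{
	switch (ret) {
	case 0:
		printf("LUN resolved\n");
		break;
	case -EACCES:
		printf("write-protected (TCM_WRITE_PROTECTED sense set)\n");
		break;
	case -ENODEV:
		printf("no such LUN (TCM_NON_EXISTENT_LUN sense set)\n");
		break;
	default:
		printf("unexpected error %d\n", ret);
	}
}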
228 | 234 | ||
229 | /* | 235 | /* |
230 | * This function is called from core_scsi3_emulate_pro_register_and_move() | 236 | * This function is called from core_scsi3_emulate_pro_register_and_move() |
@@ -249,17 +255,17 @@ struct se_dev_entry *core_get_se_deve_from_rtpi( | |||
249 | continue; | 255 | continue; |
250 | 256 | ||
251 | lun = deve->se_lun; | 257 | lun = deve->se_lun; |
252 | if (!(lun)) { | 258 | if (!lun) { |
253 | printk(KERN_ERR "%s device entries device pointer is" | 259 | pr_err("%s device entries device pointer is" |
254 | " NULL, but Initiator has access.\n", | 260 | " NULL, but Initiator has access.\n", |
255 | TPG_TFO(tpg)->get_fabric_name()); | 261 | tpg->se_tpg_tfo->get_fabric_name()); |
256 | continue; | 262 | continue; |
257 | } | 263 | } |
258 | port = lun->lun_sep; | 264 | port = lun->lun_sep; |
259 | if (!(port)) { | 265 | if (!port) { |
260 | printk(KERN_ERR "%s device entries device pointer is" | 266 | pr_err("%s device entries device pointer is" |
261 | " NULL, but Initiator has access.\n", | 267 | " NULL, but Initiator has access.\n", |
262 | TPG_TFO(tpg)->get_fabric_name()); | 268 | tpg->se_tpg_tfo->get_fabric_name()); |
263 | continue; | 269 | continue; |
264 | } | 270 | } |
265 | if (port->sep_rtpi != rtpi) | 271 | if (port->sep_rtpi != rtpi) |
@@ -295,9 +301,9 @@ int core_free_device_list_for_node( | |||
295 | continue; | 301 | continue; |
296 | 302 | ||
297 | if (!deve->se_lun) { | 303 | if (!deve->se_lun) { |
298 | printk(KERN_ERR "%s device entries device pointer is" | 304 | pr_err("%s device entries device pointer is" |
299 | " NULL, but Initiator has access.\n", | 305 | " NULL, but Initiator has access.\n", |
300 | TPG_TFO(tpg)->get_fabric_name()); | 306 | tpg->se_tpg_tfo->get_fabric_name()); |
301 | continue; | 307 | continue; |
302 | } | 308 | } |
303 | lun = deve->se_lun; | 309 | lun = deve->se_lun; |
@@ -323,8 +329,6 @@ void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) | |||
323 | deve = &se_nacl->device_list[se_cmd->orig_fe_lun]; | 329 | deve = &se_nacl->device_list[se_cmd->orig_fe_lun]; |
324 | deve->deve_cmds--; | 330 | deve->deve_cmds--; |
325 | spin_unlock_irq(&se_nacl->device_list_lock); | 331 | spin_unlock_irq(&se_nacl->device_list_lock); |
326 | |||
327 | return; | ||
328 | } | 332 | } |
329 | 333 | ||
330 | void core_update_device_list_access( | 334 | void core_update_device_list_access( |
@@ -344,8 +348,6 @@ void core_update_device_list_access( | |||
344 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; | 348 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; |
345 | } | 349 | } |
346 | spin_unlock_irq(&nacl->device_list_lock); | 350 | spin_unlock_irq(&nacl->device_list_lock); |
347 | |||
348 | return; | ||
349 | } | 351 | } |
350 | 352 | ||
351 | /* core_update_device_list_for_node(): | 353 | /* core_update_device_list_for_node(): |
@@ -370,7 +372,7 @@ int core_update_device_list_for_node( | |||
370 | * struct se_dev_entry pointers below as logic in | 372 | * struct se_dev_entry pointers below as logic in |
371 | * core_alua_do_transition_tg_pt() depends on these being present. | 373 | * core_alua_do_transition_tg_pt() depends on these being present. |
372 | */ | 374 | */ |
373 | if (!(enable)) { | 375 | if (!enable) { |
374 | /* | 376 | /* |
375 | * deve->se_lun_acl will be NULL for demo-mode created LUNs | 377 | * deve->se_lun_acl will be NULL for demo-mode created LUNs |
376 | * that have not been explicitly concerted to MappedLUNs -> | 378 | * that have not been explicitly concerted to MappedLUNs -> |
@@ -393,18 +395,18 @@ int core_update_device_list_for_node( | |||
393 | */ | 395 | */ |
394 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | 396 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { |
395 | if (deve->se_lun_acl != NULL) { | 397 | if (deve->se_lun_acl != NULL) { |
396 | printk(KERN_ERR "struct se_dev_entry->se_lun_acl" | 398 | pr_err("struct se_dev_entry->se_lun_acl" |
397 | " already set for demo mode -> explict" | 399 | " already set for demo mode -> explict" |
398 | " LUN ACL transition\n"); | 400 | " LUN ACL transition\n"); |
399 | spin_unlock_irq(&nacl->device_list_lock); | 401 | spin_unlock_irq(&nacl->device_list_lock); |
400 | return -1; | 402 | return -EINVAL; |
401 | } | 403 | } |
402 | if (deve->se_lun != lun) { | 404 | if (deve->se_lun != lun) { |
403 | printk(KERN_ERR "struct se_dev_entry->se_lun does" | 405 | pr_err("struct se_dev_entry->se_lun does" |
404 | " match passed struct se_lun for demo mode" | 406 | " match passed struct se_lun for demo mode" |
405 | " -> explict LUN ACL transition\n"); | 407 | " -> explict LUN ACL transition\n"); |
406 | spin_unlock_irq(&nacl->device_list_lock); | 408 | spin_unlock_irq(&nacl->device_list_lock); |
407 | return -1; | 409 | return -EINVAL; |
408 | } | 410 | } |
409 | deve->se_lun_acl = lun_acl; | 411 | deve->se_lun_acl = lun_acl; |
410 | trans = 1; | 412 | trans = 1; |
@@ -492,8 +494,6 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) | |||
492 | spin_lock_bh(&tpg->acl_node_lock); | 494 | spin_lock_bh(&tpg->acl_node_lock); |
493 | } | 495 | } |
494 | spin_unlock_bh(&tpg->acl_node_lock); | 496 | spin_unlock_bh(&tpg->acl_node_lock); |
495 | |||
496 | return; | ||
497 | } | 497 | } |
498 | 498 | ||
499 | static struct se_port *core_alloc_port(struct se_device *dev) | 499 | static struct se_port *core_alloc_port(struct se_device *dev) |
@@ -501,9 +501,9 @@ static struct se_port *core_alloc_port(struct se_device *dev) | |||
501 | struct se_port *port, *port_tmp; | 501 | struct se_port *port, *port_tmp; |
502 | 502 | ||
503 | port = kzalloc(sizeof(struct se_port), GFP_KERNEL); | 503 | port = kzalloc(sizeof(struct se_port), GFP_KERNEL); |
504 | if (!(port)) { | 504 | if (!port) { |
505 | printk(KERN_ERR "Unable to allocate struct se_port\n"); | 505 | pr_err("Unable to allocate struct se_port\n"); |
506 | return NULL; | 506 | return ERR_PTR(-ENOMEM); |
507 | } | 507 | } |
508 | INIT_LIST_HEAD(&port->sep_alua_list); | 508 | INIT_LIST_HEAD(&port->sep_alua_list); |
509 | INIT_LIST_HEAD(&port->sep_list); | 509 | INIT_LIST_HEAD(&port->sep_list); |
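[Note: core_alloc_port() is converted from returning NULL on every failure to encoding the cause in the pointer: ERR_PTR(-ENOMEM) here and ERR_PTR(-ENOSPC) below when the relative-port-ID space is exhausted, so the core_dev_export() caller at the end of this diff can test IS_ERR(port) instead of a bare NULL. Sketch of the idiom, assuming linux/err.h and struct se_port from the target headers:]

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* Allocation that reports *why* it failed, mirroring the hunks here. */
static struct se_port *alloc_port_sketch(u32 port_count)
{
	struct se_port *port = kzalloc(sizeof(*port), GFP_KERNEL);

	if (!port)
		return ERR_PTR(-ENOMEM);
	if (port_count == 0x0000ffff) {
		kfree(port);
		return ERR_PTR(-ENOSPC);	/* RTPI space exhausted */
	}
	return port;
}

/* caller, as in core_dev_export():
 *	port = alloc_port_sketch(dev->dev_port_count);
 *	if (IS_ERR(port))
 *		return PTR_ERR(port);
 */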
@@ -513,10 +513,10 @@ static struct se_port *core_alloc_port(struct se_device *dev) | |||
513 | 513 | ||
514 | spin_lock(&dev->se_port_lock); | 514 | spin_lock(&dev->se_port_lock); |
515 | if (dev->dev_port_count == 0x0000ffff) { | 515 | if (dev->dev_port_count == 0x0000ffff) { |
516 | printk(KERN_WARNING "Reached dev->dev_port_count ==" | 516 | pr_warn("Reached dev->dev_port_count ==" |
517 | " 0x0000ffff\n"); | 517 | " 0x0000ffff\n"); |
518 | spin_unlock(&dev->se_port_lock); | 518 | spin_unlock(&dev->se_port_lock); |
519 | return NULL; | 519 | return ERR_PTR(-ENOSPC); |
520 | } | 520 | } |
521 | again: | 521 | again: |
522 | /* | 522 | /* |
@@ -532,7 +532,7 @@ again: | |||
532 | * 3h to FFFFh Relative port 3 through 65 535 | 532 | * 3h to FFFFh Relative port 3 through 65 535 |
533 | */ | 533 | */ |
534 | port->sep_rtpi = dev->dev_rpti_counter++; | 534 | port->sep_rtpi = dev->dev_rpti_counter++; |
535 | if (!(port->sep_rtpi)) | 535 | if (!port->sep_rtpi) |
536 | goto again; | 536 | goto again; |
537 | 537 | ||
538 | list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) { | 538 | list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) { |
@@ -554,7 +554,7 @@ static void core_export_port( | |||
554 | struct se_port *port, | 554 | struct se_port *port, |
555 | struct se_lun *lun) | 555 | struct se_lun *lun) |
556 | { | 556 | { |
557 | struct se_subsystem_dev *su_dev = SU_DEV(dev); | 557 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
558 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL; | 558 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL; |
559 | 559 | ||
560 | spin_lock(&dev->se_port_lock); | 560 | spin_lock(&dev->se_port_lock); |
@@ -567,20 +567,20 @@ static void core_export_port( | |||
567 | list_add_tail(&port->sep_list, &dev->dev_sep_list); | 567 | list_add_tail(&port->sep_list, &dev->dev_sep_list); |
568 | spin_unlock(&dev->se_port_lock); | 568 | spin_unlock(&dev->se_port_lock); |
569 | 569 | ||
570 | if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) { | 570 | if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { |
571 | tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); | 571 | tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); |
572 | if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { | 572 | if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { |
573 | printk(KERN_ERR "Unable to allocate t10_alua_tg_pt" | 573 | pr_err("Unable to allocate t10_alua_tg_pt" |
574 | "_gp_member_t\n"); | 574 | "_gp_member_t\n"); |
575 | return; | 575 | return; |
576 | } | 576 | } |
577 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 577 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); |
578 | __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, | 578 | __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, |
579 | T10_ALUA(su_dev)->default_tg_pt_gp); | 579 | su_dev->t10_alua.default_tg_pt_gp); |
580 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 580 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); |
581 | printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port" | 581 | pr_debug("%s/%s: Adding to default ALUA Target Port" |
582 | " Group: alua/default_tg_pt_gp\n", | 582 | " Group: alua/default_tg_pt_gp\n", |
583 | TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name()); | 583 | dev->transport->name, tpg->se_tpg_tfo->get_fabric_name()); |
584 | } | 584 | } |
585 | 585 | ||
586 | dev->dev_port_count++; | 586 | dev->dev_port_count++; |
@@ -607,8 +607,6 @@ static void core_release_port(struct se_device *dev, struct se_port *port) | |||
607 | list_del(&port->sep_list); | 607 | list_del(&port->sep_list); |
608 | dev->dev_port_count--; | 608 | dev->dev_port_count--; |
609 | kfree(port); | 609 | kfree(port); |
610 | |||
611 | return; | ||
612 | } | 610 | } |
613 | 611 | ||
614 | int core_dev_export( | 612 | int core_dev_export( |
@@ -619,8 +617,8 @@ int core_dev_export( | |||
619 | struct se_port *port; | 617 | struct se_port *port; |
620 | 618 | ||
621 | port = core_alloc_port(dev); | 619 | port = core_alloc_port(dev); |
622 | if (!(port)) | 620 | if (IS_ERR(port)) |
623 | return -1; | 621 | return PTR_ERR(port); |
624 | 622 | ||
625 | lun->lun_se_dev = dev; | 623 | lun->lun_se_dev = dev; |
626 | se_dev_start(dev); | 624 | se_dev_start(dev); |
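core_alloc_port() now encodes its failure reason in the returned pointer (ERR_PTR(-ENOMEM) or ERR_PTR(-ENOSPC)), and core_dev_export() recovers it with IS_ERR()/PTR_ERR() instead of collapsing every failure to -1. A sketch of the idiom as a userspace re-implementation; the real helpers live in <linux/err.h>:

    #include <errno.h>
    #include <stdint.h>

    #define MAX_ERRNO 4095 /* errnos occupy the top page of the address space */

    static inline void *ERR_PTR(long error)    { return (void *)error; }
    static inline long  PTR_ERR(const void *p) { return (long)p; }
    static inline int   IS_ERR(const void *p)
    {
            return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    /* Caller pattern mirroring core_dev_export():
     *
     *     port = core_alloc_port(dev);
     *     if (IS_ERR(port))
     *             return PTR_ERR(port);
     */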
@@ -657,33 +655,35 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd) | |||
657 | { | 655 | { |
658 | struct se_dev_entry *deve; | 656 | struct se_dev_entry *deve; |
659 | struct se_lun *se_lun; | 657 | struct se_lun *se_lun; |
660 | struct se_session *se_sess = SE_SESS(se_cmd); | 658 | struct se_session *se_sess = se_cmd->se_sess; |
661 | struct se_task *se_task; | 659 | struct se_task *se_task; |
662 | unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf; | 660 | unsigned char *buf; |
663 | u32 cdb_offset = 0, lun_count = 0, offset = 8, i; | 661 | u32 cdb_offset = 0, lun_count = 0, offset = 8, i; |
664 | 662 | ||
665 | list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list) | 663 | list_for_each_entry(se_task, &se_cmd->t_task_list, t_list) |
666 | break; | 664 | break; |
667 | 665 | ||
668 | if (!(se_task)) { | 666 | if (!se_task) { |
669 | printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n"); | 667 | pr_err("Unable to locate struct se_task for struct se_cmd\n"); |
670 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 668 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
671 | } | 669 | } |
672 | 670 | ||
671 | buf = transport_kmap_first_data_page(se_cmd); | ||
672 | |||
673 | /* | 673 | /* |
674 | * If no struct se_session pointer is present, this struct se_cmd is | 674 | * If no struct se_session pointer is present, this struct se_cmd is |
675 | * coming via a target_core_mod PASSTHROUGH op, and not through | 675 | * coming via a target_core_mod PASSTHROUGH op, and not through |
676 | * a $FABRIC_MOD. In that case, report LUN=0 only. | 676 | * a $FABRIC_MOD. In that case, report LUN=0 only. |
677 | */ | 677 | */ |
678 | if (!(se_sess)) { | 678 | if (!se_sess) { |
679 | int_to_scsilun(0, (struct scsi_lun *)&buf[offset]); | 679 | int_to_scsilun(0, (struct scsi_lun *)&buf[offset]); |
680 | lun_count = 1; | 680 | lun_count = 1; |
681 | goto done; | 681 | goto done; |
682 | } | 682 | } |
683 | 683 | ||
684 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | 684 | spin_lock_irq(&se_sess->se_node_acl->device_list_lock); |
685 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 685 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
686 | deve = &SE_NODE_ACL(se_sess)->device_list[i]; | 686 | deve = &se_sess->se_node_acl->device_list[i]; |
687 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) | 687 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) |
688 | continue; | 688 | continue; |
689 | se_lun = deve->se_lun; | 689 | se_lun = deve->se_lun; |
@@ -700,12 +700,13 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd) | |||
700 | offset += 8; | 700 | offset += 8; |
701 | cdb_offset += 8; | 701 | cdb_offset += 8; |
702 | } | 702 | } |
703 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | 703 | spin_unlock_irq(&se_sess->se_node_acl->device_list_lock); |
704 | 704 | ||
705 | /* | 705 | /* |
706 | * See SPC3 r07, page 159. | 706 | * See SPC3 r07, page 159. |
707 | */ | 707 | */ |
708 | done: | 708 | done: |
709 | transport_kunmap_first_data_page(se_cmd); | ||
709 | lun_count *= 8; | 710 | lun_count *= 8; |
710 | buf[0] = ((lun_count >> 24) & 0xff); | 711 | buf[0] = ((lun_count >> 24) & 0xff); |
711 | buf[1] = ((lun_count >> 16) & 0xff); | 712 | buf[1] = ((lun_count >> 16) & 0xff); |
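With control CDBs now backed by scatter-gather lists, transport_core_report_lun_response() can no longer write through a flat t_task_buf; it maps the first data page, fills in the LUN entries and the LUN LIST LENGTH header, and unmaps when done. The buffer must stay mapped until the last header byte is stored. A hedged sketch of what such a mapping helper involves; the names here are assumptions for illustration, not the driver's internals:

    #include <linux/scatterlist.h>
    #include <linux/highmem.h>

    /* Hypothetical helper: map the first scatterlist page so a small
     * control payload can be written with plain stores. */
    static void *map_first_sg_page(struct scatterlist *sgl)
    {
            return kmap(sg_page(sgl)) + sgl->offset;
    }

    /* Must only be called after the final store into the buffer. */
    static void unmap_first_sg_page(struct scatterlist *sgl)
    {
            kunmap(sg_page(sgl));
    }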
@@ -744,26 +745,20 @@ void se_release_device_for_hba(struct se_device *dev) | |||
744 | core_scsi3_free_all_registrations(dev); | 745 | core_scsi3_free_all_registrations(dev); |
745 | se_release_vpd_for_dev(dev); | 746 | se_release_vpd_for_dev(dev); |
746 | 747 | ||
747 | kfree(dev->dev_status_queue_obj); | ||
748 | kfree(dev->dev_queue_obj); | ||
749 | kfree(dev); | 748 | kfree(dev); |
750 | |||
751 | return; | ||
752 | } | 749 | } |
753 | 750 | ||
754 | void se_release_vpd_for_dev(struct se_device *dev) | 751 | void se_release_vpd_for_dev(struct se_device *dev) |
755 | { | 752 | { |
756 | struct t10_vpd *vpd, *vpd_tmp; | 753 | struct t10_vpd *vpd, *vpd_tmp; |
757 | 754 | ||
758 | spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock); | 755 | spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock); |
759 | list_for_each_entry_safe(vpd, vpd_tmp, | 756 | list_for_each_entry_safe(vpd, vpd_tmp, |
760 | &DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) { | 757 | &dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) { |
761 | list_del(&vpd->vpd_list); | 758 | list_del(&vpd->vpd_list); |
762 | kfree(vpd); | 759 | kfree(vpd); |
763 | } | 760 | } |
764 | spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock); | 761 | spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock); |
765 | |||
766 | return; | ||
767 | } | 762 | } |
768 | 763 | ||
769 | /* se_free_virtual_device(): | 764 | /* se_free_virtual_device(): |
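se_release_vpd_for_dev() walks the VPD list with list_for_each_entry_safe() because every node is freed inside the loop; the _safe variant caches the successor before the current entry is unlinked. A minimal sketch of the delete-while-iterating pattern, where head stands in for the device's t10_vpd_list:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct t10_vpd *vpd, *vpd_tmp;

    list_for_each_entry_safe(vpd, vpd_tmp, &head, vpd_list) {
            list_del(&vpd->vpd_list); /* unlink the current node */
            kfree(vpd);               /* safe: successor already cached in vpd_tmp */
    }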
@@ -822,12 +817,13 @@ static void se_dev_stop(struct se_device *dev) | |||
822 | 817 | ||
823 | int se_dev_check_online(struct se_device *dev) | 818 | int se_dev_check_online(struct se_device *dev) |
824 | { | 819 | { |
820 | unsigned long flags; | ||
825 | int ret; | 821 | int ret; |
826 | 822 | ||
827 | spin_lock_irq(&dev->dev_status_lock); | 823 | spin_lock_irqsave(&dev->dev_status_lock, flags); |
828 | ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) || | 824 | ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) || |
829 | (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1; | 825 | (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1; |
830 | spin_unlock_irq(&dev->dev_status_lock); | 826 | spin_unlock_irqrestore(&dev->dev_status_lock, flags); |
831 | 827 | ||
832 | return ret; | 828 | return ret; |
833 | } | 829 | } |
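se_dev_check_online() moves from spin_lock_irq() to spin_lock_irqsave() because it may be reached from contexts where interrupts are already disabled; an unconditional spin_unlock_irq() would re-enable them behind the caller's back, while the irqsave/irqrestore pair preserves whatever state the caller had:

    unsigned long flags;

    spin_lock_irqsave(&dev->dev_status_lock, flags);      /* save IRQ state, then disable */
    /* ... sample dev->dev_status under the lock ... */
    spin_unlock_irqrestore(&dev->dev_status_lock, flags); /* restore the saved state,
                                                             never force-enable IRQs */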
@@ -849,59 +845,61 @@ void se_dev_set_default_attribs( | |||
849 | { | 845 | { |
850 | struct queue_limits *limits = &dev_limits->limits; | 846 | struct queue_limits *limits = &dev_limits->limits; |
851 | 847 | ||
852 | DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO; | 848 | dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO; |
853 | DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE; | 849 | dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE; |
854 | DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ; | 850 | dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ; |
855 | DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE; | 851 | dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE; |
856 | DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; | 852 | dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; |
857 | DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS; | 853 | dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS; |
858 | DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU; | 854 | dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU; |
859 | DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS; | 855 | dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS; |
860 | DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS; | 856 | dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS; |
861 | DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA; | 857 | dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA; |
862 | DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS; | 858 | dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; |
859 | dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT; | ||
860 | dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; | ||
863 | /* | 861 | /* |
864 | * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK | 862 | * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK |
865 | * iblock_create_virtdevice() from struct queue_limits values | 863 | * iblock_create_virtdevice() from struct queue_limits values |
866 | * if blk_queue_discard()==1 | 864 | * if blk_queue_discard()==1 |
867 | */ | 865 | */ |
868 | DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; | 866 | dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; |
869 | DEV_ATTRIB(dev)->max_unmap_block_desc_count = | 867 | dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = |
870 | DA_MAX_UNMAP_BLOCK_DESC_COUNT; | 868 | DA_MAX_UNMAP_BLOCK_DESC_COUNT; |
871 | DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; | 869 | dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; |
872 | DEV_ATTRIB(dev)->unmap_granularity_alignment = | 870 | dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = |
873 | DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; | 871 | DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; |
874 | /* | 872 | /* |
875 | * block_size is based on subsystem plugin dependent requirements. | 873 | * block_size is based on subsystem plugin dependent requirements. |
876 | */ | 874 | */ |
877 | DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size; | 875 | dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size; |
878 | DEV_ATTRIB(dev)->block_size = limits->logical_block_size; | 876 | dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size; |
879 | /* | 877 | /* |
880 | * max_sectors is based on subsystem plugin dependent requirements. | 878 | * max_sectors is based on subsystem plugin dependent requirements. |
881 | */ | 879 | */ |
882 | DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors; | 880 | dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; |
883 | DEV_ATTRIB(dev)->max_sectors = limits->max_sectors; | 881 | dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors; |
884 | /* | 882 | /* |
885 | * Set optimal_sectors from max_sectors, which can be lowered via | 883 | * Set optimal_sectors from max_sectors, which can be lowered via |
886 | * configfs. | 884 | * configfs. |
887 | */ | 885 | */ |
888 | DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors; | 886 | dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors; |
889 | /* | 887 | /* |
890 | * queue_depth is based on subsystem plugin dependent requirements. | 888 | * queue_depth is based on subsystem plugin dependent requirements. |
891 | */ | 889 | */ |
892 | DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth; | 890 | dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth; |
893 | DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth; | 891 | dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth; |
894 | } | 892 | } |
895 | 893 | ||
896 | int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) | 894 | int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) |
897 | { | 895 | { |
898 | if (task_timeout > DA_TASK_TIMEOUT_MAX) { | 896 | if (task_timeout > DA_TASK_TIMEOUT_MAX) { |
899 | printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger then" | 897 | pr_err("dev[%p]: Passed task_timeout: %u larger then" |
900 | " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout); | 898 | " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout); |
901 | return -1; | 899 | return -EINVAL; |
902 | } else { | 900 | } else { |
903 | DEV_ATTRIB(dev)->task_timeout = task_timeout; | 901 | dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout; |
904 | printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n", | 902 | pr_debug("dev[%p]: Set SE Device task_timeout: %u\n", |
905 | dev, task_timeout); | 903 | dev, task_timeout); |
906 | } | 904 | } |
907 | 905 | ||
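The long dev->se_sub_dev->se_dev_attrib chains throughout this hunk replace the removed accessor macros; DEV_ATTRIB() presumably just hid that dereference. The assumed shape of the old macro and the resulting equivalence, shown for orientation rather than quoted from the tree:

    /* Assumed old accessor: */
    #define DEV_ATTRIB(dev) (&(dev)->se_sub_dev->se_dev_attrib)

    /* so these two statements are equivalent: */
    DEV_ATTRIB(dev)->block_size = 512;
    dev->se_sub_dev->se_dev_attrib.block_size = 512;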
@@ -912,9 +910,9 @@ int se_dev_set_max_unmap_lba_count( | |||
912 | struct se_device *dev, | 910 | struct se_device *dev, |
913 | u32 max_unmap_lba_count) | 911 | u32 max_unmap_lba_count) |
914 | { | 912 | { |
915 | DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count; | 913 | dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count; |
916 | printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n", | 914 | pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n", |
917 | dev, DEV_ATTRIB(dev)->max_unmap_lba_count); | 915 | dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count); |
918 | return 0; | 916 | return 0; |
919 | } | 917 | } |
920 | 918 | ||
@@ -922,9 +920,10 @@ int se_dev_set_max_unmap_block_desc_count( | |||
922 | struct se_device *dev, | 920 | struct se_device *dev, |
923 | u32 max_unmap_block_desc_count) | 921 | u32 max_unmap_block_desc_count) |
924 | { | 922 | { |
925 | DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count; | 923 | dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = |
926 | printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n", | 924 | max_unmap_block_desc_count; |
927 | dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count); | 925 | pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n", |
926 | dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count); | ||
928 | return 0; | 927 | return 0; |
929 | } | 928 | } |
930 | 929 | ||
@@ -932,9 +931,9 @@ int se_dev_set_unmap_granularity( | |||
932 | struct se_device *dev, | 931 | struct se_device *dev, |
933 | u32 unmap_granularity) | 932 | u32 unmap_granularity) |
934 | { | 933 | { |
935 | DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity; | 934 | dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity; |
936 | printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n", | 935 | pr_debug("dev[%p]: Set unmap_granularity: %u\n", |
937 | dev, DEV_ATTRIB(dev)->unmap_granularity); | 936 | dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity); |
938 | return 0; | 937 | return 0; |
939 | } | 938 | } |
940 | 939 | ||
@@ -942,109 +941,109 @@ int se_dev_set_unmap_granularity_alignment( | |||
942 | struct se_device *dev, | 941 | struct se_device *dev, |
943 | u32 unmap_granularity_alignment) | 942 | u32 unmap_granularity_alignment) |
944 | { | 943 | { |
945 | DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment; | 944 | dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment; |
946 | printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n", | 945 | pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n", |
947 | dev, DEV_ATTRIB(dev)->unmap_granularity_alignment); | 946 | dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment); |
948 | return 0; | 947 | return 0; |
949 | } | 948 | } |
950 | 949 | ||
951 | int se_dev_set_emulate_dpo(struct se_device *dev, int flag) | 950 | int se_dev_set_emulate_dpo(struct se_device *dev, int flag) |
952 | { | 951 | { |
953 | if ((flag != 0) && (flag != 1)) { | 952 | if ((flag != 0) && (flag != 1)) { |
954 | printk(KERN_ERR "Illegal value %d\n", flag); | 953 | pr_err("Illegal value %d\n", flag); |
955 | return -1; | 954 | return -EINVAL; |
956 | } | 955 | } |
957 | if (TRANSPORT(dev)->dpo_emulated == NULL) { | 956 | if (dev->transport->dpo_emulated == NULL) { |
958 | printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n"); | 957 | pr_err("dev->transport->dpo_emulated is NULL\n"); |
959 | return -1; | 958 | return -EINVAL; |
960 | } | 959 | } |
961 | if (TRANSPORT(dev)->dpo_emulated(dev) == 0) { | 960 | if (dev->transport->dpo_emulated(dev) == 0) { |
962 | printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n"); | 961 | pr_err("dev->transport->dpo_emulated not supported\n"); |
963 | return -1; | 962 | return -EINVAL; |
964 | } | 963 | } |
965 | DEV_ATTRIB(dev)->emulate_dpo = flag; | 964 | dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag; |
966 | printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation" | 965 | pr_debug("dev[%p]: SE Device Page Out (DPO) Emulation" |
967 | " bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo); | 966 | " bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo); |
968 | return 0; | 967 | return 0; |
969 | } | 968 | } |
970 | 969 | ||
971 | int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) | 970 | int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) |
972 | { | 971 | { |
973 | if ((flag != 0) && (flag != 1)) { | 972 | if ((flag != 0) && (flag != 1)) { |
974 | printk(KERN_ERR "Illegal value %d\n", flag); | 973 | pr_err("Illegal value %d\n", flag); |
975 | return -1; | 974 | return -EINVAL; |
976 | } | 975 | } |
977 | if (TRANSPORT(dev)->fua_write_emulated == NULL) { | 976 | if (dev->transport->fua_write_emulated == NULL) { |
978 | printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n"); | 977 | pr_err("dev->transport->fua_write_emulated is NULL\n"); |
979 | return -1; | 978 | return -EINVAL; |
980 | } | 979 | } |
981 | if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) { | 980 | if (dev->transport->fua_write_emulated(dev) == 0) { |
982 | printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n"); | 981 | pr_err("dev->transport->fua_write_emulated not supported\n"); |
983 | return -1; | 982 | return -EINVAL; |
984 | } | 983 | } |
985 | DEV_ATTRIB(dev)->emulate_fua_write = flag; | 984 | dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag; |
986 | printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", | 985 | pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", |
987 | dev, DEV_ATTRIB(dev)->emulate_fua_write); | 986 | dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write); |
988 | return 0; | 987 | return 0; |
989 | } | 988 | } |
990 | 989 | ||
991 | int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) | 990 | int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) |
992 | { | 991 | { |
993 | if ((flag != 0) && (flag != 1)) { | 992 | if ((flag != 0) && (flag != 1)) { |
994 | printk(KERN_ERR "Illegal value %d\n", flag); | 993 | pr_err("Illegal value %d\n", flag); |
995 | return -1; | 994 | return -EINVAL; |
996 | } | 995 | } |
997 | if (TRANSPORT(dev)->fua_read_emulated == NULL) { | 996 | if (dev->transport->fua_read_emulated == NULL) { |
998 | printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n"); | 997 | pr_err("dev->transport->fua_read_emulated is NULL\n"); |
999 | return -1; | 998 | return -EINVAL; |
1000 | } | 999 | } |
1001 | if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) { | 1000 | if (dev->transport->fua_read_emulated(dev) == 0) { |
1002 | printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n"); | 1001 | pr_err("dev->transport->fua_read_emulated not supported\n"); |
1003 | return -1; | 1002 | return -EINVAL; |
1004 | } | 1003 | } |
1005 | DEV_ATTRIB(dev)->emulate_fua_read = flag; | 1004 | dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag; |
1006 | printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n", | 1005 | pr_debug("dev[%p]: SE Device Forced Unit Access READs: %d\n", |
1007 | dev, DEV_ATTRIB(dev)->emulate_fua_read); | 1006 | dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read); |
1008 | return 0; | 1007 | return 0; |
1009 | } | 1008 | } |
1010 | 1009 | ||
1011 | int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) | 1010 | int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) |
1012 | { | 1011 | { |
1013 | if ((flag != 0) && (flag != 1)) { | 1012 | if ((flag != 0) && (flag != 1)) { |
1014 | printk(KERN_ERR "Illegal value %d\n", flag); | 1013 | pr_err("Illegal value %d\n", flag); |
1015 | return -1; | 1014 | return -EINVAL; |
1016 | } | 1015 | } |
1017 | if (TRANSPORT(dev)->write_cache_emulated == NULL) { | 1016 | if (dev->transport->write_cache_emulated == NULL) { |
1018 | printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n"); | 1017 | pr_err("dev->transport->write_cache_emulated is NULL\n"); |
1019 | return -1; | 1018 | return -EINVAL; |
1020 | } | 1019 | } |
1021 | if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) { | 1020 | if (dev->transport->write_cache_emulated(dev) == 0) { |
1022 | printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n"); | 1021 | pr_err("dev->transport->write_cache_emulated not supported\n"); |
1023 | return -1; | 1022 | return -EINVAL; |
1024 | } | 1023 | } |
1025 | DEV_ATTRIB(dev)->emulate_write_cache = flag; | 1024 | dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag; |
1026 | printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", | 1025 | pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", |
1027 | dev, DEV_ATTRIB(dev)->emulate_write_cache); | 1026 | dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache); |
1028 | return 0; | 1027 | return 0; |
1029 | } | 1028 | } |
1030 | 1029 | ||
1031 | int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) | 1030 | int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) |
1032 | { | 1031 | { |
1033 | if ((flag != 0) && (flag != 1) && (flag != 2)) { | 1032 | if ((flag != 0) && (flag != 1) && (flag != 2)) { |
1034 | printk(KERN_ERR "Illegal value %d\n", flag); | 1033 | pr_err("Illegal value %d\n", flag); |
1035 | return -1; | 1034 | return -EINVAL; |
1036 | } | 1035 | } |
1037 | 1036 | ||
1038 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 1037 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
1039 | printk(KERN_ERR "dev[%p]: Unable to change SE Device" | 1038 | pr_err("dev[%p]: Unable to change SE Device" |
1040 | " UA_INTRLCK_CTRL while dev_export_obj: %d count" | 1039 | " UA_INTRLCK_CTRL while dev_export_obj: %d count" |
1041 | " exists\n", dev, | 1040 | " exists\n", dev, |
1042 | atomic_read(&dev->dev_export_obj.obj_access_count)); | 1041 | atomic_read(&dev->dev_export_obj.obj_access_count)); |
1043 | return -1; | 1042 | return -EINVAL; |
1044 | } | 1043 | } |
1045 | DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag; | 1044 | dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag; |
1046 | printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", | 1045 | pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", |
1047 | dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl); | 1046 | dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl); |
1048 | 1047 | ||
1049 | return 0; | 1048 | return 0; |
1050 | } | 1049 | } |
@@ -1052,19 +1051,19 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) | |||
1052 | int se_dev_set_emulate_tas(struct se_device *dev, int flag) | 1051 | int se_dev_set_emulate_tas(struct se_device *dev, int flag) |
1053 | { | 1052 | { |
1054 | if ((flag != 0) && (flag != 1)) { | 1053 | if ((flag != 0) && (flag != 1)) { |
1055 | printk(KERN_ERR "Illegal value %d\n", flag); | 1054 | pr_err("Illegal value %d\n", flag); |
1056 | return -1; | 1055 | return -EINVAL; |
1057 | } | 1056 | } |
1058 | 1057 | ||
1059 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 1058 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
1060 | printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while" | 1059 | pr_err("dev[%p]: Unable to change SE Device TAS while" |
1061 | " dev_export_obj: %d count exists\n", dev, | 1060 | " dev_export_obj: %d count exists\n", dev, |
1062 | atomic_read(&dev->dev_export_obj.obj_access_count)); | 1061 | atomic_read(&dev->dev_export_obj.obj_access_count)); |
1063 | return -1; | 1062 | return -EINVAL; |
1064 | } | 1063 | } |
1065 | DEV_ATTRIB(dev)->emulate_tas = flag; | 1064 | dev->se_sub_dev->se_dev_attrib.emulate_tas = flag; |
1066 | printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n", | 1065 | pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n", |
1067 | dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled"); | 1066 | dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled"); |
1068 | 1067 | ||
1069 | return 0; | 1068 | return 0; |
1070 | } | 1069 | } |
@@ -1072,20 +1071,20 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag) | |||
1072 | int se_dev_set_emulate_tpu(struct se_device *dev, int flag) | 1071 | int se_dev_set_emulate_tpu(struct se_device *dev, int flag) |
1073 | { | 1072 | { |
1074 | if ((flag != 0) && (flag != 1)) { | 1073 | if ((flag != 0) && (flag != 1)) { |
1075 | printk(KERN_ERR "Illegal value %d\n", flag); | 1074 | pr_err("Illegal value %d\n", flag); |
1076 | return -1; | 1075 | return -EINVAL; |
1077 | } | 1076 | } |
1078 | /* | 1077 | /* |
1079 | * We expect this value to be non-zero when generic Block Layer | 1078 | * We expect this value to be non-zero when generic Block Layer |
1080 | * Discard support is detected in iblock_create_virtdevice(). | 1079 | * Discard support is detected in iblock_create_virtdevice(). |
1081 | */ | 1080 | */ |
1082 | if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { | 1081 | if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { |
1083 | printk(KERN_ERR "Generic Block Discard not supported\n"); | 1082 | pr_err("Generic Block Discard not supported\n"); |
1084 | return -ENOSYS; | 1083 | return -ENOSYS; |
1085 | } | 1084 | } |
1086 | 1085 | ||
1087 | DEV_ATTRIB(dev)->emulate_tpu = flag; | 1086 | dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag; |
1088 | printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", | 1087 | pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", |
1089 | dev, flag); | 1088 | dev, flag); |
1090 | return 0; | 1089 | return 0; |
1091 | } | 1090 | } |
@@ -1093,20 +1092,20 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag) | |||
1093 | int se_dev_set_emulate_tpws(struct se_device *dev, int flag) | 1092 | int se_dev_set_emulate_tpws(struct se_device *dev, int flag) |
1094 | { | 1093 | { |
1095 | if ((flag != 0) && (flag != 1)) { | 1094 | if ((flag != 0) && (flag != 1)) { |
1096 | printk(KERN_ERR "Illegal value %d\n", flag); | 1095 | pr_err("Illegal value %d\n", flag); |
1097 | return -1; | 1096 | return -EINVAL; |
1098 | } | 1097 | } |
1099 | /* | 1098 | /* |
1100 | * We expect this value to be non-zero when generic Block Layer | 1099 | * We expect this value to be non-zero when generic Block Layer |
1101 | * Discard support is detected in iblock_create_virtdevice(). | 1100 | * Discard support is detected in iblock_create_virtdevice(). |
1102 | */ | 1101 | */ |
1103 | if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { | 1102 | if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { |
1104 | printk(KERN_ERR "Generic Block Discard not supported\n"); | 1103 | pr_err("Generic Block Discard not supported\n"); |
1105 | return -ENOSYS; | 1104 | return -ENOSYS; |
1106 | } | 1105 | } |
1107 | 1106 | ||
1108 | DEV_ATTRIB(dev)->emulate_tpws = flag; | 1107 | dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag; |
1109 | printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", | 1108 | pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", |
1110 | dev, flag); | 1109 | dev, flag); |
1111 | return 0; | 1110 | return 0; |
1112 | } | 1111 | } |
@@ -1114,12 +1113,36 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag) | |||
1114 | int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) | 1113 | int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) |
1115 | { | 1114 | { |
1116 | if ((flag != 0) && (flag != 1)) { | 1115 | if ((flag != 0) && (flag != 1)) { |
1116 | pr_err("Illegal value %d\n", flag); | ||
1117 | return -EINVAL; | ||
1118 | } | ||
1119 | dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag; | ||
1120 | pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, | ||
1121 | (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled"); | ||
1122 | return 0; | ||
1123 | } | ||
1124 | |||
1125 | int se_dev_set_is_nonrot(struct se_device *dev, int flag) | ||
1126 | { | ||
1127 | if ((flag != 0) && (flag != 1)) { | ||
1117 | printk(KERN_ERR "Illegal value %d\n", flag); | 1128 | printk(KERN_ERR "Illegal value %d\n", flag); |
1118 | return -1; | 1129 | return -EINVAL; |
1130 | } | ||
1131 | dev->se_sub_dev->se_dev_attrib.is_nonrot = flag; | ||
1132 | pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n", | ||
1133 | dev, flag); | ||
1134 | return 0; | ||
1135 | } | ||
1136 | |||
1137 | int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag) | ||
1138 | { | ||
1139 | if (flag != 0) { | ||
1140 | printk(KERN_ERR "dev[%p]: SE Device emulatation of restricted" | ||
1141 | " reordering not implemented\n", dev); | ||
1142 | return -ENOSYS; | ||
1119 | } | 1143 | } |
1120 | DEV_ATTRIB(dev)->enforce_pr_isids = flag; | 1144 | dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag; |
1121 | printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, | 1145 | pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag); |
1122 | (DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled"); | ||
1123 | return 0; | 1146 | return 0; |
1124 | } | 1147 | } |
1125 | 1148 | ||
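The new is_nonrot and emulate_rest_reord setters follow the same validate, store, log shape as the existing attribute handlers, with emulate_rest_reord only stubbed: any non-zero flag is rejected with -ENOSYS. The shared pattern, distilled into one hypothetical helper whose names are illustration only:

    /* Hypothetical generic boolean-attribute setter: */
    static int se_dev_set_bool_attrib(struct se_device *dev, int *attr,
                                      const char *name, int flag)
    {
            if (flag != 0 && flag != 1) {
                    pr_err("Illegal value %d\n", flag);
                    return -EINVAL;
            }
            *attr = flag;
            pr_debug("dev[%p]: Set %s: %d\n", dev, name, flag);
            return 0;
    }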
@@ -1131,44 +1154,44 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) | |||
1131 | u32 orig_queue_depth = dev->queue_depth; | 1154 | u32 orig_queue_depth = dev->queue_depth; |
1132 | 1155 | ||
1133 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 1156 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
1134 | printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while" | 1157 | pr_err("dev[%p]: Unable to change SE Device TCQ while" |
1135 | " dev_export_obj: %d count exists\n", dev, | 1158 | " dev_export_obj: %d count exists\n", dev, |
1136 | atomic_read(&dev->dev_export_obj.obj_access_count)); | 1159 | atomic_read(&dev->dev_export_obj.obj_access_count)); |
1137 | return -1; | 1160 | return -EINVAL; |
1138 | } | 1161 | } |
1139 | if (!(queue_depth)) { | 1162 | if (!queue_depth) { |
1140 | printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue" | 1163 | pr_err("dev[%p]: Illegal ZERO value for queue" |
1141 | "_depth\n", dev); | 1164 | "_depth\n", dev); |
1142 | return -1; | 1165 | return -EINVAL; |
1143 | } | 1166 | } |
1144 | 1167 | ||
1145 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | 1168 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
1146 | if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { | 1169 | if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { |
1147 | printk(KERN_ERR "dev[%p]: Passed queue_depth: %u" | 1170 | pr_err("dev[%p]: Passed queue_depth: %u" |
1148 | " exceeds TCM/SE_Device TCQ: %u\n", | 1171 | " exceeds TCM/SE_Device TCQ: %u\n", |
1149 | dev, queue_depth, | 1172 | dev, queue_depth, |
1150 | DEV_ATTRIB(dev)->hw_queue_depth); | 1173 | dev->se_sub_dev->se_dev_attrib.hw_queue_depth); |
1151 | return -1; | 1174 | return -EINVAL; |
1152 | } | 1175 | } |
1153 | } else { | 1176 | } else { |
1154 | if (queue_depth > DEV_ATTRIB(dev)->queue_depth) { | 1177 | if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) { |
1155 | if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { | 1178 | if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { |
1156 | printk(KERN_ERR "dev[%p]: Passed queue_depth:" | 1179 | pr_err("dev[%p]: Passed queue_depth:" |
1157 | " %u exceeds TCM/SE_Device MAX" | 1180 | " %u exceeds TCM/SE_Device MAX" |
1158 | " TCQ: %u\n", dev, queue_depth, | 1181 | " TCQ: %u\n", dev, queue_depth, |
1159 | DEV_ATTRIB(dev)->hw_queue_depth); | 1182 | dev->se_sub_dev->se_dev_attrib.hw_queue_depth); |
1160 | return -1; | 1183 | return -EINVAL; |
1161 | } | 1184 | } |
1162 | } | 1185 | } |
1163 | } | 1186 | } |
1164 | 1187 | ||
1165 | DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth; | 1188 | dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth; |
1166 | if (queue_depth > orig_queue_depth) | 1189 | if (queue_depth > orig_queue_depth) |
1167 | atomic_add(queue_depth - orig_queue_depth, &dev->depth_left); | 1190 | atomic_add(queue_depth - orig_queue_depth, &dev->depth_left); |
1168 | else if (queue_depth < orig_queue_depth) | 1191 | else if (queue_depth < orig_queue_depth) |
1169 | atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left); | 1192 | atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left); |
1170 | 1193 | ||
1171 | printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n", | 1194 | pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", |
1172 | dev, queue_depth); | 1195 | dev, queue_depth); |
1173 | return 0; | 1196 | return 0; |
1174 | } | 1197 | } |
@@ -1178,50 +1201,50 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) | |||
1178 | int force = 0; /* Force setting for VDEVS */ | 1201 | int force = 0; /* Force setting for VDEVS */ |
1179 | 1202 | ||
1180 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 1203 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
1181 | printk(KERN_ERR "dev[%p]: Unable to change SE Device" | 1204 | pr_err("dev[%p]: Unable to change SE Device" |
1182 | " max_sectors while dev_export_obj: %d count exists\n", | 1205 | " max_sectors while dev_export_obj: %d count exists\n", |
1183 | dev, atomic_read(&dev->dev_export_obj.obj_access_count)); | 1206 | dev, atomic_read(&dev->dev_export_obj.obj_access_count)); |
1184 | return -1; | 1207 | return -EINVAL; |
1185 | } | 1208 | } |
1186 | if (!(max_sectors)) { | 1209 | if (!max_sectors) { |
1187 | printk(KERN_ERR "dev[%p]: Illegal ZERO value for" | 1210 | pr_err("dev[%p]: Illegal ZERO value for" |
1188 | " max_sectors\n", dev); | 1211 | " max_sectors\n", dev); |
1189 | return -1; | 1212 | return -EINVAL; |
1190 | } | 1213 | } |
1191 | if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) { | 1214 | if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) { |
1192 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than" | 1215 | pr_err("dev[%p]: Passed max_sectors: %u less than" |
1193 | " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors, | 1216 | " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors, |
1194 | DA_STATUS_MAX_SECTORS_MIN); | 1217 | DA_STATUS_MAX_SECTORS_MIN); |
1195 | return -1; | 1218 | return -EINVAL; |
1196 | } | 1219 | } |
1197 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | 1220 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
1198 | if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) { | 1221 | if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) { |
1199 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" | 1222 | pr_err("dev[%p]: Passed max_sectors: %u" |
1200 | " greater than TCM/SE_Device max_sectors:" | 1223 | " greater than TCM/SE_Device max_sectors:" |
1201 | " %u\n", dev, max_sectors, | 1224 | " %u\n", dev, max_sectors, |
1202 | DEV_ATTRIB(dev)->hw_max_sectors); | 1225 | dev->se_sub_dev->se_dev_attrib.hw_max_sectors); |
1203 | return -1; | 1226 | return -EINVAL; |
1204 | } | 1227 | } |
1205 | } else { | 1228 | } else { |
1206 | if (!(force) && (max_sectors > | 1229 | if (!force && (max_sectors > |
1207 | DEV_ATTRIB(dev)->hw_max_sectors)) { | 1230 | dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) { |
1208 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" | 1231 | pr_err("dev[%p]: Passed max_sectors: %u" |
1209 | " greater than TCM/SE_Device max_sectors" | 1232 | " greater than TCM/SE_Device max_sectors" |
1210 | ": %u, use force=1 to override.\n", dev, | 1233 | ": %u, use force=1 to override.\n", dev, |
1211 | max_sectors, DEV_ATTRIB(dev)->hw_max_sectors); | 1234 | max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors); |
1212 | return -1; | 1235 | return -EINVAL; |
1213 | } | 1236 | } |
1214 | if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) { | 1237 | if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) { |
1215 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" | 1238 | pr_err("dev[%p]: Passed max_sectors: %u" |
1216 | " greater than DA_STATUS_MAX_SECTORS_MAX:" | 1239 | " greater than DA_STATUS_MAX_SECTORS_MAX:" |
1217 | " %u\n", dev, max_sectors, | 1240 | " %u\n", dev, max_sectors, |
1218 | DA_STATUS_MAX_SECTORS_MAX); | 1241 | DA_STATUS_MAX_SECTORS_MAX); |
1219 | return -1; | 1242 | return -EINVAL; |
1220 | } | 1243 | } |
1221 | } | 1244 | } |
1222 | 1245 | ||
1223 | DEV_ATTRIB(dev)->max_sectors = max_sectors; | 1246 | dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors; |
1224 | printk("dev[%p]: SE Device max_sectors changed to %u\n", | 1247 | pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", |
1225 | dev, max_sectors); | 1248 | dev, max_sectors); |
1226 | return 0; | 1249 | return 0; |
1227 | } | 1250 | } |
@@ -1229,25 +1252,25 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) | |||
1229 | int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) | 1252 | int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) |
1230 | { | 1253 | { |
1231 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 1254 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
1232 | printk(KERN_ERR "dev[%p]: Unable to change SE Device" | 1255 | pr_err("dev[%p]: Unable to change SE Device" |
1233 | " optimal_sectors while dev_export_obj: %d count exists\n", | 1256 | " optimal_sectors while dev_export_obj: %d count exists\n", |
1234 | dev, atomic_read(&dev->dev_export_obj.obj_access_count)); | 1257 | dev, atomic_read(&dev->dev_export_obj.obj_access_count)); |
1235 | return -EINVAL; | 1258 | return -EINVAL; |
1236 | } | 1259 | } |
1237 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | 1260 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
1238 | printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be" | 1261 | pr_err("dev[%p]: Passed optimal_sectors cannot be" |
1239 | " changed for TCM/pSCSI\n", dev); | 1262 | " changed for TCM/pSCSI\n", dev); |
1240 | return -EINVAL; | 1263 | return -EINVAL; |
1241 | } | 1264 | } |
1242 | if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) { | 1265 | if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) { |
1243 | printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be" | 1266 | pr_err("dev[%p]: Passed optimal_sectors %u cannot be" |
1244 | " greater than max_sectors: %u\n", dev, | 1267 | " greater than max_sectors: %u\n", dev, |
1245 | optimal_sectors, DEV_ATTRIB(dev)->max_sectors); | 1268 | optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); |
1246 | return -EINVAL; | 1269 | return -EINVAL; |
1247 | } | 1270 | } |
1248 | 1271 | ||
1249 | DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors; | 1272 | dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors; |
1250 | printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n", | 1273 | pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n", |
1251 | dev, optimal_sectors); | 1274 | dev, optimal_sectors); |
1252 | return 0; | 1275 | return 0; |
1253 | } | 1276 | } |
@@ -1255,31 +1278,31 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) | |||
1255 | int se_dev_set_block_size(struct se_device *dev, u32 block_size) | 1278 | int se_dev_set_block_size(struct se_device *dev, u32 block_size) |
1256 | { | 1279 | { |
1257 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 1280 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
1258 | printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size" | 1281 | pr_err("dev[%p]: Unable to change SE Device block_size" |
1259 | " while dev_export_obj: %d count exists\n", dev, | 1282 | " while dev_export_obj: %d count exists\n", dev, |
1260 | atomic_read(&dev->dev_export_obj.obj_access_count)); | 1283 | atomic_read(&dev->dev_export_obj.obj_access_count)); |
1261 | return -1; | 1284 | return -EINVAL; |
1262 | } | 1285 | } |
1263 | 1286 | ||
1264 | if ((block_size != 512) && | 1287 | if ((block_size != 512) && |
1265 | (block_size != 1024) && | 1288 | (block_size != 1024) && |
1266 | (block_size != 2048) && | 1289 | (block_size != 2048) && |
1267 | (block_size != 4096)) { | 1290 | (block_size != 4096)) { |
1268 | printk(KERN_ERR "dev[%p]: Illegal value for block_device: %u" | 1291 | pr_err("dev[%p]: Illegal value for block_device: %u" |
1269 | " for SE device, must be 512, 1024, 2048 or 4096\n", | 1292 | " for SE device, must be 512, 1024, 2048 or 4096\n", |
1270 | dev, block_size); | 1293 | dev, block_size); |
1271 | return -1; | 1294 | return -EINVAL; |
1272 | } | 1295 | } |
1273 | 1296 | ||
1274 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | 1297 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
1275 | printk(KERN_ERR "dev[%p]: Not allowed to change block_size for" | 1298 | pr_err("dev[%p]: Not allowed to change block_size for" |
1276 | " Physical Device, use for Linux/SCSI to change" | 1299 | " Physical Device, use for Linux/SCSI to change" |
1277 | " block_size for underlying hardware\n", dev); | 1300 | " block_size for underlying hardware\n", dev); |
1278 | return -1; | 1301 | return -EINVAL; |
1279 | } | 1302 | } |
1280 | 1303 | ||
1281 | DEV_ATTRIB(dev)->block_size = block_size; | 1304 | dev->se_sub_dev->se_dev_attrib.block_size = block_size; |
1282 | printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n", | 1305 | pr_debug("dev[%p]: SE Device block_size changed to %u\n", |
1283 | dev, block_size); | 1306 | dev, block_size); |
1284 | return 0; | 1307 | return 0; |
1285 | } | 1308 | } |
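The explicit 512/1024/2048/4096 comparison in se_dev_set_block_size() could equivalently be expressed as a power-of-two range check; a sketch of that alternative using is_power_of_2() from <linux/log2.h>, not what the driver does, just an equivalent formulation:

    #include <linux/log2.h>

    /* The four legal sizes are exactly the powers of two in [512, 4096]. */
    static bool block_size_ok(u32 block_size)
    {
            return block_size >= 512 && block_size <= 4096 &&
                   is_power_of_2(block_size);
    }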
@@ -1294,13 +1317,13 @@ struct se_lun *core_dev_add_lun( | |||
1294 | u32 lun_access = 0; | 1317 | u32 lun_access = 0; |
1295 | 1318 | ||
1296 | if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) { | 1319 | if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) { |
1297 | printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n", | 1320 | pr_err("Unable to export struct se_device while dev_access_obj: %d\n", |
1298 | atomic_read(&dev->dev_access_obj.obj_access_count)); | 1321 | atomic_read(&dev->dev_access_obj.obj_access_count)); |
1299 | return NULL; | 1322 | return NULL; |
1300 | } | 1323 | } |
1301 | 1324 | ||
1302 | lun_p = core_tpg_pre_addlun(tpg, lun); | 1325 | lun_p = core_tpg_pre_addlun(tpg, lun); |
1303 | if ((IS_ERR(lun_p)) || !(lun_p)) | 1326 | if ((IS_ERR(lun_p)) || !lun_p) |
1304 | return NULL; | 1327 | return NULL; |
1305 | 1328 | ||
1306 | if (dev->dev_flags & DF_READ_ONLY) | 1329 | if (dev->dev_flags & DF_READ_ONLY) |
@@ -1311,15 +1334,15 @@ struct se_lun *core_dev_add_lun( | |||
1311 | if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0) | 1334 | if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0) |
1312 | return NULL; | 1335 | return NULL; |
1313 | 1336 | ||
1314 | printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" | 1337 | pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" |
1315 | " CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(), | 1338 | " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), |
1316 | TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun, | 1339 | tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun, |
1317 | TPG_TFO(tpg)->get_fabric_name(), hba->hba_id); | 1340 | tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id); |
1318 | /* | 1341 | /* |
1319 | * Update LUN maps for dynamically added initiators when | 1342 | * Update LUN maps for dynamically added initiators when |
1320 | * generate_node_acl is enabled. | 1343 | * generate_node_acl is enabled. |
1321 | */ | 1344 | */ |
1322 | if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) { | 1345 | if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) { |
1323 | struct se_node_acl *acl; | 1346 | struct se_node_acl *acl; |
1324 | spin_lock_bh(&tpg->acl_node_lock); | 1347 | spin_lock_bh(&tpg->acl_node_lock); |
1325 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { | 1348 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { |
@@ -1347,15 +1370,15 @@ int core_dev_del_lun( | |||
1347 | int ret = 0; | 1370 | int ret = 0; |
1348 | 1371 | ||
1349 | lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret); | 1372 | lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret); |
1350 | if (!(lun)) | 1373 | if (!lun) |
1351 | return ret; | 1374 | return ret; |
1352 | 1375 | ||
1353 | core_tpg_post_dellun(tpg, lun); | 1376 | core_tpg_post_dellun(tpg, lun); |
1354 | 1377 | ||
1355 | printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" | 1378 | pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" |
1356 | " device object\n", TPG_TFO(tpg)->get_fabric_name(), | 1379 | " device object\n", tpg->se_tpg_tfo->get_fabric_name(), |
1357 | TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, | 1380 | tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, |
1358 | TPG_TFO(tpg)->get_fabric_name()); | 1381 | tpg->se_tpg_tfo->get_fabric_name()); |
1359 | 1382 | ||
1360 | return 0; | 1383 | return 0; |
1361 | } | 1384 | } |
@@ -1366,21 +1389,21 @@ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_l | |||
1366 | 1389 | ||
1367 | spin_lock(&tpg->tpg_lun_lock); | 1390 | spin_lock(&tpg->tpg_lun_lock); |
1368 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | 1391 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { |
1369 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS" | 1392 | pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS" |
1370 | "_PER_TPG-1: %u for Target Portal Group: %hu\n", | 1393 | "_PER_TPG-1: %u for Target Portal Group: %hu\n", |
1371 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 1394 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, |
1372 | TRANSPORT_MAX_LUNS_PER_TPG-1, | 1395 | TRANSPORT_MAX_LUNS_PER_TPG-1, |
1373 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1396 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
1374 | spin_unlock(&tpg->tpg_lun_lock); | 1397 | spin_unlock(&tpg->tpg_lun_lock); |
1375 | return NULL; | 1398 | return NULL; |
1376 | } | 1399 | } |
1377 | lun = &tpg->tpg_lun_list[unpacked_lun]; | 1400 | lun = &tpg->tpg_lun_list[unpacked_lun]; |
1378 | 1401 | ||
1379 | if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { | 1402 | if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { |
1380 | printk(KERN_ERR "%s Logical Unit Number: %u is not free on" | 1403 | pr_err("%s Logical Unit Number: %u is not free on" |
1381 | " Target Portal Group: %hu, ignoring request.\n", | 1404 | " Target Portal Group: %hu, ignoring request.\n", |
1382 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 1405 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, |
1383 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1406 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
1384 | spin_unlock(&tpg->tpg_lun_lock); | 1407 | spin_unlock(&tpg->tpg_lun_lock); |
1385 | return NULL; | 1408 | return NULL; |
1386 | } | 1409 | } |
@@ -1399,21 +1422,21 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked | |||
1399 | 1422 | ||
1400 | spin_lock(&tpg->tpg_lun_lock); | 1423 | spin_lock(&tpg->tpg_lun_lock); |
1401 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | 1424 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { |
1402 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" | 1425 | pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" |
1403 | "_TPG-1: %u for Target Portal Group: %hu\n", | 1426 | "_TPG-1: %u for Target Portal Group: %hu\n", |
1404 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 1427 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, |
1405 | TRANSPORT_MAX_LUNS_PER_TPG-1, | 1428 | TRANSPORT_MAX_LUNS_PER_TPG-1, |
1406 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1429 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
1407 | spin_unlock(&tpg->tpg_lun_lock); | 1430 | spin_unlock(&tpg->tpg_lun_lock); |
1408 | return NULL; | 1431 | return NULL; |
1409 | } | 1432 | } |
1410 | lun = &tpg->tpg_lun_list[unpacked_lun]; | 1433 | lun = &tpg->tpg_lun_list[unpacked_lun]; |
1411 | 1434 | ||
1412 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { | 1435 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { |
1413 | printk(KERN_ERR "%s Logical Unit Number: %u is not active on" | 1436 | pr_err("%s Logical Unit Number: %u is not active on" |
1414 | " Target Portal Group: %hu, ignoring request.\n", | 1437 | " Target Portal Group: %hu, ignoring request.\n", |
1415 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 1438 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, |
1416 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1439 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
1417 | spin_unlock(&tpg->tpg_lun_lock); | 1440 | spin_unlock(&tpg->tpg_lun_lock); |
1418 | return NULL; | 1441 | return NULL; |
1419 | } | 1442 | } |
@@ -1432,19 +1455,19 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl( | |||
1432 | struct se_node_acl *nacl; | 1455 | struct se_node_acl *nacl; |
1433 | 1456 | ||
1434 | if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) { | 1457 | if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) { |
1435 | printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n", | 1458 | pr_err("%s InitiatorName exceeds maximum size.\n", |
1436 | TPG_TFO(tpg)->get_fabric_name()); | 1459 | tpg->se_tpg_tfo->get_fabric_name()); |
1437 | *ret = -EOVERFLOW; | 1460 | *ret = -EOVERFLOW; |
1438 | return NULL; | 1461 | return NULL; |
1439 | } | 1462 | } |
1440 | nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname); | 1463 | nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname); |
1441 | if (!(nacl)) { | 1464 | if (!nacl) { |
1442 | *ret = -EINVAL; | 1465 | *ret = -EINVAL; |
1443 | return NULL; | 1466 | return NULL; |
1444 | } | 1467 | } |
1445 | lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL); | 1468 | lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL); |
1446 | if (!(lacl)) { | 1469 | if (!lacl) { |
1447 | printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n"); | 1470 | pr_err("Unable to allocate memory for struct se_lun_acl.\n"); |
1448 | *ret = -ENOMEM; | 1471 | *ret = -ENOMEM; |
1449 | return NULL; | 1472 | return NULL; |
1450 | } | 1473 | } |
@@ -1467,16 +1490,16 @@ int core_dev_add_initiator_node_lun_acl( | |||
1467 | struct se_node_acl *nacl; | 1490 | struct se_node_acl *nacl; |
1468 | 1491 | ||
1469 | lun = core_dev_get_lun(tpg, unpacked_lun); | 1492 | lun = core_dev_get_lun(tpg, unpacked_lun); |
1470 | if (!(lun)) { | 1493 | if (!lun) { |
1471 | printk(KERN_ERR "%s Logical Unit Number: %u is not active on" | 1494 | pr_err("%s Logical Unit Number: %u is not active on" |
1472 | " Target Portal Group: %hu, ignoring request.\n", | 1495 | " Target Portal Group: %hu, ignoring request.\n", |
1473 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 1496 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, |
1474 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1497 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
1475 | return -EINVAL; | 1498 | return -EINVAL; |
1476 | } | 1499 | } |
1477 | 1500 | ||
1478 | nacl = lacl->se_lun_nacl; | 1501 | nacl = lacl->se_lun_nacl; |
1479 | if (!(nacl)) | 1502 | if (!nacl) |
1480 | return -EINVAL; | 1503 | return -EINVAL; |
1481 | 1504 | ||
1482 | if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) && | 1505 | if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) && |
@@ -1495,9 +1518,9 @@ int core_dev_add_initiator_node_lun_acl( | |||
1495 | smp_mb__after_atomic_inc(); | 1518 | smp_mb__after_atomic_inc(); |
1496 | spin_unlock(&lun->lun_acl_lock); | 1519 | spin_unlock(&lun->lun_acl_lock); |
1497 | 1520 | ||
1498 | printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " | 1521 | pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " |
1499 | " InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(), | 1522 | " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
1500 | TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, | 1523 | tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, |
1501 | (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", | 1524 | (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", |
1502 | lacl->initiatorname); | 1525 | lacl->initiatorname); |
1503 | /* | 1526 | /* |
@@ -1520,7 +1543,7 @@ int core_dev_del_initiator_node_lun_acl( | |||
1520 | struct se_node_acl *nacl; | 1543 | struct se_node_acl *nacl; |
1521 | 1544 | ||
1522 | nacl = lacl->se_lun_nacl; | 1545 | nacl = lacl->se_lun_nacl; |
1523 | if (!(nacl)) | 1546 | if (!nacl) |
1524 | return -EINVAL; | 1547 | return -EINVAL; |
1525 | 1548 | ||
1526 | spin_lock(&lun->lun_acl_lock); | 1549 | spin_lock(&lun->lun_acl_lock); |
@@ -1534,10 +1557,10 @@ int core_dev_del_initiator_node_lun_acl( | |||
1534 | 1557 | ||
1535 | lacl->se_lun = NULL; | 1558 | lacl->se_lun = NULL; |
1536 | 1559 | ||
1537 | printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for" | 1560 | pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for" |
1538 | " InitiatorNode: %s Mapped LUN: %u\n", | 1561 | " InitiatorNode: %s Mapped LUN: %u\n", |
1539 | TPG_TFO(tpg)->get_fabric_name(), | 1562 | tpg->se_tpg_tfo->get_fabric_name(), |
1540 | TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun, | 1563 | tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, |
1541 | lacl->initiatorname, lacl->mapped_lun); | 1564 | lacl->initiatorname, lacl->mapped_lun); |
1542 | 1565 | ||
1543 | return 0; | 1566 | return 0; |
@@ -1547,10 +1570,10 @@ void core_dev_free_initiator_node_lun_acl( | |||
1547 | struct se_portal_group *tpg, | 1570 | struct se_portal_group *tpg, |
1548 | struct se_lun_acl *lacl) | 1571 | struct se_lun_acl *lacl) |
1549 | { | 1572 | { |
1550 | printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" | 1573 | pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" |
1551 | " Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(), | 1574 | " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(), |
1552 | TPG_TFO(tpg)->tpg_get_tag(tpg), | 1575 | tpg->se_tpg_tfo->tpg_get_tag(tpg), |
1553 | TPG_TFO(tpg)->get_fabric_name(), | 1576 | tpg->se_tpg_tfo->get_fabric_name(), |
1554 | lacl->initiatorname, lacl->mapped_lun); | 1577 | lacl->initiatorname, lacl->mapped_lun); |
1555 | 1578 | ||
1556 | kfree(lacl); | 1579 | kfree(lacl); |
@@ -1565,64 +1588,64 @@ int core_dev_setup_virtual_lun0(void) | |||
1565 | char buf[16]; | 1588 | char buf[16]; |
1566 | int ret; | 1589 | int ret; |
1567 | 1590 | ||
1568 | hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE); | 1591 | hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE); |
1569 | if (IS_ERR(hba)) | 1592 | if (IS_ERR(hba)) |
1570 | return PTR_ERR(hba); | 1593 | return PTR_ERR(hba); |
1571 | 1594 | ||
1572 | se_global->g_lun0_hba = hba; | 1595 | lun0_hba = hba; |
1573 | t = hba->transport; | 1596 | t = hba->transport; |
1574 | 1597 | ||
1575 | se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); | 1598 | se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); |
1576 | if (!(se_dev)) { | 1599 | if (!se_dev) { |
1577 | printk(KERN_ERR "Unable to allocate memory for" | 1600 | pr_err("Unable to allocate memory for" |
1578 | " struct se_subsystem_dev\n"); | 1601 | " struct se_subsystem_dev\n"); |
1579 | ret = -ENOMEM; | 1602 | ret = -ENOMEM; |
1580 | goto out; | 1603 | goto out; |
1581 | } | 1604 | } |
1582 | INIT_LIST_HEAD(&se_dev->g_se_dev_list); | 1605 | INIT_LIST_HEAD(&se_dev->se_dev_node); |
1583 | INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); | 1606 | INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); |
1584 | spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); | 1607 | spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); |
1585 | INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list); | 1608 | INIT_LIST_HEAD(&se_dev->t10_pr.registration_list); |
1586 | INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list); | 1609 | INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list); |
1587 | spin_lock_init(&se_dev->t10_reservation.registration_lock); | 1610 | spin_lock_init(&se_dev->t10_pr.registration_lock); |
1588 | spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock); | 1611 | spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock); |
1589 | INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); | 1612 | INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); |
1590 | spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); | 1613 | spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); |
1591 | spin_lock_init(&se_dev->se_dev_lock); | 1614 | spin_lock_init(&se_dev->se_dev_lock); |
1592 | se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; | 1615 | se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; |
1593 | se_dev->t10_wwn.t10_sub_dev = se_dev; | 1616 | se_dev->t10_wwn.t10_sub_dev = se_dev; |
1594 | se_dev->t10_alua.t10_sub_dev = se_dev; | 1617 | se_dev->t10_alua.t10_sub_dev = se_dev; |
1595 | se_dev->se_dev_attrib.da_sub_dev = se_dev; | 1618 | se_dev->se_dev_attrib.da_sub_dev = se_dev; |
1596 | se_dev->se_dev_hba = hba; | 1619 | se_dev->se_dev_hba = hba; |
1597 | 1620 | ||
1598 | se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0"); | 1621 | se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0"); |
1599 | if (!(se_dev->se_dev_su_ptr)) { | 1622 | if (!se_dev->se_dev_su_ptr) { |
1600 | printk(KERN_ERR "Unable to locate subsystem dependent pointer" | 1623 | pr_err("Unable to locate subsystem dependent pointer" |
1601 | " from allocate_virtdevice()\n"); | 1624 | " from allocate_virtdevice()\n"); |
1602 | ret = -ENOMEM; | 1625 | ret = -ENOMEM; |
1603 | goto out; | 1626 | goto out; |
1604 | } | 1627 | } |
1605 | se_global->g_lun0_su_dev = se_dev; | 1628 | lun0_su_dev = se_dev; |
1606 | 1629 | ||
1607 | memset(buf, 0, 16); | 1630 | memset(buf, 0, 16); |
1608 | sprintf(buf, "rd_pages=8"); | 1631 | sprintf(buf, "rd_pages=8"); |
1609 | t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf)); | 1632 | t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf)); |
1610 | 1633 | ||
1611 | dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); | 1634 | dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); |
1612 | if (!(dev) || IS_ERR(dev)) { | 1635 | if (IS_ERR(dev)) { |
1613 | ret = -ENOMEM; | 1636 | ret = PTR_ERR(dev); |
1614 | goto out; | 1637 | goto out; |
1615 | } | 1638 | } |
1616 | se_dev->se_dev_ptr = dev; | 1639 | se_dev->se_dev_ptr = dev; |
1617 | se_global->g_lun0_dev = dev; | 1640 | g_lun0_dev = dev; |
1618 | 1641 | ||
1619 | return 0; | 1642 | return 0; |
1620 | out: | 1643 | out: |
1621 | se_global->g_lun0_su_dev = NULL; | 1644 | lun0_su_dev = NULL; |
1622 | kfree(se_dev); | 1645 | kfree(se_dev); |
1623 | if (se_global->g_lun0_hba) { | 1646 | if (lun0_hba) { |
1624 | core_delete_hba(se_global->g_lun0_hba); | 1647 | core_delete_hba(lun0_hba); |
1625 | se_global->g_lun0_hba = NULL; | 1648 | lun0_hba = NULL; |
1626 | } | 1649 | } |
1627 | return ret; | 1650 | return ret; |
1628 | } | 1651 | } |
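Note: the hunk above tightens the error path in core_dev_setup_virtual_lun0(). create_virtdevice() returns an ERR_PTR-encoded pointer, so the old "!(dev) || IS_ERR(dev)" test and hardcoded -ENOMEM are replaced by a plain IS_ERR() check that propagates the real errno via PTR_ERR(). A minimal userspace sketch of that encoding, assuming simplified stand-ins (MAX_ERRNO and create_device() are illustrative, not the kernel's actual definitions):

#include <errno.h>
#include <stdio.h>

/* Simplified model of the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers:
 * small negative errno values live at the very top of the pointer
 * space, so one return slot carries either a valid pointer or an
 * error code. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* hypothetical allocator used only for this demo */
static void *create_device(int fail)
{
	static int dev = 42;
	return fail ? ERR_PTR(-ENODEV) : (void *)&dev;
}

int main(void)
{
	void *dev = create_device(1);

	if (IS_ERR(dev)) {
		/* propagate the encoded errno instead of inventing one */
		printf("create_device failed: %ld\n", PTR_ERR(dev));
		return 1;
	}
	return 0;
}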
@@ -1630,14 +1653,14 @@ out: | |||
1630 | 1653 | ||
1631 | void core_dev_release_virtual_lun0(void) | 1654 | void core_dev_release_virtual_lun0(void) |
1632 | { | 1655 | { |
1633 | struct se_hba *hba = se_global->g_lun0_hba; | 1656 | struct se_hba *hba = lun0_hba; |
1634 | struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev; | 1657 | struct se_subsystem_dev *su_dev = lun0_su_dev; |
1635 | 1658 | ||
1636 | if (!(hba)) | 1659 | if (!hba) |
1637 | return; | 1660 | return; |
1638 | 1661 | ||
1639 | if (se_global->g_lun0_dev) | 1662 | if (g_lun0_dev) |
1640 | se_free_virtual_device(se_global->g_lun0_dev, hba); | 1663 | se_free_virtual_device(g_lun0_dev, hba); |
1641 | 1664 | ||
1642 | kfree(su_dev); | 1665 | kfree(su_dev); |
1643 | core_delete_hba(hba); | 1666 | core_delete_hba(hba); |
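Note: nearly every hunk in this series swaps bare printk(KERN_ERR/KERN_INFO) calls for the pr_err()/pr_debug() wrappers. A simplified userspace model of those macros follows; the real versions live in include/linux/printk.h, honor a per-file pr_fmt() prefix, and pr_debug() can be compiled out entirely or routed through dynamic debug, so the fprintf plumbing here is purely illustrative:

#include <stdio.h>

/* Userspace sketch of the kernel's pr_* wrappers. pr_fmt() lets
 * each file prepend a subsystem prefix; pr_debug() becomes a
 * no-op unless debugging is enabled. */
#define pr_fmt(fmt) "target_core: " fmt

#define pr_err(fmt, ...) \
	fprintf(stderr, "ERR: " pr_fmt(fmt), ##__VA_ARGS__)

#ifdef DEBUG
#define pr_debug(fmt, ...) \
	fprintf(stderr, "DBG: " pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) do { } while (0)
#endif

int main(void)
{
	pr_err("Unable to allocate struct se_hba\n");
	pr_debug("Setup generic %s\n", "tpg");	/* no-op unless -DDEBUG */
	return 0;
}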
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 07ab5a3bb8e8..f1654694f4ea 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c | |||
@@ -60,7 +60,7 @@ static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) | |||
60 | cit->ct_group_ops = _group_ops; \ | 60 | cit->ct_group_ops = _group_ops; \ |
61 | cit->ct_attrs = _attrs; \ | 61 | cit->ct_attrs = _attrs; \ |
62 | cit->ct_owner = tf->tf_module; \ | 62 | cit->ct_owner = tf->tf_module; \ |
63 | printk("Setup generic %s\n", __stringify(_name)); \ | 63 | pr_debug("Setup generic %s\n", __stringify(_name)); \ |
64 | } | 64 | } |
65 | 65 | ||
66 | /* Start of tfc_tpg_mappedlun_cit */ | 66 | /* Start of tfc_tpg_mappedlun_cit */ |
@@ -80,8 +80,8 @@ static int target_fabric_mappedlun_link( | |||
80 | /* | 80 | /* |
81 | * Ensure that the source port exists | 81 | * Ensure that the source port exists |
82 | */ | 82 | */ |
83 | if (!(lun->lun_sep) || !(lun->lun_sep->sep_tpg)) { | 83 | if (!lun->lun_sep || !lun->lun_sep->sep_tpg) { |
84 | printk(KERN_ERR "Source se_lun->lun_sep or lun->lun_sep->sep" | 84 | pr_err("Source se_lun->lun_sep or lun->lun_sep->sep" |
85 | "_tpg does not exist\n"); | 85 | "_tpg does not exist\n"); |
86 | return -EINVAL; | 86 | return -EINVAL; |
87 | } | 87 | } |
@@ -96,12 +96,12 @@ static int target_fabric_mappedlun_link( | |||
96 | * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT | 96 | * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT |
97 | */ | 97 | */ |
98 | if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) { | 98 | if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) { |
99 | printk(KERN_ERR "Illegal Initiator ACL SymLink outside of %s\n", | 99 | pr_err("Illegal Initiator ACL SymLink outside of %s\n", |
100 | config_item_name(wwn_ci)); | 100 | config_item_name(wwn_ci)); |
101 | return -EINVAL; | 101 | return -EINVAL; |
102 | } | 102 | } |
103 | if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) { | 103 | if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) { |
104 | printk(KERN_ERR "Illegal Initiator ACL Symlink outside of %s" | 104 | pr_err("Illegal Initiator ACL Symlink outside of %s" |
105 | " TPGT: %s\n", config_item_name(wwn_ci), | 105 | " TPGT: %s\n", config_item_name(wwn_ci), |
106 | config_item_name(tpg_ci)); | 106 | config_item_name(tpg_ci)); |
107 | return -EINVAL; | 107 | return -EINVAL; |
@@ -118,7 +118,7 @@ static int target_fabric_mappedlun_link( | |||
118 | lun_access = deve->lun_flags; | 118 | lun_access = deve->lun_flags; |
119 | else | 119 | else |
120 | lun_access = | 120 | lun_access = |
121 | (TPG_TFO(se_tpg)->tpg_check_prod_mode_write_protect( | 121 | (se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect( |
122 | se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY : | 122 | se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY : |
123 | TRANSPORT_LUNFLAGS_READ_WRITE; | 123 | TRANSPORT_LUNFLAGS_READ_WRITE; |
124 | spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock); | 124 | spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock); |
@@ -147,7 +147,7 @@ static int target_fabric_mappedlun_unlink( | |||
147 | /* | 147 | /* |
148 | * Determine if the underlying MappedLUN has already been released.. | 148 | * Determine if the underlying MappedLUN has already been released.. |
149 | */ | 149 | */ |
150 | if (!(deve->se_lun)) | 150 | if (!deve->se_lun) |
151 | return 0; | 151 | return 0; |
152 | 152 | ||
153 | lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group); | 153 | lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group); |
@@ -202,9 +202,9 @@ static ssize_t target_fabric_mappedlun_store_write_protect( | |||
202 | TRANSPORT_LUNFLAGS_READ_WRITE, | 202 | TRANSPORT_LUNFLAGS_READ_WRITE, |
203 | lacl->se_lun_nacl); | 203 | lacl->se_lun_nacl); |
204 | 204 | ||
205 | printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s" | 205 | pr_debug("%s_ConfigFS: Changed Initiator ACL: %s" |
206 | " Mapped LUN: %u Write Protect bit to %s\n", | 206 | " Mapped LUN: %u Write Protect bit to %s\n", |
207 | TPG_TFO(se_tpg)->get_fabric_name(), | 207 | se_tpg->se_tpg_tfo->get_fabric_name(), |
208 | lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF"); | 208 | lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF"); |
209 | 209 | ||
210 | return count; | 210 | return count; |
@@ -327,14 +327,14 @@ static struct config_group *target_fabric_make_mappedlun( | |||
327 | int ret = 0; | 327 | int ret = 0; |
328 | 328 | ||
329 | acl_ci = &group->cg_item; | 329 | acl_ci = &group->cg_item; |
330 | if (!(acl_ci)) { | 330 | if (!acl_ci) { |
331 | printk(KERN_ERR "Unable to locate acl_ci\n"); | 331 | pr_err("Unable to locate acl_ci\n"); |
332 | return NULL; | 332 | return NULL; |
333 | } | 333 | } |
334 | 334 | ||
335 | buf = kzalloc(strlen(name) + 1, GFP_KERNEL); | 335 | buf = kzalloc(strlen(name) + 1, GFP_KERNEL); |
336 | if (!(buf)) { | 336 | if (!buf) { |
337 | printk(KERN_ERR "Unable to allocate memory for name buf\n"); | 337 | pr_err("Unable to allocate memory for name buf\n"); |
338 | return ERR_PTR(-ENOMEM); | 338 | return ERR_PTR(-ENOMEM); |
339 | } | 339 | } |
340 | snprintf(buf, strlen(name) + 1, "%s", name); | 340 | snprintf(buf, strlen(name) + 1, "%s", name); |
@@ -342,7 +342,7 @@ static struct config_group *target_fabric_make_mappedlun( | |||
342 | * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID. | 342 | * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID. |
343 | */ | 343 | */ |
344 | if (strstr(buf, "lun_") != buf) { | 344 | if (strstr(buf, "lun_") != buf) { |
345 | printk(KERN_ERR "Unable to locate \"lun_\" from buf: %s" | 345 | pr_err("Unable to locate \"lun_\" from buf: %s" |
346 | " name: %s\n", buf, name); | 346 | " name: %s\n", buf, name); |
347 | ret = -EINVAL; | 347 | ret = -EINVAL; |
348 | goto out; | 348 | goto out; |
@@ -358,7 +358,7 @@ static struct config_group *target_fabric_make_mappedlun( | |||
358 | 358 | ||
359 | lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun, | 359 | lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun, |
360 | config_item_name(acl_ci), &ret); | 360 | config_item_name(acl_ci), &ret); |
361 | if (!(lacl)) { | 361 | if (!lacl) { |
362 | ret = -EINVAL; | 362 | ret = -EINVAL; |
363 | goto out; | 363 | goto out; |
364 | } | 364 | } |
@@ -367,7 +367,7 @@ static struct config_group *target_fabric_make_mappedlun( | |||
367 | lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, | 367 | lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, |
368 | GFP_KERNEL); | 368 | GFP_KERNEL); |
369 | if (!lacl_cg->default_groups) { | 369 | if (!lacl_cg->default_groups) { |
370 | printk(KERN_ERR "Unable to allocate lacl_cg->default_groups\n"); | 370 | pr_err("Unable to allocate lacl_cg->default_groups\n"); |
371 | ret = -ENOMEM; | 371 | ret = -ENOMEM; |
372 | goto out; | 372 | goto out; |
373 | } | 373 | } |
@@ -379,11 +379,11 @@ static struct config_group *target_fabric_make_mappedlun( | |||
379 | lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group; | 379 | lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group; |
380 | lacl_cg->default_groups[1] = NULL; | 380 | lacl_cg->default_groups[1] = NULL; |
381 | 381 | ||
382 | ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group; | 382 | ml_stat_grp = &lacl->ml_stat_grps.stat_group; |
383 | ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, | 383 | ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, |
384 | GFP_KERNEL); | 384 | GFP_KERNEL); |
385 | if (!ml_stat_grp->default_groups) { | 385 | if (!ml_stat_grp->default_groups) { |
386 | printk(KERN_ERR "Unable to allocate ml_stat_grp->default_groups\n"); | 386 | pr_err("Unable to allocate ml_stat_grp->default_groups\n"); |
387 | ret = -ENOMEM; | 387 | ret = -ENOMEM; |
388 | goto out; | 388 | goto out; |
389 | } | 389 | } |
@@ -408,7 +408,7 @@ static void target_fabric_drop_mappedlun( | |||
408 | struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL; | 408 | struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL; |
409 | int i; | 409 | int i; |
410 | 410 | ||
411 | ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group; | 411 | ml_stat_grp = &lacl->ml_stat_grps.stat_group; |
412 | for (i = 0; ml_stat_grp->default_groups[i]; i++) { | 412 | for (i = 0; ml_stat_grp->default_groups[i]; i++) { |
413 | df_item = &ml_stat_grp->default_groups[i]->cg_item; | 413 | df_item = &ml_stat_grp->default_groups[i]->cg_item; |
414 | ml_stat_grp->default_groups[i] = NULL; | 414 | ml_stat_grp->default_groups[i] = NULL; |
@@ -474,8 +474,8 @@ static struct config_group *target_fabric_make_nodeacl( | |||
474 | struct se_node_acl *se_nacl; | 474 | struct se_node_acl *se_nacl; |
475 | struct config_group *nacl_cg; | 475 | struct config_group *nacl_cg; |
476 | 476 | ||
477 | if (!(tf->tf_ops.fabric_make_nodeacl)) { | 477 | if (!tf->tf_ops.fabric_make_nodeacl) { |
478 | printk(KERN_ERR "tf->tf_ops.fabric_make_nodeacl is NULL\n"); | 478 | pr_err("tf->tf_ops.fabric_make_nodeacl is NULL\n"); |
479 | return ERR_PTR(-ENOSYS); | 479 | return ERR_PTR(-ENOSYS); |
480 | } | 480 | } |
481 | 481 | ||
@@ -572,13 +572,13 @@ static struct config_group *target_fabric_make_np( | |||
572 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | 572 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; |
573 | struct se_tpg_np *se_tpg_np; | 573 | struct se_tpg_np *se_tpg_np; |
574 | 574 | ||
575 | if (!(tf->tf_ops.fabric_make_np)) { | 575 | if (!tf->tf_ops.fabric_make_np) { |
576 | printk(KERN_ERR "tf->tf_ops.fabric_make_np is NULL\n"); | 576 | pr_err("tf->tf_ops.fabric_make_np is NULL\n"); |
577 | return ERR_PTR(-ENOSYS); | 577 | return ERR_PTR(-ENOSYS); |
578 | } | 578 | } |
579 | 579 | ||
580 | se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name); | 580 | se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name); |
581 | if (!(se_tpg_np) || IS_ERR(se_tpg_np)) | 581 | if (!se_tpg_np || IS_ERR(se_tpg_np)) |
582 | return ERR_PTR(-EINVAL); | 582 | return ERR_PTR(-EINVAL); |
583 | 583 | ||
584 | se_tpg_np->tpg_np_parent = se_tpg; | 584 | se_tpg_np->tpg_np_parent = se_tpg; |
@@ -627,10 +627,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp( | |||
627 | struct se_lun *lun, | 627 | struct se_lun *lun, |
628 | char *page) | 628 | char *page) |
629 | { | 629 | { |
630 | if (!(lun)) | 630 | if (!lun || !lun->lun_sep) |
631 | return -ENODEV; | ||
632 | |||
633 | if (!(lun->lun_sep)) | ||
634 | return -ENODEV; | 631 | return -ENODEV; |
635 | 632 | ||
636 | return core_alua_show_tg_pt_gp_info(lun->lun_sep, page); | 633 | return core_alua_show_tg_pt_gp_info(lun->lun_sep, page); |
@@ -641,10 +638,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp( | |||
641 | const char *page, | 638 | const char *page, |
642 | size_t count) | 639 | size_t count) |
643 | { | 640 | { |
644 | if (!(lun)) | 641 | if (!lun || !lun->lun_sep) |
645 | return -ENODEV; | ||
646 | |||
647 | if (!(lun->lun_sep)) | ||
648 | return -ENODEV; | 642 | return -ENODEV; |
649 | 643 | ||
650 | return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count); | 644 | return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count); |
@@ -659,10 +653,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline( | |||
659 | struct se_lun *lun, | 653 | struct se_lun *lun, |
660 | char *page) | 654 | char *page) |
661 | { | 655 | { |
662 | if (!(lun)) | 656 | if (!lun || !lun->lun_sep) |
663 | return -ENODEV; | ||
664 | |||
665 | if (!(lun->lun_sep)) | ||
666 | return -ENODEV; | 657 | return -ENODEV; |
667 | 658 | ||
668 | return core_alua_show_offline_bit(lun, page); | 659 | return core_alua_show_offline_bit(lun, page); |
@@ -673,10 +664,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline( | |||
673 | const char *page, | 664 | const char *page, |
674 | size_t count) | 665 | size_t count) |
675 | { | 666 | { |
676 | if (!(lun)) | 667 | if (!lun || !lun->lun_sep) |
677 | return -ENODEV; | ||
678 | |||
679 | if (!(lun->lun_sep)) | ||
680 | return -ENODEV; | 668 | return -ENODEV; |
681 | 669 | ||
682 | return core_alua_store_offline_bit(lun, page, count); | 670 | return core_alua_store_offline_bit(lun, page, count); |
@@ -691,10 +679,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_status( | |||
691 | struct se_lun *lun, | 679 | struct se_lun *lun, |
692 | char *page) | 680 | char *page) |
693 | { | 681 | { |
694 | if (!(lun)) | 682 | if (!lun || !lun->lun_sep) |
695 | return -ENODEV; | ||
696 | |||
697 | if (!(lun->lun_sep)) | ||
698 | return -ENODEV; | 683 | return -ENODEV; |
699 | 684 | ||
700 | return core_alua_show_secondary_status(lun, page); | 685 | return core_alua_show_secondary_status(lun, page); |
@@ -705,10 +690,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_status( | |||
705 | const char *page, | 690 | const char *page, |
706 | size_t count) | 691 | size_t count) |
707 | { | 692 | { |
708 | if (!(lun)) | 693 | if (!lun || !lun->lun_sep) |
709 | return -ENODEV; | ||
710 | |||
711 | if (!(lun->lun_sep)) | ||
712 | return -ENODEV; | 694 | return -ENODEV; |
713 | 695 | ||
714 | return core_alua_store_secondary_status(lun, page, count); | 696 | return core_alua_store_secondary_status(lun, page, count); |
@@ -723,10 +705,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md( | |||
723 | struct se_lun *lun, | 705 | struct se_lun *lun, |
724 | char *page) | 706 | char *page) |
725 | { | 707 | { |
726 | if (!(lun)) | 708 | if (!lun || !lun->lun_sep) |
727 | return -ENODEV; | ||
728 | |||
729 | if (!(lun->lun_sep)) | ||
730 | return -ENODEV; | 709 | return -ENODEV; |
731 | 710 | ||
732 | return core_alua_show_secondary_write_metadata(lun, page); | 711 | return core_alua_show_secondary_write_metadata(lun, page); |
@@ -737,10 +716,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md( | |||
737 | const char *page, | 716 | const char *page, |
738 | size_t count) | 717 | size_t count) |
739 | { | 718 | { |
740 | if (!(lun)) | 719 | if (!lun || !lun->lun_sep) |
741 | return -ENODEV; | ||
742 | |||
743 | if (!(lun->lun_sep)) | ||
744 | return -ENODEV; | 720 | return -ENODEV; |
745 | 721 | ||
746 | return core_alua_store_secondary_write_metadata(lun, page, count); | 722 | return core_alua_store_secondary_write_metadata(lun, page, count); |
@@ -781,13 +757,13 @@ static int target_fabric_port_link( | |||
781 | tf = se_tpg->se_tpg_wwn->wwn_tf; | 757 | tf = se_tpg->se_tpg_wwn->wwn_tf; |
782 | 758 | ||
783 | if (lun->lun_se_dev != NULL) { | 759 | if (lun->lun_se_dev != NULL) { |
784 | printk(KERN_ERR "Port Symlink already exists\n"); | 760 | pr_err("Port Symlink already exists\n"); |
785 | return -EEXIST; | 761 | return -EEXIST; |
786 | } | 762 | } |
787 | 763 | ||
788 | dev = se_dev->se_dev_ptr; | 764 | dev = se_dev->se_dev_ptr; |
789 | if (!(dev)) { | 765 | if (!dev) { |
790 | printk(KERN_ERR "Unable to locate struct se_device pointer from" | 766 | pr_err("Unable to locate struct se_device pointer from" |
791 | " %s\n", config_item_name(se_dev_ci)); | 767 | " %s\n", config_item_name(se_dev_ci)); |
792 | ret = -ENODEV; | 768 | ret = -ENODEV; |
793 | goto out; | 769 | goto out; |
@@ -795,8 +771,8 @@ static int target_fabric_port_link( | |||
795 | 771 | ||
796 | lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev, | 772 | lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev, |
797 | lun->unpacked_lun); | 773 | lun->unpacked_lun); |
798 | if ((IS_ERR(lun_p)) || !(lun_p)) { | 774 | if (IS_ERR(lun_p) || !lun_p) { |
799 | printk(KERN_ERR "core_dev_add_lun() failed\n"); | 775 | pr_err("core_dev_add_lun() failed\n"); |
800 | ret = -EINVAL; | 776 | ret = -EINVAL; |
801 | goto out; | 777 | goto out; |
802 | } | 778 | } |
@@ -888,7 +864,7 @@ static struct config_group *target_fabric_make_lun( | |||
888 | int errno; | 864 | int errno; |
889 | 865 | ||
890 | if (strstr(name, "lun_") != name) { | 866 | if (strstr(name, "lun_") != name) { |
891 | printk(KERN_ERR "Unable to locate \"_\" in" | 867 | pr_err("Unable to locate \"_\" in" |
892 | " \"lun_$LUN_NUMBER\"\n"); | 868 | " \"lun_$LUN_NUMBER\"\n"); |
893 | return ERR_PTR(-EINVAL); | 869 | return ERR_PTR(-EINVAL); |
894 | } | 870 | } |
@@ -896,14 +872,14 @@ static struct config_group *target_fabric_make_lun( | |||
896 | return ERR_PTR(-EINVAL); | 872 | return ERR_PTR(-EINVAL); |
897 | 873 | ||
898 | lun = core_get_lun_from_tpg(se_tpg, unpacked_lun); | 874 | lun = core_get_lun_from_tpg(se_tpg, unpacked_lun); |
899 | if (!(lun)) | 875 | if (!lun) |
900 | return ERR_PTR(-EINVAL); | 876 | return ERR_PTR(-EINVAL); |
901 | 877 | ||
902 | lun_cg = &lun->lun_group; | 878 | lun_cg = &lun->lun_group; |
903 | lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, | 879 | lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, |
904 | GFP_KERNEL); | 880 | GFP_KERNEL); |
905 | if (!lun_cg->default_groups) { | 881 | if (!lun_cg->default_groups) { |
906 | printk(KERN_ERR "Unable to allocate lun_cg->default_groups\n"); | 882 | pr_err("Unable to allocate lun_cg->default_groups\n"); |
907 | return ERR_PTR(-ENOMEM); | 883 | return ERR_PTR(-ENOMEM); |
908 | } | 884 | } |
909 | 885 | ||
@@ -914,11 +890,11 @@ static struct config_group *target_fabric_make_lun( | |||
914 | lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group; | 890 | lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group; |
915 | lun_cg->default_groups[1] = NULL; | 891 | lun_cg->default_groups[1] = NULL; |
916 | 892 | ||
917 | port_stat_grp = &PORT_STAT_GRP(lun)->stat_group; | 893 | port_stat_grp = &lun->port_stat_grps.stat_group; |
918 | port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, | 894 | port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, |
919 | GFP_KERNEL); | 895 | GFP_KERNEL); |
920 | if (!port_stat_grp->default_groups) { | 896 | if (!port_stat_grp->default_groups) { |
921 | printk(KERN_ERR "Unable to allocate port_stat_grp->default_groups\n"); | 897 | pr_err("Unable to allocate port_stat_grp->default_groups\n"); |
922 | errno = -ENOMEM; | 898 | errno = -ENOMEM; |
923 | goto out; | 899 | goto out; |
924 | } | 900 | } |
@@ -941,7 +917,7 @@ static void target_fabric_drop_lun( | |||
941 | struct config_group *lun_cg, *port_stat_grp; | 917 | struct config_group *lun_cg, *port_stat_grp; |
942 | int i; | 918 | int i; |
943 | 919 | ||
944 | port_stat_grp = &PORT_STAT_GRP(lun)->stat_group; | 920 | port_stat_grp = &lun->port_stat_grps.stat_group; |
945 | for (i = 0; port_stat_grp->default_groups[i]; i++) { | 921 | for (i = 0; port_stat_grp->default_groups[i]; i++) { |
946 | df_item = &port_stat_grp->default_groups[i]->cg_item; | 922 | df_item = &port_stat_grp->default_groups[i]->cg_item; |
947 | port_stat_grp->default_groups[i] = NULL; | 923 | port_stat_grp->default_groups[i] = NULL; |
@@ -1031,13 +1007,13 @@ static struct config_group *target_fabric_make_tpg( | |||
1031 | struct target_fabric_configfs *tf = wwn->wwn_tf; | 1007 | struct target_fabric_configfs *tf = wwn->wwn_tf; |
1032 | struct se_portal_group *se_tpg; | 1008 | struct se_portal_group *se_tpg; |
1033 | 1009 | ||
1034 | if (!(tf->tf_ops.fabric_make_tpg)) { | 1010 | if (!tf->tf_ops.fabric_make_tpg) { |
1035 | printk(KERN_ERR "tf->tf_ops.fabric_make_tpg is NULL\n"); | 1011 | pr_err("tf->tf_ops.fabric_make_tpg is NULL\n"); |
1036 | return ERR_PTR(-ENOSYS); | 1012 | return ERR_PTR(-ENOSYS); |
1037 | } | 1013 | } |
1038 | 1014 | ||
1039 | se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name); | 1015 | se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name); |
1040 | if (!(se_tpg) || IS_ERR(se_tpg)) | 1016 | if (!se_tpg || IS_ERR(se_tpg)) |
1041 | return ERR_PTR(-EINVAL); | 1017 | return ERR_PTR(-EINVAL); |
1042 | /* | 1018 | /* |
1043 | * Setup default groups from pre-allocated se_tpg->tpg_default_groups | 1019 | * Setup default groups from pre-allocated se_tpg->tpg_default_groups |
@@ -1130,13 +1106,13 @@ static struct config_group *target_fabric_make_wwn( | |||
1130 | struct target_fabric_configfs, tf_group); | 1106 | struct target_fabric_configfs, tf_group); |
1131 | struct se_wwn *wwn; | 1107 | struct se_wwn *wwn; |
1132 | 1108 | ||
1133 | if (!(tf->tf_ops.fabric_make_wwn)) { | 1109 | if (!tf->tf_ops.fabric_make_wwn) { |
1134 | printk(KERN_ERR "tf->tf_ops.fabric_make_wwn is NULL\n"); | 1110 | pr_err("tf->tf_ops.fabric_make_wwn is NULL\n"); |
1135 | return ERR_PTR(-ENOSYS); | 1111 | return ERR_PTR(-ENOSYS); |
1136 | } | 1112 | } |
1137 | 1113 | ||
1138 | wwn = tf->tf_ops.fabric_make_wwn(tf, group, name); | 1114 | wwn = tf->tf_ops.fabric_make_wwn(tf, group, name); |
1139 | if (!(wwn) || IS_ERR(wwn)) | 1115 | if (!wwn || IS_ERR(wwn)) |
1140 | return ERR_PTR(-EINVAL); | 1116 | return ERR_PTR(-EINVAL); |
1141 | 1117 | ||
1142 | wwn->wwn_tf = tf; | 1118 | wwn->wwn_tf = tf; |
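Note: a pattern repeated through target_core_fabric_configfs.c above: each make_* entry point first checks that the fabric driver actually wired up the corresponding function pointer, returning ERR_PTR(-ENOSYS) when it did not, and only then dispatches through the ops table. A minimal sketch of that guard, using illustrative stand-in types rather than the real se_wwn/target_fabric_configfs definitions:

#include <errno.h>
#include <stdio.h>

#define ERR_PTR(e)	((void *)(long)(e))
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-4095)

/* illustrative stand-ins for se_wwn and the fabric ops table */
struct wwn { const char *name; };

struct fabric_ops {
	struct wwn *(*make_wwn)(const char *name);	/* may be NULL */
};

static struct wwn *target_make_wwn(struct fabric_ops *ops, const char *name)
{
	struct wwn *wwn;

	if (!ops->make_wwn) {
		fprintf(stderr, "ops->make_wwn is NULL\n");
		return ERR_PTR(-ENOSYS);	/* op not implemented */
	}
	wwn = ops->make_wwn(name);
	if (!wwn || IS_ERR(wwn))
		return ERR_PTR(-EINVAL);	/* fabric-level failure */
	return wwn;
}

int main(void)
{
	struct fabric_ops ops = { .make_wwn = NULL };
	void *ret = target_make_wwn(&ops, "naa.6001405bbbf0b8f0");

	printf("got %s\n", IS_ERR(ret) ? "ERR_PTR" : "wwn");
	return 0;
}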
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c index 1e193f324895..c4ea3a9a555b 100644 --- a/drivers/target/target_core_fabric_lib.c +++ b/drivers/target/target_core_fabric_lib.c | |||
@@ -25,6 +25,7 @@ | |||
25 | * | 25 | * |
26 | ******************************************************************************/ | 26 | ******************************************************************************/ |
27 | 27 | ||
28 | #include <linux/kernel.h> | ||
28 | #include <linux/string.h> | 29 | #include <linux/string.h> |
29 | #include <linux/ctype.h> | 30 | #include <linux/ctype.h> |
30 | #include <linux/spinlock.h> | 31 | #include <linux/spinlock.h> |
@@ -61,9 +62,8 @@ u32 sas_get_pr_transport_id( | |||
61 | int *format_code, | 62 | int *format_code, |
62 | unsigned char *buf) | 63 | unsigned char *buf) |
63 | { | 64 | { |
64 | unsigned char binary, *ptr; | 65 | unsigned char *ptr; |
65 | int i; | 66 | |
66 | u32 off = 4; | ||
67 | /* | 67 | /* |
68 | * Set PROTOCOL IDENTIFIER to 6h for SAS | 68 | * Set PROTOCOL IDENTIFIER to 6h for SAS |
69 | */ | 69 | */ |
@@ -74,10 +74,8 @@ u32 sas_get_pr_transport_id( | |||
74 | */ | 74 | */ |
75 | ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa.' prefix */ | 75 | ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa.' prefix */ |
76 | 76 | ||
77 | for (i = 0; i < 16; i += 2) { | 77 | hex2bin(&buf[4], ptr, 8); |
78 | binary = transport_asciihex_to_binaryhex(&ptr[i]); | 78 | |
79 | buf[off++] = binary; | ||
80 | } | ||
81 | /* | 79 | /* |
82 | * The SAS Transport ID is a hardcoded 24-byte length | 80 | * The SAS Transport ID is a hardcoded 24-byte length |
83 | */ | 81 | */ |
@@ -157,7 +155,7 @@ u32 fc_get_pr_transport_id( | |||
157 | int *format_code, | 155 | int *format_code, |
158 | unsigned char *buf) | 156 | unsigned char *buf) |
159 | { | 157 | { |
160 | unsigned char binary, *ptr; | 158 | unsigned char *ptr; |
161 | int i; | 159 | int i; |
162 | u32 off = 8; | 160 | u32 off = 8; |
163 | /* | 161 | /* |
@@ -172,12 +170,11 @@ u32 fc_get_pr_transport_id( | |||
172 | ptr = &se_nacl->initiatorname[0]; | 170 | ptr = &se_nacl->initiatorname[0]; |
173 | 171 | ||
174 | for (i = 0; i < 24; ) { | 172 | for (i = 0; i < 24; ) { |
175 | if (!(strncmp(&ptr[i], ":", 1))) { | 173 | if (!strncmp(&ptr[i], ":", 1)) { |
176 | i++; | 174 | i++; |
177 | continue; | 175 | continue; |
178 | } | 176 | } |
179 | binary = transport_asciihex_to_binaryhex(&ptr[i]); | 177 | hex2bin(&buf[off++], &ptr[i], 1); |
180 | buf[off++] = binary; | ||
181 | i += 2; | 178 | i += 2; |
182 | } | 179 | } |
183 | /* | 180 | /* |
@@ -386,7 +383,7 @@ char *iscsi_parse_pr_out_transport_id( | |||
386 | * Reserved | 383 | * Reserved |
387 | */ | 384 | */ |
388 | if ((format_code != 0x00) && (format_code != 0x40)) { | 385 | if ((format_code != 0x00) && (format_code != 0x40)) { |
389 | printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI" | 386 | pr_err("Illegal format code: 0x%02x for iSCSI" |
390 | " Initiator Transport ID\n", format_code); | 387 | " Initiator Transport ID\n", format_code); |
391 | return NULL; | 388 | return NULL; |
392 | } | 389 | } |
@@ -406,7 +403,7 @@ char *iscsi_parse_pr_out_transport_id( | |||
406 | tid_len += padding; | 403 | tid_len += padding; |
407 | 404 | ||
408 | if ((add_len + 4) != tid_len) { | 405 | if ((add_len + 4) != tid_len) { |
409 | printk(KERN_INFO "LIO-Target Extracted add_len: %hu " | 406 | pr_debug("LIO-Target Extracted add_len: %hu " |
410 | "does not match calculated tid_len: %u," | 407 | "does not match calculated tid_len: %u," |
411 | " using tid_len instead\n", add_len+4, tid_len); | 408 | " using tid_len instead\n", add_len+4, tid_len); |
412 | *out_tid_len = tid_len; | 409 | *out_tid_len = tid_len; |
@@ -420,8 +417,8 @@ char *iscsi_parse_pr_out_transport_id( | |||
420 | */ | 417 | */ |
421 | if (format_code == 0x40) { | 418 | if (format_code == 0x40) { |
422 | p = strstr((char *)&buf[4], ",i,0x"); | 419 | p = strstr((char *)&buf[4], ",i,0x"); |
423 | if (!(p)) { | 420 | if (!p) { |
424 | printk(KERN_ERR "Unable to locate \",i,0x\" separator" | 421 | pr_err("Unable to locate \",i,0x\" separator" |
425 | " for Initiator port identifier: %s\n", | 422 | " for Initiator port identifier: %s\n", |
426 | (char *)&buf[4]); | 423 | (char *)&buf[4]); |
427 | return NULL; | 424 | return NULL; |
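Note: the fabric_lib hunks above retire the driver-private transport_asciihex_to_binaryhex() helper in favor of the generic hex2bin() from <linux/kernel.h> (hence the new include), decoding the WWN hex digits straight into the TransportID buffer. A userspace re-implementation of what hex2bin() does — the kernel version of this era returns void and, like this sketch, decodes 'count' output bytes from 2*count ASCII hex digits:

#include <ctype.h>
#include <stdio.h>

static int hex_to_bin(char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	ch = tolower(ch);
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	return -1;
}

/* Userspace sketch of lib/hexdump.c's hex2bin() */
static void hex2bin(unsigned char *dst, const char *src, size_t count)
{
	while (count--) {
		*dst = hex_to_bin(*src++) << 4;
		*dst++ |= hex_to_bin(*src++);
	}
}

int main(void)
{
	/* e.g. the NAA WWN digits after the "naa." prefix */
	unsigned char buf[8];
	size_t i;

	hex2bin(buf, "6001405bbbf0b8f0", 8);
	for (i = 0; i < 8; i++)
		printf("%02x", buf[i]);
	printf("\n");	/* prints: 6001405bbbf0b8f0 */
	return 0;
}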
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 150c4305f385..bc1b33639b8d 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c | |||
@@ -42,18 +42,6 @@ | |||
42 | 42 | ||
43 | #include "target_core_file.h" | 43 | #include "target_core_file.h" |
44 | 44 | ||
45 | #if 1 | ||
46 | #define DEBUG_FD_CACHE(x...) printk(x) | ||
47 | #else | ||
48 | #define DEBUG_FD_CACHE(x...) | ||
49 | #endif | ||
50 | |||
51 | #if 1 | ||
52 | #define DEBUG_FD_FUA(x...) printk(x) | ||
53 | #else | ||
54 | #define DEBUG_FD_FUA(x...) | ||
55 | #endif | ||
56 | |||
57 | static struct se_subsystem_api fileio_template; | 45 | static struct se_subsystem_api fileio_template; |
58 | 46 | ||
59 | /* fd_attach_hba(): (Part of se_subsystem_api_t template) | 47 | /* fd_attach_hba(): (Part of se_subsystem_api_t template) |
@@ -65,24 +53,21 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id) | |||
65 | struct fd_host *fd_host; | 53 | struct fd_host *fd_host; |
66 | 54 | ||
67 | fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL); | 55 | fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL); |
68 | if (!(fd_host)) { | 56 | if (!fd_host) { |
69 | printk(KERN_ERR "Unable to allocate memory for struct fd_host\n"); | 57 | pr_err("Unable to allocate memory for struct fd_host\n"); |
70 | return -1; | 58 | return -ENOMEM; |
71 | } | 59 | } |
72 | 60 | ||
73 | fd_host->fd_host_id = host_id; | 61 | fd_host->fd_host_id = host_id; |
74 | 62 | ||
75 | atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH); | 63 | hba->hba_ptr = fd_host; |
76 | atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH); | ||
77 | hba->hba_ptr = (void *) fd_host; | ||
78 | 64 | ||
79 | printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" | 65 | pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" |
80 | " Target Core Stack %s\n", hba->hba_id, FD_VERSION, | 66 | " Target Core Stack %s\n", hba->hba_id, FD_VERSION, |
81 | TARGET_CORE_MOD_VERSION); | 67 | TARGET_CORE_MOD_VERSION); |
82 | printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" | 68 | pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" |
83 | " Target Core with TCQ Depth: %d MaxSectors: %u\n", | 69 | " MaxSectors: %u\n", |
84 | hba->hba_id, fd_host->fd_host_id, | 70 | hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS); |
85 | atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS); | ||
86 | 71 | ||
87 | return 0; | 72 | return 0; |
88 | } | 73 | } |
@@ -91,7 +76,7 @@ static void fd_detach_hba(struct se_hba *hba) | |||
91 | { | 76 | { |
92 | struct fd_host *fd_host = hba->hba_ptr; | 77 | struct fd_host *fd_host = hba->hba_ptr; |
93 | 78 | ||
94 | printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic" | 79 | pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic" |
95 | " Target Core\n", hba->hba_id, fd_host->fd_host_id); | 80 | " Target Core\n", hba->hba_id, fd_host->fd_host_id); |
96 | 81 | ||
97 | kfree(fd_host); | 82 | kfree(fd_host); |
@@ -104,14 +89,14 @@ static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name) | |||
104 | struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr; | 89 | struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr; |
105 | 90 | ||
106 | fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL); | 91 | fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL); |
107 | if (!(fd_dev)) { | 92 | if (!fd_dev) { |
108 | printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n"); | 93 | pr_err("Unable to allocate memory for struct fd_dev\n"); |
109 | return NULL; | 94 | return NULL; |
110 | } | 95 | } |
111 | 96 | ||
112 | fd_dev->fd_host = fd_host; | 97 | fd_dev->fd_host = fd_host; |
113 | 98 | ||
114 | printk(KERN_INFO "FILEIO: Allocated fd_dev for %p\n", name); | 99 | pr_debug("FILEIO: Allocated fd_dev for %p\n", name); |
115 | 100 | ||
116 | return fd_dev; | 101 | return fd_dev; |
117 | } | 102 | } |
@@ -144,7 +129,7 @@ static struct se_device *fd_create_virtdevice( | |||
144 | set_fs(old_fs); | 129 | set_fs(old_fs); |
145 | 130 | ||
146 | if (IS_ERR(dev_p)) { | 131 | if (IS_ERR(dev_p)) { |
147 | printk(KERN_ERR "getname(%s) failed: %lu\n", | 132 | pr_err("getname(%s) failed: %lu\n", |
148 | fd_dev->fd_dev_name, IS_ERR(dev_p)); | 133 | fd_dev->fd_dev_name, IS_ERR(dev_p)); |
149 | ret = PTR_ERR(dev_p); | 134 | ret = PTR_ERR(dev_p); |
150 | goto fail; | 135 | goto fail; |
@@ -167,12 +152,12 @@ static struct se_device *fd_create_virtdevice( | |||
167 | 152 | ||
168 | file = filp_open(dev_p, flags, 0600); | 153 | file = filp_open(dev_p, flags, 0600); |
169 | if (IS_ERR(file)) { | 154 | if (IS_ERR(file)) { |
170 | printk(KERN_ERR "filp_open(%s) failed\n", dev_p); | 155 | pr_err("filp_open(%s) failed\n", dev_p); |
171 | ret = PTR_ERR(file); | 156 | ret = PTR_ERR(file); |
172 | goto fail; | 157 | goto fail; |
173 | } | 158 | } |
174 | if (!file || !file->f_dentry) { | 159 | if (!file || !file->f_dentry) { |
175 | printk(KERN_ERR "filp_open(%s) failed\n", dev_p); | 160 | pr_err("filp_open(%s) failed\n", dev_p); |
176 | goto fail; | 161 | goto fail; |
177 | } | 162 | } |
178 | fd_dev->fd_file = file; | 163 | fd_dev->fd_file = file; |
@@ -202,14 +187,14 @@ static struct se_device *fd_create_virtdevice( | |||
202 | fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) - | 187 | fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) - |
203 | fd_dev->fd_block_size); | 188 | fd_dev->fd_block_size); |
204 | 189 | ||
205 | printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct" | 190 | pr_debug("FILEIO: Using size: %llu bytes from struct" |
206 | " block_device blocks: %llu logical_block_size: %d\n", | 191 | " block_device blocks: %llu logical_block_size: %d\n", |
207 | fd_dev->fd_dev_size, | 192 | fd_dev->fd_dev_size, |
208 | div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size), | 193 | div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size), |
209 | fd_dev->fd_block_size); | 194 | fd_dev->fd_block_size); |
210 | } else { | 195 | } else { |
211 | if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) { | 196 | if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) { |
212 | printk(KERN_ERR "FILEIO: Missing fd_dev_size=" | 197 | pr_err("FILEIO: Missing fd_dev_size=" |
213 | " parameter, and no backing struct" | 198 | " parameter, and no backing struct" |
214 | " block_device\n"); | 199 | " block_device\n"); |
215 | goto fail; | 200 | goto fail; |
@@ -226,15 +211,15 @@ static struct se_device *fd_create_virtdevice( | |||
226 | dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH; | 211 | dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH; |
227 | 212 | ||
228 | dev = transport_add_device_to_core_hba(hba, &fileio_template, | 213 | dev = transport_add_device_to_core_hba(hba, &fileio_template, |
229 | se_dev, dev_flags, (void *)fd_dev, | 214 | se_dev, dev_flags, fd_dev, |
230 | &dev_limits, "FILEIO", FD_VERSION); | 215 | &dev_limits, "FILEIO", FD_VERSION); |
231 | if (!(dev)) | 216 | if (!dev) |
232 | goto fail; | 217 | goto fail; |
233 | 218 | ||
234 | fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++; | 219 | fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++; |
235 | fd_dev->fd_queue_depth = dev->queue_depth; | 220 | fd_dev->fd_queue_depth = dev->queue_depth; |
236 | 221 | ||
237 | printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s," | 222 | pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s," |
238 | " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id, | 223 | " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id, |
239 | fd_dev->fd_dev_name, fd_dev->fd_dev_size); | 224 | fd_dev->fd_dev_name, fd_dev->fd_dev_size); |
240 | 225 | ||
@@ -272,45 +257,45 @@ static inline struct fd_request *FILE_REQ(struct se_task *task) | |||
272 | 257 | ||
273 | 258 | ||
274 | static struct se_task * | 259 | static struct se_task * |
275 | fd_alloc_task(struct se_cmd *cmd) | 260 | fd_alloc_task(unsigned char *cdb) |
276 | { | 261 | { |
277 | struct fd_request *fd_req; | 262 | struct fd_request *fd_req; |
278 | 263 | ||
279 | fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL); | 264 | fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL); |
280 | if (!(fd_req)) { | 265 | if (!fd_req) { |
281 | printk(KERN_ERR "Unable to allocate struct fd_request\n"); | 266 | pr_err("Unable to allocate struct fd_request\n"); |
282 | return NULL; | 267 | return NULL; |
283 | } | 268 | } |
284 | 269 | ||
285 | fd_req->fd_dev = SE_DEV(cmd)->dev_ptr; | ||
286 | |||
287 | return &fd_req->fd_task; | 270 | return &fd_req->fd_task; |
288 | } | 271 | } |
289 | 272 | ||
290 | static int fd_do_readv(struct se_task *task) | 273 | static int fd_do_readv(struct se_task *task) |
291 | { | 274 | { |
292 | struct fd_request *req = FILE_REQ(task); | 275 | struct fd_request *req = FILE_REQ(task); |
293 | struct file *fd = req->fd_dev->fd_file; | 276 | struct fd_dev *dev = req->fd_task.se_dev->dev_ptr; |
277 | struct file *fd = dev->fd_file; | ||
294 | struct scatterlist *sg = task->task_sg; | 278 | struct scatterlist *sg = task->task_sg; |
295 | struct iovec *iov; | 279 | struct iovec *iov; |
296 | mm_segment_t old_fs; | 280 | mm_segment_t old_fs; |
297 | loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size); | 281 | loff_t pos = (task->task_lba * |
282 | task->se_dev->se_sub_dev->se_dev_attrib.block_size); | ||
298 | int ret = 0, i; | 283 | int ret = 0, i; |
299 | 284 | ||
300 | iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL); | 285 | iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); |
301 | if (!(iov)) { | 286 | if (!iov) { |
302 | printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n"); | 287 | pr_err("Unable to allocate fd_do_readv iov[]\n"); |
303 | return -1; | 288 | return -ENOMEM; |
304 | } | 289 | } |
305 | 290 | ||
306 | for (i = 0; i < task->task_sg_num; i++) { | 291 | for (i = 0; i < task->task_sg_nents; i++) { |
307 | iov[i].iov_len = sg[i].length; | 292 | iov[i].iov_len = sg[i].length; |
308 | iov[i].iov_base = sg_virt(&sg[i]); | 293 | iov[i].iov_base = sg_virt(&sg[i]); |
309 | } | 294 | } |
310 | 295 | ||
311 | old_fs = get_fs(); | 296 | old_fs = get_fs(); |
312 | set_fs(get_ds()); | 297 | set_fs(get_ds()); |
313 | ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos); | 298 | ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos); |
314 | set_fs(old_fs); | 299 | set_fs(old_fs); |
315 | 300 | ||
316 | kfree(iov); | 301 | kfree(iov); |
@@ -321,16 +306,16 @@ static int fd_do_readv(struct se_task *task) | |||
321 | */ | 306 | */ |
322 | if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) { | 307 | if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) { |
323 | if (ret < 0 || ret != task->task_size) { | 308 | if (ret < 0 || ret != task->task_size) { |
324 | printk(KERN_ERR "vfs_readv() returned %d," | 309 | pr_err("vfs_readv() returned %d," |
325 | " expecting %d for S_ISBLK\n", ret, | 310 | " expecting %d for S_ISBLK\n", ret, |
326 | (int)task->task_size); | 311 | (int)task->task_size); |
327 | return -1; | 312 | return (ret < 0 ? ret : -EINVAL); |
328 | } | 313 | } |
329 | } else { | 314 | } else { |
330 | if (ret < 0) { | 315 | if (ret < 0) { |
331 | printk(KERN_ERR "vfs_readv() returned %d for non" | 316 | pr_err("vfs_readv() returned %d for non" |
332 | " S_ISBLK\n", ret); | 317 | " S_ISBLK\n", ret); |
333 | return -1; | 318 | return ret; |
334 | } | 319 | } |
335 | } | 320 | } |
336 | 321 | ||
@@ -340,34 +325,36 @@ static int fd_do_readv(struct se_task *task) | |||
340 | static int fd_do_writev(struct se_task *task) | 325 | static int fd_do_writev(struct se_task *task) |
341 | { | 326 | { |
342 | struct fd_request *req = FILE_REQ(task); | 327 | struct fd_request *req = FILE_REQ(task); |
343 | struct file *fd = req->fd_dev->fd_file; | 328 | struct fd_dev *dev = req->fd_task.se_dev->dev_ptr; |
329 | struct file *fd = dev->fd_file; | ||
344 | struct scatterlist *sg = task->task_sg; | 330 | struct scatterlist *sg = task->task_sg; |
345 | struct iovec *iov; | 331 | struct iovec *iov; |
346 | mm_segment_t old_fs; | 332 | mm_segment_t old_fs; |
347 | loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size); | 333 | loff_t pos = (task->task_lba * |
334 | task->se_dev->se_sub_dev->se_dev_attrib.block_size); | ||
348 | int ret, i = 0; | 335 | int ret, i = 0; |
349 | 336 | ||
350 | iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL); | 337 | iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); |
351 | if (!(iov)) { | 338 | if (!iov) { |
352 | printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n"); | 339 | pr_err("Unable to allocate fd_do_writev iov[]\n"); |
353 | return -1; | 340 | return -ENOMEM; |
354 | } | 341 | } |
355 | 342 | ||
356 | for (i = 0; i < task->task_sg_num; i++) { | 343 | for (i = 0; i < task->task_sg_nents; i++) { |
357 | iov[i].iov_len = sg[i].length; | 344 | iov[i].iov_len = sg[i].length; |
358 | iov[i].iov_base = sg_virt(&sg[i]); | 345 | iov[i].iov_base = sg_virt(&sg[i]); |
359 | } | 346 | } |
360 | 347 | ||
361 | old_fs = get_fs(); | 348 | old_fs = get_fs(); |
362 | set_fs(get_ds()); | 349 | set_fs(get_ds()); |
363 | ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos); | 350 | ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos); |
364 | set_fs(old_fs); | 351 | set_fs(old_fs); |
365 | 352 | ||
366 | kfree(iov); | 353 | kfree(iov); |
367 | 354 | ||
368 | if (ret < 0 || ret != task->task_size) { | 355 | if (ret < 0 || ret != task->task_size) { |
369 | printk(KERN_ERR "vfs_writev() returned %d\n", ret); | 356 | pr_err("vfs_writev() returned %d\n", ret); |
370 | return -1; | 357 | return (ret < 0 ? ret : -EINVAL); |
371 | } | 358 | } |
372 | 359 | ||
373 | return 1; | 360 | return 1; |
@@ -375,10 +362,10 @@ static int fd_do_writev(struct se_task *task) | |||
375 | 362 | ||
376 | static void fd_emulate_sync_cache(struct se_task *task) | 363 | static void fd_emulate_sync_cache(struct se_task *task) |
377 | { | 364 | { |
378 | struct se_cmd *cmd = TASK_CMD(task); | 365 | struct se_cmd *cmd = task->task_se_cmd; |
379 | struct se_device *dev = cmd->se_dev; | 366 | struct se_device *dev = cmd->se_dev; |
380 | struct fd_dev *fd_dev = dev->dev_ptr; | 367 | struct fd_dev *fd_dev = dev->dev_ptr; |
381 | int immed = (cmd->t_task->t_task_cdb[1] & 0x2); | 368 | int immed = (cmd->t_task_cdb[1] & 0x2); |
382 | loff_t start, end; | 369 | loff_t start, end; |
383 | int ret; | 370 | int ret; |
384 | 371 | ||
@@ -392,11 +379,11 @@ static void fd_emulate_sync_cache(struct se_task *task) | |||
392 | /* | 379 | /* |
393 | * Determine if we will be flushing the entire device. | 380 | * Determine if we will be flushing the entire device. |
394 | */ | 381 | */ |
395 | if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) { | 382 | if (cmd->t_task_lba == 0 && cmd->data_length == 0) { |
396 | start = 0; | 383 | start = 0; |
397 | end = LLONG_MAX; | 384 | end = LLONG_MAX; |
398 | } else { | 385 | } else { |
399 | start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size; | 386 | start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size; |
400 | if (cmd->data_length) | 387 | if (cmd->data_length) |
401 | end = start + cmd->data_length; | 388 | end = start + cmd->data_length; |
402 | else | 389 | else |
@@ -405,7 +392,7 @@ static void fd_emulate_sync_cache(struct se_task *task) | |||
405 | 392 | ||
406 | ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); | 393 | ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); |
407 | if (ret != 0) | 394 | if (ret != 0) |
408 | printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret); | 395 | pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); |
409 | 396 | ||
410 | if (!immed) | 397 | if (!immed) |
411 | transport_complete_sync_cache(cmd, ret == 0); | 398 | transport_complete_sync_cache(cmd, ret == 0); |
@@ -446,16 +433,16 @@ static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task) | |||
446 | { | 433 | { |
447 | struct se_device *dev = cmd->se_dev; | 434 | struct se_device *dev = cmd->se_dev; |
448 | struct fd_dev *fd_dev = dev->dev_ptr; | 435 | struct fd_dev *fd_dev = dev->dev_ptr; |
449 | loff_t start = task->task_lba * DEV_ATTRIB(dev)->block_size; | 436 | loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size; |
450 | loff_t end = start + task->task_size; | 437 | loff_t end = start + task->task_size; |
451 | int ret; | 438 | int ret; |
452 | 439 | ||
453 | DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", | 440 | pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", |
454 | task->task_lba, task->task_size); | 441 | task->task_lba, task->task_size); |
455 | 442 | ||
456 | ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); | 443 | ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); |
457 | if (ret != 0) | 444 | if (ret != 0) |
458 | printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret); | 445 | pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); |
459 | } | 446 | } |
460 | 447 | ||
461 | static int fd_do_task(struct se_task *task) | 448 | static int fd_do_task(struct se_task *task) |
@@ -474,9 +461,9 @@ static int fd_do_task(struct se_task *task) | |||
474 | ret = fd_do_writev(task); | 461 | ret = fd_do_writev(task); |
475 | 462 | ||
476 | if (ret > 0 && | 463 | if (ret > 0 && |
477 | DEV_ATTRIB(dev)->emulate_write_cache > 0 && | 464 | dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && |
478 | DEV_ATTRIB(dev)->emulate_fua_write > 0 && | 465 | dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && |
479 | T_TASK(cmd)->t_tasks_fua) { | 466 | cmd->t_tasks_fua) { |
480 | /* | 467 | /* |
481 | * We might need to be a bit smarter here | 468 | * We might need to be a bit smarter here |
482 | * and return some sense data to let the initiator | 469 | * and return some sense data to let the initiator |
@@ -549,7 +536,7 @@ static ssize_t fd_set_configfs_dev_params( | |||
549 | snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME, | 536 | snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME, |
550 | "%s", arg_p); | 537 | "%s", arg_p); |
551 | kfree(arg_p); | 538 | kfree(arg_p); |
552 | printk(KERN_INFO "FILEIO: Referencing Path: %s\n", | 539 | pr_debug("FILEIO: Referencing Path: %s\n", |
553 | fd_dev->fd_dev_name); | 540 | fd_dev->fd_dev_name); |
554 | fd_dev->fbd_flags |= FBDF_HAS_PATH; | 541 | fd_dev->fbd_flags |= FBDF_HAS_PATH; |
555 | break; | 542 | break; |
@@ -562,23 +549,23 @@ static ssize_t fd_set_configfs_dev_params( | |||
562 | ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size); | 549 | ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size); |
563 | kfree(arg_p); | 550 | kfree(arg_p); |
564 | if (ret < 0) { | 551 | if (ret < 0) { |
565 | printk(KERN_ERR "strict_strtoull() failed for" | 552 | pr_err("strict_strtoull() failed for" |
566 | " fd_dev_size=\n"); | 553 | " fd_dev_size=\n"); |
567 | goto out; | 554 | goto out; |
568 | } | 555 | } |
569 | printk(KERN_INFO "FILEIO: Referencing Size: %llu" | 556 | pr_debug("FILEIO: Referencing Size: %llu" |
570 | " bytes\n", fd_dev->fd_dev_size); | 557 | " bytes\n", fd_dev->fd_dev_size); |
571 | fd_dev->fbd_flags |= FBDF_HAS_SIZE; | 558 | fd_dev->fbd_flags |= FBDF_HAS_SIZE; |
572 | break; | 559 | break; |
573 | case Opt_fd_buffered_io: | 560 | case Opt_fd_buffered_io: |
574 | match_int(args, &arg); | 561 | match_int(args, &arg); |
575 | if (arg != 1) { | 562 | if (arg != 1) { |
576 | printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg); | 563 | pr_err("bogus fd_buffered_io=%d value\n", arg); |
577 | ret = -EINVAL; | 564 | ret = -EINVAL; |
578 | goto out; | 565 | goto out; |
579 | } | 566 | } |
580 | 567 | ||
581 | printk(KERN_INFO "FILEIO: Using buffered I/O" | 568 | pr_debug("FILEIO: Using buffered I/O" |
582 | " operations for struct fd_dev\n"); | 569 | " operations for struct fd_dev\n"); |
583 | 570 | ||
584 | fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO; | 571 | fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO; |
@@ -598,8 +585,8 @@ static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys | |||
598 | struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr; | 585 | struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr; |
599 | 586 | ||
600 | if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) { | 587 | if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) { |
601 | printk(KERN_ERR "Missing fd_dev_name=\n"); | 588 | pr_err("Missing fd_dev_name=\n"); |
602 | return -1; | 589 | return -EINVAL; |
603 | } | 590 | } |
604 | 591 | ||
605 | return 0; | 592 | return 0; |
@@ -654,7 +641,7 @@ static sector_t fd_get_blocks(struct se_device *dev) | |||
654 | { | 641 | { |
655 | struct fd_dev *fd_dev = dev->dev_ptr; | 642 | struct fd_dev *fd_dev = dev->dev_ptr; |
656 | unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size, | 643 | unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size, |
657 | DEV_ATTRIB(dev)->block_size); | 644 | dev->se_sub_dev->se_dev_attrib.block_size); |
658 | 645 | ||
659 | return blocks_long; | 646 | return blocks_long; |
660 | } | 647 | } |
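Note: fd_do_readv() and fd_do_writev() above follow the same shape: translate the task's scatterlist into an iovec array, compute the byte offset as task_lba times the device block size, and issue a single vectored VFS call. A rough userspace analog with preadv(2); struct seg, read_segments(), and the /dev/zero target are all invented for this demo:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

/* stand-in for one scatterlist entry: a buffer + length */
struct seg {
	void *buf;
	size_t len;
};

/* Map each segment to an iovec and issue one vectored read
 * at byte offset lba * block_size, as fd_do_readv() does. */
static ssize_t read_segments(int fd, struct seg *segs, int nents,
			     off_t lba, unsigned block_size)
{
	struct iovec *iov = calloc(nents, sizeof(*iov));
	ssize_t ret;
	int i;

	if (!iov)
		return -1;
	for (i = 0; i < nents; i++) {
		iov[i].iov_base = segs[i].buf;
		iov[i].iov_len = segs[i].len;
	}
	ret = preadv(fd, iov, nents, (off_t)lba * block_size);
	free(iov);
	return ret;
}

int main(void)
{
	char a[512], b[512];
	struct seg segs[] = { { a, sizeof(a) }, { b, sizeof(b) } };
	int fd = open("/dev/zero", O_RDONLY);

	if (fd < 0)
		return 1;
	printf("read %zd bytes\n",
	       read_segments(fd, segs, 2, 4, 512));	/* LBA 4 */
	close(fd);
	return 0;
}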
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index ef4de2b4bd46..daebd710b893 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h | |||
@@ -4,8 +4,6 @@ | |||
4 | #define FD_VERSION "4.0" | 4 | #define FD_VERSION "4.0" |
5 | 5 | ||
6 | #define FD_MAX_DEV_NAME 256 | 6 | #define FD_MAX_DEV_NAME 256 |
7 | /* Maximum queuedepth for the FILEIO HBA */ | ||
8 | #define FD_HBA_QUEUE_DEPTH 256 | ||
9 | #define FD_DEVICE_QUEUE_DEPTH 32 | 7 | #define FD_DEVICE_QUEUE_DEPTH 32 |
10 | #define FD_MAX_DEVICE_QUEUE_DEPTH 128 | 8 | #define FD_MAX_DEVICE_QUEUE_DEPTH 128 |
11 | #define FD_BLOCKSIZE 512 | 9 | #define FD_BLOCKSIZE 512 |
@@ -18,8 +16,6 @@ struct fd_request { | |||
18 | struct se_task fd_task; | 16 | struct se_task fd_task; |
19 | /* SCSI CDB from iSCSI Command PDU */ | 17 | /* SCSI CDB from iSCSI Command PDU */ |
20 | unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE]; | 18 | unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE]; |
21 | /* FILEIO device */ | ||
22 | struct fd_dev *fd_dev; | ||
23 | } ____cacheline_aligned; | 19 | } ____cacheline_aligned; |
24 | 20 | ||
25 | #define FBDF_HAS_PATH 0x01 | 21 | #define FBDF_HAS_PATH 0x01 |
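Note: the header change above drops the cached struct fd_dev pointer from fd_request; after the companion change in target_core_file.c, the device is derived on demand from the task's se_dev link instead of being stashed in every request. A toy sketch of that derivation, where all types are pared-down stand-ins for the real ones:

#include <stdio.h>

struct fd_dev { const char *name; };
struct se_device { void *dev_ptr; };
struct se_task { struct se_device *se_dev; };
struct fd_request { struct se_task fd_task; /* no fd_dev cache */ };

/* Derive the backend device from the task's existing link,
 * mirroring req->fd_task.se_dev->dev_ptr in fd_do_readv(). */
static struct fd_dev *request_dev(struct fd_request *req)
{
	return req->fd_task.se_dev->dev_ptr;
}

int main(void)
{
	struct fd_dev fdev = { "virt_lun0" };
	struct se_device sdev = { &fdev };
	struct fd_request req = { { &sdev } };

	printf("%s\n", request_dev(&req)->name);
	return 0;
}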
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c index 0b8f8da89019..0639b975d6f5 100644 --- a/drivers/target/target_core_hba.c +++ b/drivers/target/target_core_hba.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * Filename: target_core_hba.c | 2 | * Filename: target_core_hba.c |
3 | * | 3 | * |
4 | * This file copntains the iSCSI HBA Transport related functions. | 4 | * This file contains the TCM HBA Transport related functions. |
5 | * | 5 | * |
6 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. | 6 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. |
7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. | 7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. |
@@ -45,6 +45,11 @@ | |||
45 | static LIST_HEAD(subsystem_list); | 45 | static LIST_HEAD(subsystem_list); |
46 | static DEFINE_MUTEX(subsystem_mutex); | 46 | static DEFINE_MUTEX(subsystem_mutex); |
47 | 47 | ||
48 | static u32 hba_id_counter; | ||
49 | |||
50 | static DEFINE_SPINLOCK(hba_lock); | ||
51 | static LIST_HEAD(hba_list); | ||
52 | |||
48 | int transport_subsystem_register(struct se_subsystem_api *sub_api) | 53 | int transport_subsystem_register(struct se_subsystem_api *sub_api) |
49 | { | 54 | { |
50 | struct se_subsystem_api *s; | 55 | struct se_subsystem_api *s; |
@@ -53,8 +58,8 @@ int transport_subsystem_register(struct se_subsystem_api *sub_api) | |||
53 | 58 | ||
54 | mutex_lock(&subsystem_mutex); | 59 | mutex_lock(&subsystem_mutex); |
55 | list_for_each_entry(s, &subsystem_list, sub_api_list) { | 60 | list_for_each_entry(s, &subsystem_list, sub_api_list) { |
56 | if (!(strcmp(s->name, sub_api->name))) { | 61 | if (!strcmp(s->name, sub_api->name)) { |
57 | printk(KERN_ERR "%p is already registered with" | 62 | pr_err("%p is already registered with" |
58 | " duplicate name %s, unable to process" | 63 | " duplicate name %s, unable to process" |
59 | " request\n", s, s->name); | 64 | " request\n", s, s->name); |
60 | mutex_unlock(&subsystem_mutex); | 65 | mutex_unlock(&subsystem_mutex); |
@@ -64,7 +69,7 @@ int transport_subsystem_register(struct se_subsystem_api *sub_api) | |||
64 | list_add_tail(&sub_api->sub_api_list, &subsystem_list); | 69 | list_add_tail(&sub_api->sub_api_list, &subsystem_list); |
65 | mutex_unlock(&subsystem_mutex); | 70 | mutex_unlock(&subsystem_mutex); |
66 | 71 | ||
67 | printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:" | 72 | pr_debug("TCM: Registered subsystem plugin: %s struct module:" |
68 | " %p\n", sub_api->name, sub_api->owner); | 73 | " %p\n", sub_api->name, sub_api->owner); |
69 | return 0; | 74 | return 0; |
70 | } | 75 | } |
@@ -104,21 +109,17 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) | |||
104 | 109 | ||
105 | hba = kzalloc(sizeof(*hba), GFP_KERNEL); | 110 | hba = kzalloc(sizeof(*hba), GFP_KERNEL); |
106 | if (!hba) { | 111 | if (!hba) { |
107 | printk(KERN_ERR "Unable to allocate struct se_hba\n"); | 112 | pr_err("Unable to allocate struct se_hba\n"); |
108 | return ERR_PTR(-ENOMEM); | 113 | return ERR_PTR(-ENOMEM); |
109 | } | 114 | } |
110 | 115 | ||
111 | INIT_LIST_HEAD(&hba->hba_dev_list); | 116 | INIT_LIST_HEAD(&hba->hba_dev_list); |
112 | spin_lock_init(&hba->device_lock); | 117 | spin_lock_init(&hba->device_lock); |
113 | spin_lock_init(&hba->hba_queue_lock); | ||
114 | mutex_init(&hba->hba_access_mutex); | 118 | mutex_init(&hba->hba_access_mutex); |
115 | 119 | ||
116 | hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX); | 120 | hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX); |
117 | hba->hba_flags |= hba_flags; | 121 | hba->hba_flags |= hba_flags; |
118 | 122 | ||
119 | atomic_set(&hba->max_queue_depth, 0); | ||
120 | atomic_set(&hba->left_queue_depth, 0); | ||
121 | |||
122 | hba->transport = core_get_backend(plugin_name); | 123 | hba->transport = core_get_backend(plugin_name); |
123 | if (!hba->transport) { | 124 | if (!hba->transport) { |
124 | ret = -EINVAL; | 125 | ret = -EINVAL; |
@@ -129,12 +130,12 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) | |||
129 | if (ret < 0) | 130 | if (ret < 0) |
130 | goto out_module_put; | 131 | goto out_module_put; |
131 | 132 | ||
132 | spin_lock(&se_global->hba_lock); | 133 | spin_lock(&hba_lock); |
133 | hba->hba_id = se_global->g_hba_id_counter++; | 134 | hba->hba_id = hba_id_counter++; |
134 | list_add_tail(&hba->hba_list, &se_global->g_hba_list); | 135 | list_add_tail(&hba->hba_node, &hba_list); |
135 | spin_unlock(&se_global->hba_lock); | 136 | spin_unlock(&hba_lock); |
136 | 137 | ||
137 | printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target" | 138 | pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target" |
138 | " Core\n", hba->hba_id); | 139 | " Core\n", hba->hba_id); |
139 | 140 | ||
140 | return hba; | 141 | return hba; |
@@ -156,11 +157,11 @@ core_delete_hba(struct se_hba *hba) | |||
156 | 157 | ||
157 | hba->transport->detach_hba(hba); | 158 | hba->transport->detach_hba(hba); |
158 | 159 | ||
159 | spin_lock(&se_global->hba_lock); | 160 | spin_lock(&hba_lock); |
160 | list_del(&hba->hba_list); | 161 | list_del(&hba->hba_node); |
161 | spin_unlock(&se_global->hba_lock); | 162 | spin_unlock(&hba_lock); |
162 | 163 | ||
163 | printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target" | 164 | pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target" |
164 | " Core\n", hba->hba_id); | 165 | " Core\n", hba->hba_id); |
165 | 166 | ||
166 | if (hba->transport->owner) | 167 | if (hba->transport->owner) |
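
The target_core_hba.c hunks above retire the old se_global container: the HBA id counter, its spinlock, and the HBA list become file-scope statics, and hba_node/hba_lock replace the g_-prefixed fields. A minimal sketch of the resulting pattern, using hypothetical my_hba names rather than the real target-core structures:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct my_hba {
            u32 id;
            struct list_head node;
    };

    /* File-scope registry state; no global container object required. */
    static u32 my_hba_id_counter;
    static DEFINE_SPINLOCK(my_hba_lock);
    static LIST_HEAD(my_hba_list);

    static struct my_hba *my_hba_attach(void)
    {
            struct my_hba *hba = kzalloc(sizeof(*hba), GFP_KERNEL);

            if (!hba)
                    return NULL;

            spin_lock(&my_hba_lock);
            hba->id = my_hba_id_counter++;  /* id assigned under the lock */
            list_add_tail(&hba->node, &my_hba_list);
            spin_unlock(&my_hba_lock);

            return hba;
    }

    static void my_hba_detach(struct my_hba *hba)
    {
            spin_lock(&my_hba_lock);
            list_del(&hba->node);
            spin_unlock(&my_hba_lock);
            kfree(hba);
    }
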
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 86639004af9e..7e1234105442 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c | |||
@@ -47,12 +47,6 @@ | |||
47 | 47 | ||
48 | #include "target_core_iblock.h" | 48 | #include "target_core_iblock.h" |
49 | 49 | ||
50 | #if 0 | ||
51 | #define DEBUG_IBLOCK(x...) printk(x) | ||
52 | #else | ||
53 | #define DEBUG_IBLOCK(x...) | ||
54 | #endif | ||
55 | |||
56 | static struct se_subsystem_api iblock_template; | 50 | static struct se_subsystem_api iblock_template; |
57 | 51 | ||
58 | static void iblock_bio_done(struct bio *, int); | 52 | static void iblock_bio_done(struct bio *, int); |
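
Dropping the hand-rolled DEBUG_IBLOCK() macro in favor of pr_debug() trades a compile-time #if 0 switch for the kernel's standard debug plumbing: the calls vanish unless DEBUG is defined for the file, and with CONFIG_DYNAMIC_DEBUG they can be flipped on per call site at runtime. For example:

    /*
     * pr_debug() compiles out unless DEBUG is defined for this file
     * (ccflags-y += -DDEBUG in the Makefile), or CONFIG_DYNAMIC_DEBUG
     * is enabled, in which case individual sites can be toggled live:
     *
     *   echo 'file target_core_iblock.c +p' \
     *           > /sys/kernel/debug/dynamic_debug/control
     */
    pr_debug("IBLOCK: Created bio_set()\n");
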
@@ -66,25 +60,22 @@ static int iblock_attach_hba(struct se_hba *hba, u32 host_id) | |||
66 | struct iblock_hba *ib_host; | 60 | struct iblock_hba *ib_host; |
67 | 61 | ||
68 | ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL); | 62 | ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL); |
69 | if (!(ib_host)) { | 63 | if (!ib_host) { |
70 | printk(KERN_ERR "Unable to allocate memory for" | 64 | pr_err("Unable to allocate memory for" |
71 | " struct iblock_hba\n"); | 65 | " struct iblock_hba\n"); |
72 | return -ENOMEM; | 66 | return -ENOMEM; |
73 | } | 67 | } |
74 | 68 | ||
75 | ib_host->iblock_host_id = host_id; | 69 | ib_host->iblock_host_id = host_id; |
76 | 70 | ||
77 | atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH); | 71 | hba->hba_ptr = ib_host; |
78 | atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH); | ||
79 | hba->hba_ptr = (void *) ib_host; | ||
80 | 72 | ||
81 | printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on" | 73 | pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on" |
82 | " Generic Target Core Stack %s\n", hba->hba_id, | 74 | " Generic Target Core Stack %s\n", hba->hba_id, |
83 | IBLOCK_VERSION, TARGET_CORE_MOD_VERSION); | 75 | IBLOCK_VERSION, TARGET_CORE_MOD_VERSION); |
84 | 76 | ||
85 | printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic" | 77 | pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n", |
86 | " Target Core TCQ Depth: %d\n", hba->hba_id, | 78 | hba->hba_id, ib_host->iblock_host_id); |
87 | ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth)); | ||
88 | 79 | ||
89 | return 0; | 80 | return 0; |
90 | } | 81 | } |
@@ -93,7 +84,7 @@ static void iblock_detach_hba(struct se_hba *hba) | |||
93 | { | 84 | { |
94 | struct iblock_hba *ib_host = hba->hba_ptr; | 85 | struct iblock_hba *ib_host = hba->hba_ptr; |
95 | 86 | ||
96 | printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic" | 87 | pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic" |
97 | " Target Core\n", hba->hba_id, ib_host->iblock_host_id); | 88 | " Target Core\n", hba->hba_id, ib_host->iblock_host_id); |
98 | 89 | ||
99 | kfree(ib_host); | 90 | kfree(ib_host); |
@@ -106,13 +97,13 @@ static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name) | |||
106 | struct iblock_hba *ib_host = hba->hba_ptr; | 97 | struct iblock_hba *ib_host = hba->hba_ptr; |
107 | 98 | ||
108 | ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL); | 99 | ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL); |
109 | if (!(ib_dev)) { | 100 | if (!ib_dev) { |
110 | printk(KERN_ERR "Unable to allocate struct iblock_dev\n"); | 101 | pr_err("Unable to allocate struct iblock_dev\n"); |
111 | return NULL; | 102 | return NULL; |
112 | } | 103 | } |
113 | ib_dev->ibd_host = ib_host; | 104 | ib_dev->ibd_host = ib_host; |
114 | 105 | ||
115 | printk(KERN_INFO "IBLOCK: Allocated ib_dev for %s\n", name); | 106 | pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name); |
116 | 107 | ||
117 | return ib_dev; | 108 | return ib_dev; |
118 | } | 109 | } |
@@ -131,8 +122,8 @@ static struct se_device *iblock_create_virtdevice( | |||
131 | u32 dev_flags = 0; | 122 | u32 dev_flags = 0; |
132 | int ret = -EINVAL; | 123 | int ret = -EINVAL; |
133 | 124 | ||
134 | if (!(ib_dev)) { | 125 | if (!ib_dev) { |
135 | printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n"); | 126 | pr_err("Unable to locate struct iblock_dev parameter\n"); |
136 | return ERR_PTR(ret); | 127 | return ERR_PTR(ret); |
137 | } | 128 | } |
138 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); | 129 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); |
@@ -140,16 +131,16 @@ static struct se_device *iblock_create_virtdevice( | |||
140 | * These settings need to be made tunable.. | 131 | * These settings need to be made tunable.. |
141 | */ | 132 | */ |
142 | ib_dev->ibd_bio_set = bioset_create(32, 64); | 133 | ib_dev->ibd_bio_set = bioset_create(32, 64); |
143 | if (!(ib_dev->ibd_bio_set)) { | 134 | if (!ib_dev->ibd_bio_set) { |
144 | printk(KERN_ERR "IBLOCK: Unable to create bioset()\n"); | 135 | pr_err("IBLOCK: Unable to create bioset()\n"); |
145 | return ERR_PTR(-ENOMEM); | 136 | return ERR_PTR(-ENOMEM); |
146 | } | 137 | } |
147 | printk(KERN_INFO "IBLOCK: Created bio_set()\n"); | 138 | pr_debug("IBLOCK: Created bio_set()\n"); |
148 | /* | 139 | /* |
149 | * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path | 140 | * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path |
150 | * must already have been set in order for echo 1 > $HBA/$DEV/enable to run. | 141 | * must already have been set in order for echo 1 > $HBA/$DEV/enable to run. |
151 | */ | 142 | */ |
152 | printk(KERN_INFO "IBLOCK: Claiming struct block_device: %s\n", | 143 | pr_debug( "IBLOCK: Claiming struct block_device: %s\n", |
153 | ib_dev->ibd_udev_path); | 144 | ib_dev->ibd_udev_path); |
154 | 145 | ||
155 | bd = blkdev_get_by_path(ib_dev->ibd_udev_path, | 146 | bd = blkdev_get_by_path(ib_dev->ibd_udev_path, |
@@ -167,42 +158,41 @@ static struct se_device *iblock_create_virtdevice( | |||
167 | limits->logical_block_size = bdev_logical_block_size(bd); | 158 | limits->logical_block_size = bdev_logical_block_size(bd); |
168 | limits->max_hw_sectors = queue_max_hw_sectors(q); | 159 | limits->max_hw_sectors = queue_max_hw_sectors(q); |
169 | limits->max_sectors = queue_max_sectors(q); | 160 | limits->max_sectors = queue_max_sectors(q); |
170 | dev_limits.hw_queue_depth = IBLOCK_MAX_DEVICE_QUEUE_DEPTH; | 161 | dev_limits.hw_queue_depth = q->nr_requests; |
171 | dev_limits.queue_depth = IBLOCK_DEVICE_QUEUE_DEPTH; | 162 | dev_limits.queue_depth = q->nr_requests; |
172 | 163 | ||
173 | ib_dev->ibd_major = MAJOR(bd->bd_dev); | ||
174 | ib_dev->ibd_minor = MINOR(bd->bd_dev); | ||
175 | ib_dev->ibd_bd = bd; | 164 | ib_dev->ibd_bd = bd; |
176 | 165 | ||
177 | dev = transport_add_device_to_core_hba(hba, | 166 | dev = transport_add_device_to_core_hba(hba, |
178 | &iblock_template, se_dev, dev_flags, (void *)ib_dev, | 167 | &iblock_template, se_dev, dev_flags, ib_dev, |
179 | &dev_limits, "IBLOCK", IBLOCK_VERSION); | 168 | &dev_limits, "IBLOCK", IBLOCK_VERSION); |
180 | if (!(dev)) | 169 | if (!dev) |
181 | goto failed; | 170 | goto failed; |
182 | 171 | ||
183 | ib_dev->ibd_depth = dev->queue_depth; | ||
184 | |||
185 | /* | 172 | /* |
186 | * Check if the underlying struct block_device request_queue supports | 173 | * Check if the underlying struct block_device request_queue supports |
187 | * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM | 174 | * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM |
188 | * in ATA and we need to set TPE=1 | 175 | * in ATA and we need to set TPE=1 |
189 | */ | 176 | */ |
190 | if (blk_queue_discard(q)) { | 177 | if (blk_queue_discard(q)) { |
191 | DEV_ATTRIB(dev)->max_unmap_lba_count = | 178 | dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = |
192 | q->limits.max_discard_sectors; | 179 | q->limits.max_discard_sectors; |
193 | /* | 180 | /* |
194 | * Currently hardcoded to 1 in Linux/SCSI code.. | 181 | * Currently hardcoded to 1 in Linux/SCSI code.. |
195 | */ | 182 | */ |
196 | DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1; | 183 | dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1; |
197 | DEV_ATTRIB(dev)->unmap_granularity = | 184 | dev->se_sub_dev->se_dev_attrib.unmap_granularity = |
198 | q->limits.discard_granularity; | 185 | q->limits.discard_granularity; |
199 | DEV_ATTRIB(dev)->unmap_granularity_alignment = | 186 | dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = |
200 | q->limits.discard_alignment; | 187 | q->limits.discard_alignment; |
201 | 188 | ||
202 | printk(KERN_INFO "IBLOCK: BLOCK Discard support available," | 189 | pr_debug("IBLOCK: BLOCK Discard support available," |
203 | " disabled by default\n"); | 190 | " disabled by default\n"); |
204 | } | 191 | } |
205 | 192 | ||
193 | if (blk_queue_nonrot(q)) | ||
194 | dev->se_sub_dev->se_dev_attrib.is_nonrot = 1; | ||
195 | |||
206 | return dev; | 196 | return dev; |
207 | 197 | ||
208 | failed: | 198 | failed: |
@@ -211,8 +201,6 @@ failed: | |||
211 | ib_dev->ibd_bio_set = NULL; | 201 | ib_dev->ibd_bio_set = NULL; |
212 | } | 202 | } |
213 | ib_dev->ibd_bd = NULL; | 203 | ib_dev->ibd_bd = NULL; |
214 | ib_dev->ibd_major = 0; | ||
215 | ib_dev->ibd_minor = 0; | ||
216 | return ERR_PTR(ret); | 204 | return ERR_PTR(ret); |
217 | } | 205 | } |
218 | 206 | ||
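
Taken together, the iblock_create_virtdevice() hunks replace the hardcoded IBLOCK_*_QUEUE_DEPTH constants and the cached major/minor numbers with values probed from the backing request_queue, and pick up the non-rotational flag for the new Block Device Characteristics VPD page. Condensed into one sketch, with attrib standing in for &dev->se_sub_dev->se_dev_attrib:

    struct request_queue *q = bdev_get_queue(bd);

    /* Queue depth defaults now track the backing device. */
    dev_limits.hw_queue_depth = q->nr_requests;
    dev_limits.queue_depth = q->nr_requests;

    /* UNMAP/WRITE_SAME limits come straight from the discard limits. */
    if (blk_queue_discard(q)) {
            attrib->max_unmap_lba_count = q->limits.max_discard_sectors;
            attrib->max_unmap_block_desc_count = 1; /* Linux/SCSI limit */
            attrib->unmap_granularity = q->limits.discard_granularity;
            attrib->unmap_granularity_alignment =
                            q->limits.discard_alignment;
    }

    /* Non-rotational media get flagged for reporting in VPD page 0xB1. */
    if (blk_queue_nonrot(q))
            attrib->is_nonrot = 1;
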
@@ -233,17 +221,16 @@ static inline struct iblock_req *IBLOCK_REQ(struct se_task *task) | |||
233 | } | 221 | } |
234 | 222 | ||
235 | static struct se_task * | 223 | static struct se_task * |
236 | iblock_alloc_task(struct se_cmd *cmd) | 224 | iblock_alloc_task(unsigned char *cdb) |
237 | { | 225 | { |
238 | struct iblock_req *ib_req; | 226 | struct iblock_req *ib_req; |
239 | 227 | ||
240 | ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); | 228 | ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); |
241 | if (!(ib_req)) { | 229 | if (!ib_req) { |
242 | printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n"); | 230 | pr_err("Unable to allocate memory for struct iblock_req\n"); |
243 | return NULL; | 231 | return NULL; |
244 | } | 232 | } |
245 | 233 | ||
246 | ib_req->ib_dev = SE_DEV(cmd)->dev_ptr; | ||
247 | atomic_set(&ib_req->ib_bio_cnt, 0); | 234 | atomic_set(&ib_req->ib_bio_cnt, 0); |
248 | return &ib_req->ib_task; | 235 | return &ib_req->ib_task; |
249 | } | 236 | } |
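
iblock_alloc_task() now takes only the CDB, and the ib_dev back-pointer is gone from struct iblock_req; the backend reaches its device through task->se_dev when it actually needs it. The request wrapper still embeds the generic struct se_task, so the private state is recovered from a task pointer with the usual embedding idiom, sketched here (assuming the IBLOCK_REQ() helper named in the hunk header is the standard container_of() accessor):

    struct iblock_req {
            struct se_task ib_task;         /* generic part, embedded */
            atomic_t ib_bio_cnt;
            atomic_t ib_bio_err_cnt;
            struct bio *ib_bio;
    };

    static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
    {
            return container_of(task, struct iblock_req, ib_task);
    }
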
@@ -257,12 +244,12 @@ static unsigned long long iblock_emulate_read_cap_with_block_size( | |||
257 | bdev_logical_block_size(bd)) - 1); | 244 | bdev_logical_block_size(bd)) - 1); |
258 | u32 block_size = bdev_logical_block_size(bd); | 245 | u32 block_size = bdev_logical_block_size(bd); |
259 | 246 | ||
260 | if (block_size == DEV_ATTRIB(dev)->block_size) | 247 | if (block_size == dev->se_sub_dev->se_dev_attrib.block_size) |
261 | return blocks_long; | 248 | return blocks_long; |
262 | 249 | ||
263 | switch (block_size) { | 250 | switch (block_size) { |
264 | case 4096: | 251 | case 4096: |
265 | switch (DEV_ATTRIB(dev)->block_size) { | 252 | switch (dev->se_sub_dev->se_dev_attrib.block_size) { |
266 | case 2048: | 253 | case 2048: |
267 | blocks_long <<= 1; | 254 | blocks_long <<= 1; |
268 | break; | 255 | break; |
@@ -276,7 +263,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size( | |||
276 | } | 263 | } |
277 | break; | 264 | break; |
278 | case 2048: | 265 | case 2048: |
279 | switch (DEV_ATTRIB(dev)->block_size) { | 266 | switch (dev->se_sub_dev->se_dev_attrib.block_size) { |
280 | case 4096: | 267 | case 4096: |
281 | blocks_long >>= 1; | 268 | blocks_long >>= 1; |
282 | break; | 269 | break; |
@@ -291,7 +278,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size( | |||
291 | } | 278 | } |
292 | break; | 279 | break; |
293 | case 1024: | 280 | case 1024: |
294 | switch (DEV_ATTRIB(dev)->block_size) { | 281 | switch (dev->se_sub_dev->se_dev_attrib.block_size) { |
295 | case 4096: | 282 | case 4096: |
296 | blocks_long >>= 2; | 283 | blocks_long >>= 2; |
297 | break; | 284 | break; |
@@ -306,7 +293,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size( | |||
306 | } | 293 | } |
307 | break; | 294 | break; |
308 | case 512: | 295 | case 512: |
309 | switch (DEV_ATTRIB(dev)->block_size) { | 296 | switch (dev->se_sub_dev->se_dev_attrib.block_size) { |
310 | case 4096: | 297 | case 4096: |
311 | blocks_long >>= 3; | 298 | blocks_long >>= 3; |
312 | break; | 299 | break; |
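
The nested switches above rescale a capacity from the backing device's logical block size to the configured TCM block size. All four supported sizes are powers of two, so each conversion is just a shift by the difference of exponents; a standalone userspace demonstration of the arithmetic (not the kernel code, which also folds in the trailing "last LBA" minus-one):

    #include <stdio.h>

    /* Rescale a block count between two power-of-two block sizes:
     * halving the block size doubles the count, and vice versa. */
    static unsigned long long rescale(unsigned long long blocks,
                                      unsigned int from_bs,
                                      unsigned int to_bs)
    {
            while (from_bs > to_bs) {       /* e.g. 4096 -> 512 */
                    blocks <<= 1;
                    from_bs >>= 1;
            }
            while (from_bs < to_bs) {       /* e.g. 512 -> 4096 */
                    blocks >>= 1;
                    from_bs <<= 1;
            }
            return blocks;
    }

    int main(void)
    {
            /* 1000 4096-byte blocks present as 8000 512-byte blocks. */
            printf("%llu\n", rescale(1000, 4096, 512));
            return 0;
    }
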
@@ -332,9 +319,9 @@ static unsigned long long iblock_emulate_read_cap_with_block_size( | |||
332 | */ | 319 | */ |
333 | static void iblock_emulate_sync_cache(struct se_task *task) | 320 | static void iblock_emulate_sync_cache(struct se_task *task) |
334 | { | 321 | { |
335 | struct se_cmd *cmd = TASK_CMD(task); | 322 | struct se_cmd *cmd = task->task_se_cmd; |
336 | struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; | 323 | struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; |
337 | int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2); | 324 | int immed = (cmd->t_task_cdb[1] & 0x2); |
338 | sector_t error_sector; | 325 | sector_t error_sector; |
339 | int ret; | 326 | int ret; |
340 | 327 | ||
@@ -351,7 +338,7 @@ static void iblock_emulate_sync_cache(struct se_task *task) | |||
351 | */ | 338 | */ |
352 | ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector); | 339 | ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector); |
353 | if (ret != 0) { | 340 | if (ret != 0) { |
354 | printk(KERN_ERR "IBLOCK: block_issue_flush() failed: %d " | 341 | pr_err("IBLOCK: block_issue_flush() failed: %d " |
355 | " error_sector: %llu\n", ret, | 342 | " error_sector: %llu\n", ret, |
356 | (unsigned long long)error_sector); | 343 | (unsigned long long)error_sector); |
357 | } | 344 | } |
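
The immed flag read above is the IMMED bit (byte 1, bit 1) of SYNCHRONIZE CACHE: when set, the target may return GOOD status before the flush actually completes. A sketch of how that ordering plays out around blkdev_issue_flush(), paraphrasing the surrounding function rather than quoting it:

    int immed = (cmd->t_task_cdb[1] & 0x2);
    sector_t error_sector;
    int ret;

    /* IMMED set: ack the command first, then flush afterwards. */
    if (immed)
            transport_complete_sync_cache(cmd, 1);

    ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
    if (ret)
            pr_err("IBLOCK: flush failed: %d\n", ret);

    /* IMMED clear: returned status must reflect the flush result. */
    if (!immed)
            transport_complete_sync_cache(cmd, ret == 0);
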
@@ -401,9 +388,9 @@ static int iblock_do_task(struct se_task *task) | |||
401 | * Force data to disk if we pretend to not have a volatile | 388 | * Force data to disk if we pretend to not have a volatile |
402 | * write cache, or the initiator set the Force Unit Access bit. | 389 | * write cache, or the initiator set the Force Unit Access bit. |
403 | */ | 390 | */ |
404 | if (DEV_ATTRIB(dev)->emulate_write_cache == 0 || | 391 | if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || |
405 | (DEV_ATTRIB(dev)->emulate_fua_write > 0 && | 392 | (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && |
406 | T_TASK(task->task_se_cmd)->t_tasks_fua)) | 393 | task->task_se_cmd->t_tasks_fua)) |
407 | rw = WRITE_FUA; | 394 | rw = WRITE_FUA; |
408 | else | 395 | else |
409 | rw = WRITE; | 396 | rw = WRITE; |
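
The rewritten condition picks the write type: if the device claims no volatile write cache (emulate_write_cache == 0), or FUA emulation is enabled and the initiator set the FUA bit in the CDB, bios go out as WRITE_FUA so completion implies the data is on stable media; otherwise a plain WRITE suffices and a later SYNCHRONIZE CACHE covers durability. The same decision as a one-line helper (a sketch, not in-tree code):

    /* FUA is needed when there is no cache to hide behind, or when the
     * initiator explicitly asked for force-unit-access semantics. */
    static inline int iblock_write_needs_fua(struct se_dev_attrib *attr,
                                             int t_tasks_fua)
    {
            return attr->emulate_write_cache == 0 ||
                   (attr->emulate_fua_write > 0 && t_tasks_fua);
    }
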
@@ -415,8 +402,9 @@ static int iblock_do_task(struct se_task *task) | |||
415 | while (bio) { | 402 | while (bio) { |
416 | nbio = bio->bi_next; | 403 | nbio = bio->bi_next; |
417 | bio->bi_next = NULL; | 404 | bio->bi_next = NULL; |
418 | DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p" | 405 | pr_debug("Calling submit_bio() task: %p bio: %p" |
419 | " bio->bi_sector: %llu\n", task, bio, bio->bi_sector); | 406 | " bio->bi_sector: %llu\n", task, bio, |
407 | (unsigned long long)bio->bi_sector); | ||
420 | 408 | ||
421 | submit_bio(rw, bio); | 409 | submit_bio(rw, bio); |
422 | bio = nbio; | 410 | bio = nbio; |
@@ -470,7 +458,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, | |||
470 | struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr; | 458 | struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr; |
471 | char *orig, *ptr, *arg_p, *opts; | 459 | char *orig, *ptr, *arg_p, *opts; |
472 | substring_t args[MAX_OPT_ARGS]; | 460 | substring_t args[MAX_OPT_ARGS]; |
473 | int ret = 0, arg, token; | 461 | int ret = 0, token; |
474 | 462 | ||
475 | opts = kstrdup(page, GFP_KERNEL); | 463 | opts = kstrdup(page, GFP_KERNEL); |
476 | if (!opts) | 464 | if (!opts) |
@@ -486,7 +474,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, | |||
486 | switch (token) { | 474 | switch (token) { |
487 | case Opt_udev_path: | 475 | case Opt_udev_path: |
488 | if (ib_dev->ibd_bd) { | 476 | if (ib_dev->ibd_bd) { |
489 | printk(KERN_ERR "Unable to set udev_path= while" | 477 | pr_err("Unable to set udev_path= while" |
490 | " ib_dev->ibd_bd exists\n"); | 478 | " ib_dev->ibd_bd exists\n"); |
491 | ret = -EEXIST; | 479 | ret = -EEXIST; |
492 | goto out; | 480 | goto out; |
@@ -499,15 +487,11 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, | |||
499 | snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN, | 487 | snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN, |
500 | "%s", arg_p); | 488 | "%s", arg_p); |
501 | kfree(arg_p); | 489 | kfree(arg_p); |
502 | printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n", | 490 | pr_debug("IBLOCK: Referencing UDEV path: %s\n", |
503 | ib_dev->ibd_udev_path); | 491 | ib_dev->ibd_udev_path); |
504 | ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; | 492 | ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; |
505 | break; | 493 | break; |
506 | case Opt_force: | 494 | case Opt_force: |
507 | match_int(args, &arg); | ||
508 | ib_dev->ibd_force = arg; | ||
509 | printk(KERN_INFO "IBLOCK: Set force=%d\n", | ||
510 | ib_dev->ibd_force); | ||
511 | break; | 495 | break; |
512 | default: | 496 | default: |
513 | break; | 497 | break; |
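
The parameter parser follows the stock match_token() recipe: kstrdup() the page, split it with strsep(), classify each token against a match_table_t, and match_strdup() string arguments (which the caller must kfree()). With the dead force= branch reduced to a no-op, the skeleton is roughly this (tokens, the Opt_* values, and the page/count store-method parameters are assumed declared as in the file):

    opts = kstrdup(page, GFP_KERNEL);
    if (!opts)
            return -ENOMEM;
    orig = opts;                    /* strsep() advances opts */

    while ((ptr = strsep(&opts, ",")) != NULL) {
            if (!*ptr)
                    continue;
            token = match_token(ptr, tokens, args);
            switch (token) {
            case Opt_udev_path:
                    arg_p = match_strdup(&args[0]);
                    if (!arg_p) {
                            ret = -ENOMEM;
                            break;
                    }
                    snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
                                    "%s", arg_p);
                    kfree(arg_p);
                    ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
                    break;
            default:
                    break;
            }
    }

    kfree(orig);
    return (!ret) ? count : ret;    /* store methods return count on success */
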
@@ -526,8 +510,8 @@ static ssize_t iblock_check_configfs_dev_params( | |||
526 | struct iblock_dev *ibd = se_dev->se_dev_su_ptr; | 510 | struct iblock_dev *ibd = se_dev->se_dev_su_ptr; |
527 | 511 | ||
528 | if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) { | 512 | if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) { |
529 | printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n"); | 513 | pr_err("Missing udev_path= parameters for IBLOCK\n"); |
530 | return -1; | 514 | return -EINVAL; |
531 | } | 515 | } |
532 | 516 | ||
533 | return 0; | 517 | return 0; |
@@ -555,12 +539,11 @@ static ssize_t iblock_show_configfs_dev_params( | |||
555 | bl += sprintf(b + bl, " "); | 539 | bl += sprintf(b + bl, " "); |
556 | if (bd) { | 540 | if (bd) { |
557 | bl += sprintf(b + bl, "Major: %d Minor: %d %s\n", | 541 | bl += sprintf(b + bl, "Major: %d Minor: %d %s\n", |
558 | ibd->ibd_major, ibd->ibd_minor, (!bd->bd_contains) ? | 542 | MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ? |
559 | "" : (bd->bd_holder == (struct iblock_dev *)ibd) ? | 543 | "" : (bd->bd_holder == (struct iblock_dev *)ibd) ? |
560 | "CLAIMED: IBLOCK" : "CLAIMED: OS"); | 544 | "CLAIMED: IBLOCK" : "CLAIMED: OS"); |
561 | } else { | 545 | } else { |
562 | bl += sprintf(b + bl, "Major: %d Minor: %d\n", | 546 | bl += sprintf(b + bl, "Major: 0 Minor: 0\n"); |
563 | ibd->ibd_major, ibd->ibd_minor); | ||
564 | } | 547 | } |
565 | 548 | ||
566 | return bl; | 549 | return bl; |
@@ -585,103 +568,103 @@ static struct bio *iblock_get_bio( | |||
585 | struct bio *bio; | 568 | struct bio *bio; |
586 | 569 | ||
587 | bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); | 570 | bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); |
588 | if (!(bio)) { | 571 | if (!bio) { |
589 | printk(KERN_ERR "Unable to allocate memory for bio\n"); | 572 | pr_err("Unable to allocate memory for bio\n"); |
590 | *ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | 573 | *ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; |
591 | return NULL; | 574 | return NULL; |
592 | } | 575 | } |
593 | 576 | ||
594 | DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:" | 577 | pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:" |
595 | " %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set); | 578 | " %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set); |
596 | DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size); | 579 | pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size); |
597 | 580 | ||
598 | bio->bi_bdev = ib_dev->ibd_bd; | 581 | bio->bi_bdev = ib_dev->ibd_bd; |
599 | bio->bi_private = (void *) task; | 582 | bio->bi_private = task; |
600 | bio->bi_destructor = iblock_bio_destructor; | 583 | bio->bi_destructor = iblock_bio_destructor; |
601 | bio->bi_end_io = &iblock_bio_done; | 584 | bio->bi_end_io = &iblock_bio_done; |
602 | bio->bi_sector = lba; | 585 | bio->bi_sector = lba; |
603 | atomic_inc(&ib_req->ib_bio_cnt); | 586 | atomic_inc(&ib_req->ib_bio_cnt); |
604 | 587 | ||
605 | DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector); | 588 | pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector); |
606 | DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n", | 589 | pr_debug("Set ib_req->ib_bio_cnt: %d\n", |
607 | atomic_read(&ib_req->ib_bio_cnt)); | 590 | atomic_read(&ib_req->ib_bio_cnt)); |
608 | return bio; | 591 | return bio; |
609 | } | 592 | } |
610 | 593 | ||
611 | static int iblock_map_task_SG(struct se_task *task) | 594 | static int iblock_map_data_SG(struct se_task *task) |
612 | { | 595 | { |
613 | struct se_cmd *cmd = task->task_se_cmd; | 596 | struct se_cmd *cmd = task->task_se_cmd; |
614 | struct se_device *dev = SE_DEV(cmd); | 597 | struct se_device *dev = cmd->se_dev; |
615 | struct iblock_dev *ib_dev = task->se_dev->dev_ptr; | 598 | struct iblock_dev *ib_dev = task->se_dev->dev_ptr; |
616 | struct iblock_req *ib_req = IBLOCK_REQ(task); | 599 | struct iblock_req *ib_req = IBLOCK_REQ(task); |
617 | struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; | 600 | struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; |
618 | struct scatterlist *sg; | 601 | struct scatterlist *sg; |
619 | int ret = 0; | 602 | int ret = 0; |
620 | u32 i, sg_num = task->task_sg_num; | 603 | u32 i, sg_num = task->task_sg_nents; |
621 | sector_t block_lba; | 604 | sector_t block_lba; |
622 | /* | 605 | /* |
623 | * Do starting conversion up from non 512-byte blocksize with | 606 | * Do starting conversion up from non 512-byte blocksize with |
624 | * struct se_task SCSI blocksize into Linux/Block 512 units for BIO. | 607 | * struct se_task SCSI blocksize into Linux/Block 512 units for BIO. |
625 | */ | 608 | */ |
626 | if (DEV_ATTRIB(dev)->block_size == 4096) | 609 | if (dev->se_sub_dev->se_dev_attrib.block_size == 4096) |
627 | block_lba = (task->task_lba << 3); | 610 | block_lba = (task->task_lba << 3); |
628 | else if (DEV_ATTRIB(dev)->block_size == 2048) | 611 | else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048) |
629 | block_lba = (task->task_lba << 2); | 612 | block_lba = (task->task_lba << 2); |
630 | else if (DEV_ATTRIB(dev)->block_size == 1024) | 613 | else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024) |
631 | block_lba = (task->task_lba << 1); | 614 | block_lba = (task->task_lba << 1); |
632 | else if (DEV_ATTRIB(dev)->block_size == 512) | 615 | else if (dev->se_sub_dev->se_dev_attrib.block_size == 512) |
633 | block_lba = task->task_lba; | 616 | block_lba = task->task_lba; |
634 | else { | 617 | else { |
635 | printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:" | 618 | pr_err("Unsupported SCSI -> BLOCK LBA conversion:" |
636 | " %u\n", DEV_ATTRIB(dev)->block_size); | 619 | " %u\n", dev->se_sub_dev->se_dev_attrib.block_size); |
637 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 620 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
638 | } | 621 | } |
639 | 622 | ||
640 | bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num); | 623 | bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num); |
641 | if (!(bio)) | 624 | if (!bio) |
642 | return ret; | 625 | return ret; |
643 | 626 | ||
644 | ib_req->ib_bio = bio; | 627 | ib_req->ib_bio = bio; |
645 | hbio = tbio = bio; | 628 | hbio = tbio = bio; |
646 | /* | 629 | /* |
647 | * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist | 630 | * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist |
648 | * from TCM struct se_mem -> task->task_sg -> struct scatterlist memory. | 631 | * from task->task_sg -> struct scatterlist memory. |
649 | */ | 632 | */ |
650 | for_each_sg(task->task_sg, sg, task->task_sg_num, i) { | 633 | for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { |
651 | DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:" | 634 | pr_debug("task: %p bio: %p Calling bio_add_page(): page:" |
652 | " %p len: %u offset: %u\n", task, bio, sg_page(sg), | 635 | " %p len: %u offset: %u\n", task, bio, sg_page(sg), |
653 | sg->length, sg->offset); | 636 | sg->length, sg->offset); |
654 | again: | 637 | again: |
655 | ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset); | 638 | ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset); |
656 | if (ret != sg->length) { | 639 | if (ret != sg->length) { |
657 | 640 | ||
658 | DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n", | 641 | pr_debug("*** Set bio->bi_sector: %llu\n", |
659 | bio->bi_sector); | 642 | (unsigned long long)bio->bi_sector); |
660 | DEBUG_IBLOCK("** task->task_size: %u\n", | 643 | pr_debug("** task->task_size: %u\n", |
661 | task->task_size); | 644 | task->task_size); |
662 | DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n", | 645 | pr_debug("*** bio->bi_max_vecs: %u\n", |
663 | bio->bi_max_vecs); | 646 | bio->bi_max_vecs); |
664 | DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n", | 647 | pr_debug("*** bio->bi_vcnt: %u\n", |
665 | bio->bi_vcnt); | 648 | bio->bi_vcnt); |
666 | 649 | ||
667 | bio = iblock_get_bio(task, ib_req, ib_dev, &ret, | 650 | bio = iblock_get_bio(task, ib_req, ib_dev, &ret, |
668 | block_lba, sg_num); | 651 | block_lba, sg_num); |
669 | if (!(bio)) | 652 | if (!bio) |
670 | goto fail; | 653 | goto fail; |
671 | 654 | ||
672 | tbio = tbio->bi_next = bio; | 655 | tbio = tbio->bi_next = bio; |
673 | DEBUG_IBLOCK("-----------------> Added +1 bio: %p to" | 656 | pr_debug("-----------------> Added +1 bio: %p to" |
674 | " list, Going to again\n", bio); | 657 | " list, Going to again\n", bio); |
675 | goto again; | 658 | goto again; |
676 | } | 659 | } |
677 | /* Always in 512 byte units for Linux/Block */ | 660 | /* Always in 512 byte units for Linux/Block */ |
678 | block_lba += sg->length >> IBLOCK_LBA_SHIFT; | 661 | block_lba += sg->length >> IBLOCK_LBA_SHIFT; |
679 | sg_num--; | 662 | sg_num--; |
680 | DEBUG_IBLOCK("task: %p bio-add_page() passed!, decremented" | 663 | pr_debug("task: %p bio-add_page() passed!, decremented" |
681 | " sg_num to %u\n", task, sg_num); | 664 | " sg_num to %u\n", task, sg_num); |
682 | DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba" | 665 | pr_debug("task: %p bio_add_page() passed!, increased lba" |
683 | " to %llu\n", task, block_lba); | 666 | " to %llu\n", task, (unsigned long long)block_lba); |
684 | DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:" | 667 | pr_debug("task: %p bio_add_page() passed!, bio->bi_vcnt:" |
685 | " %u\n", task, bio->bi_vcnt); | 668 | " %u\n", task, bio->bi_vcnt); |
686 | } | 669 | } |
687 | 670 | ||
@@ -727,11 +710,11 @@ static void iblock_bio_done(struct bio *bio, int err) | |||
727 | /* | 710 | /* |
728 | * Set -EIO if !BIO_UPTODATE and the passed is still err=0 | 711 | * Set -EIO if !BIO_UPTODATE and the passed is still err=0 |
729 | */ | 712 | */ |
730 | if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err)) | 713 | if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err) |
731 | err = -EIO; | 714 | err = -EIO; |
732 | 715 | ||
733 | if (err != 0) { | 716 | if (err != 0) { |
734 | printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p," | 717 | pr_err("test_bit(BIO_UPTODATE) failed for bio: %p," |
735 | " err: %d\n", bio, err); | 718 | " err: %d\n", bio, err); |
736 | /* | 719 | /* |
737 | * Bump the ib_bio_err_cnt and release bio. | 720 | * Bump the ib_bio_err_cnt and release bio. |
@@ -742,15 +725,15 @@ static void iblock_bio_done(struct bio *bio, int err) | |||
742 | /* | 725 | /* |
743 | * Wait to complete the task until the last bio as completed. | 726 | * Wait to complete the task until the last bio as completed. |
744 | */ | 727 | */ |
745 | if (!(atomic_dec_and_test(&ibr->ib_bio_cnt))) | 728 | if (!atomic_dec_and_test(&ibr->ib_bio_cnt)) |
746 | return; | 729 | return; |
747 | 730 | ||
748 | ibr->ib_bio = NULL; | 731 | ibr->ib_bio = NULL; |
749 | transport_complete_task(task, 0); | 732 | transport_complete_task(task, 0); |
750 | return; | 733 | return; |
751 | } | 734 | } |
752 | DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", | 735 | pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", |
753 | task, bio, task->task_lba, bio->bi_sector, err); | 736 | task, bio, task->task_lba, (unsigned long long)bio->bi_sector, err); |
754 | /* | 737 | /* |
755 | * bio_put() will call iblock_bio_destructor() to release the bio back | 738 | * bio_put() will call iblock_bio_destructor() to release the bio back |
756 | * to ibr->ib_bio_set. | 739 | * to ibr->ib_bio_set. |
@@ -759,7 +742,7 @@ static void iblock_bio_done(struct bio *bio, int err) | |||
759 | /* | 742 | /* |
760 | * Wait to complete the task until the last bio as completed. | 743 | * Wait to complete the task until the last bio as completed. |
761 | */ | 744 | */ |
762 | if (!(atomic_dec_and_test(&ibr->ib_bio_cnt))) | 745 | if (!atomic_dec_and_test(&ibr->ib_bio_cnt)) |
763 | return; | 746 | return; |
764 | /* | 747 | /* |
765 | * Return GOOD status for task if zero ib_bio_err_cnt exists. | 748 | * Return GOOD status for task if zero ib_bio_err_cnt exists. |
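
Both the error and success paths in iblock_bio_done() funnel through atomic_dec_and_test(&ibr->ib_bio_cnt): each completing bio drops one count, and only the caller that takes it to zero completes the se_task, so the completion order among chained bios doesn't matter. The idiom in isolation, with a hypothetical my_req/finish_request() standing in for the iblock structures and transport_complete_task():

    struct my_req {
            atomic_t bio_cnt;       /* one reference per submitted bio */
            atomic_t err_cnt;
    };

    static void my_bio_done(struct bio *bio, int err)
    {
            struct my_req *req = bio->bi_private;

            if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
                    err = -EIO;
            if (err)
                    atomic_inc(&req->err_cnt);

            bio_put(bio);   /* releases back to the owning bio_set */

            /* Exactly one completion observes the count reaching 0. */
            if (!atomic_dec_and_test(&req->bio_cnt))
                    return;

            finish_request(req, atomic_read(&req->err_cnt) == 0);
    }
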
@@ -772,7 +755,7 @@ static struct se_subsystem_api iblock_template = { | |||
772 | .name = "iblock", | 755 | .name = "iblock", |
773 | .owner = THIS_MODULE, | 756 | .owner = THIS_MODULE, |
774 | .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, | 757 | .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, |
775 | .map_task_SG = iblock_map_task_SG, | 758 | .map_data_SG = iblock_map_data_SG, |
776 | .attach_hba = iblock_attach_hba, | 759 | .attach_hba = iblock_attach_hba, |
777 | .detach_hba = iblock_detach_hba, | 760 | .detach_hba = iblock_detach_hba, |
778 | .allocate_virtdevice = iblock_allocate_virtdevice, | 761 | .allocate_virtdevice = iblock_allocate_virtdevice, |
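
Per the merge log, ->map_task_SG was split into ->map_data_SG and ->map_control_SG; iblock only routes data-path scatterlists to the block layer, so just the data hook is wired up here. A backend built on this ops table registers itself through the API shown in target_core_hba.c above, presumably from its module init, along the lines of:

    static int __init iblock_module_init(void)
    {
            return transport_subsystem_register(&iblock_template);
    }

    static void __exit iblock_module_exit(void)
    {
            transport_subsystem_release(&iblock_template);
    }

    module_init(iblock_module_init);
    module_exit(iblock_module_exit);
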
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h index 64c1f4d69f76..a121cd1b6575 100644 --- a/drivers/target/target_core_iblock.h +++ b/drivers/target/target_core_iblock.h | |||
@@ -3,9 +3,6 @@ | |||
3 | 3 | ||
4 | #define IBLOCK_VERSION "4.0" | 4 | #define IBLOCK_VERSION "4.0" |
5 | 5 | ||
6 | #define IBLOCK_HBA_QUEUE_DEPTH 512 | ||
7 | #define IBLOCK_DEVICE_QUEUE_DEPTH 32 | ||
8 | #define IBLOCK_MAX_DEVICE_QUEUE_DEPTH 128 | ||
9 | #define IBLOCK_MAX_CDBS 16 | 6 | #define IBLOCK_MAX_CDBS 16 |
10 | #define IBLOCK_LBA_SHIFT 9 | 7 | #define IBLOCK_LBA_SHIFT 9 |
11 | 8 | ||
@@ -15,18 +12,12 @@ struct iblock_req { | |||
15 | atomic_t ib_bio_cnt; | 12 | atomic_t ib_bio_cnt; |
16 | atomic_t ib_bio_err_cnt; | 13 | atomic_t ib_bio_err_cnt; |
17 | struct bio *ib_bio; | 14 | struct bio *ib_bio; |
18 | struct iblock_dev *ib_dev; | ||
19 | } ____cacheline_aligned; | 15 | } ____cacheline_aligned; |
20 | 16 | ||
21 | #define IBDF_HAS_UDEV_PATH 0x01 | 17 | #define IBDF_HAS_UDEV_PATH 0x01 |
22 | #define IBDF_HAS_FORCE 0x02 | ||
23 | 18 | ||
24 | struct iblock_dev { | 19 | struct iblock_dev { |
25 | unsigned char ibd_udev_path[SE_UDEV_PATH_LEN]; | 20 | unsigned char ibd_udev_path[SE_UDEV_PATH_LEN]; |
26 | int ibd_force; | ||
27 | int ibd_major; | ||
28 | int ibd_minor; | ||
29 | u32 ibd_depth; | ||
30 | u32 ibd_flags; | 21 | u32 ibd_flags; |
31 | struct bio_set *ibd_bio_set; | 22 | struct bio_set *ibd_bio_set; |
32 | struct block_device *ibd_bd; | 23 | struct block_device *ibd_bd; |
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index b662db3a320b..1c1b849cd4fb 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c | |||
@@ -62,7 +62,7 @@ int core_pr_dump_initiator_port( | |||
62 | char *buf, | 62 | char *buf, |
63 | u32 size) | 63 | u32 size) |
64 | { | 64 | { |
65 | if (!(pr_reg->isid_present_at_reg)) | 65 | if (!pr_reg->isid_present_at_reg) |
66 | return 0; | 66 | return 0; |
67 | 67 | ||
68 | snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]); | 68 | snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]); |
@@ -95,7 +95,7 @@ static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type) | |||
95 | struct se_session *sess = cmd->se_sess; | 95 | struct se_session *sess = cmd->se_sess; |
96 | int ret; | 96 | int ret; |
97 | 97 | ||
98 | if (!(sess)) | 98 | if (!sess) |
99 | return 0; | 99 | return 0; |
100 | 100 | ||
101 | spin_lock(&dev->dev_reservation_lock); | 101 | spin_lock(&dev->dev_reservation_lock); |
@@ -105,13 +105,13 @@ static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type) | |||
105 | } | 105 | } |
106 | if (dev->dev_reserved_node_acl != sess->se_node_acl) { | 106 | if (dev->dev_reserved_node_acl != sess->se_node_acl) { |
107 | spin_unlock(&dev->dev_reservation_lock); | 107 | spin_unlock(&dev->dev_reservation_lock); |
108 | return -1; | 108 | return -EINVAL; |
109 | } | 109 | } |
110 | if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) { | 110 | if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) { |
111 | spin_unlock(&dev->dev_reservation_lock); | 111 | spin_unlock(&dev->dev_reservation_lock); |
112 | return 0; | 112 | return 0; |
113 | } | 113 | } |
114 | ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -1; | 114 | ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -EINVAL; |
115 | spin_unlock(&dev->dev_reservation_lock); | 115 | spin_unlock(&dev->dev_reservation_lock); |
116 | 116 | ||
117 | return ret; | 117 | return ret; |
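
A small but consistent theme in the target_core_pr.c hunks: predicates that returned a bare -1 now return proper negative errnos, so callers can propagate the value directly instead of translating a sentinel. A hypothetical caller then reads naturally:

    ret = core_scsi2_reservation_check(cmd, &pr_reg_type);
    if (ret < 0)
            return ret;     /* -EINVAL propagates unchanged */
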
@@ -123,7 +123,7 @@ static int core_scsi2_reservation_release(struct se_cmd *cmd) | |||
123 | struct se_session *sess = cmd->se_sess; | 123 | struct se_session *sess = cmd->se_sess; |
124 | struct se_portal_group *tpg = sess->se_tpg; | 124 | struct se_portal_group *tpg = sess->se_tpg; |
125 | 125 | ||
126 | if (!(sess) || !(tpg)) | 126 | if (!sess || !tpg) |
127 | return 0; | 127 | return 0; |
128 | 128 | ||
129 | spin_lock(&dev->dev_reservation_lock); | 129 | spin_lock(&dev->dev_reservation_lock); |
@@ -142,9 +142,9 @@ static int core_scsi2_reservation_release(struct se_cmd *cmd) | |||
142 | dev->dev_res_bin_isid = 0; | 142 | dev->dev_res_bin_isid = 0; |
143 | dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID; | 143 | dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID; |
144 | } | 144 | } |
145 | printk(KERN_INFO "SCSI-2 Released reservation for %s LUN: %u ->" | 145 | pr_debug("SCSI-2 Released reservation for %s LUN: %u ->" |
146 | " MAPPED LUN: %u for %s\n", TPG_TFO(tpg)->get_fabric_name(), | 146 | " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
147 | SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun, | 147 | cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, |
148 | sess->se_node_acl->initiatorname); | 148 | sess->se_node_acl->initiatorname); |
149 | spin_unlock(&dev->dev_reservation_lock); | 149 | spin_unlock(&dev->dev_reservation_lock); |
150 | 150 | ||
@@ -157,9 +157,9 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd) | |||
157 | struct se_session *sess = cmd->se_sess; | 157 | struct se_session *sess = cmd->se_sess; |
158 | struct se_portal_group *tpg = sess->se_tpg; | 158 | struct se_portal_group *tpg = sess->se_tpg; |
159 | 159 | ||
160 | if ((T_TASK(cmd)->t_task_cdb[1] & 0x01) && | 160 | if ((cmd->t_task_cdb[1] & 0x01) && |
161 | (T_TASK(cmd)->t_task_cdb[1] & 0x02)) { | 161 | (cmd->t_task_cdb[1] & 0x02)) { |
162 | printk(KERN_ERR "LongIO and Obselete Bits set, returning" | 162 | pr_err("LongIO and Obselete Bits set, returning" |
163 | " ILLEGAL_REQUEST\n"); | 163 | " ILLEGAL_REQUEST\n"); |
164 | return PYX_TRANSPORT_ILLEGAL_REQUEST; | 164 | return PYX_TRANSPORT_ILLEGAL_REQUEST; |
165 | } | 165 | } |
@@ -167,19 +167,19 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd) | |||
167 | * This is currently the case for target_core_mod passthrough struct se_cmd | 167 | * This is currently the case for target_core_mod passthrough struct se_cmd |
168 | * ops | 168 | * ops |
169 | */ | 169 | */ |
170 | if (!(sess) || !(tpg)) | 170 | if (!sess || !tpg) |
171 | return 0; | 171 | return 0; |
172 | 172 | ||
173 | spin_lock(&dev->dev_reservation_lock); | 173 | spin_lock(&dev->dev_reservation_lock); |
174 | if (dev->dev_reserved_node_acl && | 174 | if (dev->dev_reserved_node_acl && |
175 | (dev->dev_reserved_node_acl != sess->se_node_acl)) { | 175 | (dev->dev_reserved_node_acl != sess->se_node_acl)) { |
176 | printk(KERN_ERR "SCSI-2 RESERVATION CONFLIFT for %s fabric\n", | 176 | pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n", |
177 | TPG_TFO(tpg)->get_fabric_name()); | 177 | tpg->se_tpg_tfo->get_fabric_name()); |
178 | printk(KERN_ERR "Original reserver LUN: %u %s\n", | 178 | pr_err("Original reserver LUN: %u %s\n", |
179 | SE_LUN(cmd)->unpacked_lun, | 179 | cmd->se_lun->unpacked_lun, |
180 | dev->dev_reserved_node_acl->initiatorname); | 180 | dev->dev_reserved_node_acl->initiatorname); |
181 | printk(KERN_ERR "Current attempt - LUN: %u -> MAPPED LUN: %u" | 181 | pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u" |
182 | " from %s \n", SE_LUN(cmd)->unpacked_lun, | 182 | " from %s \n", cmd->se_lun->unpacked_lun, |
183 | cmd->se_deve->mapped_lun, | 183 | cmd->se_deve->mapped_lun, |
184 | sess->se_node_acl->initiatorname); | 184 | sess->se_node_acl->initiatorname); |
185 | spin_unlock(&dev->dev_reservation_lock); | 185 | spin_unlock(&dev->dev_reservation_lock); |
@@ -192,9 +192,9 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd) | |||
192 | dev->dev_res_bin_isid = sess->sess_bin_isid; | 192 | dev->dev_res_bin_isid = sess->sess_bin_isid; |
193 | dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID; | 193 | dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID; |
194 | } | 194 | } |
195 | printk(KERN_INFO "SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u" | 195 | pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u" |
196 | " for %s\n", TPG_TFO(tpg)->get_fabric_name(), | 196 | " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
197 | SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun, | 197 | cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, |
198 | sess->se_node_acl->initiatorname); | 198 | sess->se_node_acl->initiatorname); |
199 | spin_unlock(&dev->dev_reservation_lock); | 199 | spin_unlock(&dev->dev_reservation_lock); |
200 | 200 | ||
@@ -215,15 +215,15 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd) | |||
215 | struct se_session *se_sess = cmd->se_sess; | 215 | struct se_session *se_sess = cmd->se_sess; |
216 | struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; | 216 | struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; |
217 | struct t10_pr_registration *pr_reg; | 217 | struct t10_pr_registration *pr_reg; |
218 | struct t10_reservation_template *pr_tmpl = &su_dev->t10_reservation; | 218 | struct t10_reservation *pr_tmpl = &su_dev->t10_pr; |
219 | unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0]; | 219 | unsigned char *cdb = &cmd->t_task_cdb[0]; |
220 | int crh = (T10_RES(su_dev)->res_type == SPC3_PERSISTENT_RESERVATIONS); | 220 | int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS); |
221 | int conflict = 0; | 221 | int conflict = 0; |
222 | 222 | ||
223 | if (!(se_sess)) | 223 | if (!se_sess) |
224 | return 0; | 224 | return 0; |
225 | 225 | ||
226 | if (!(crh)) | 226 | if (!crh) |
227 | goto after_crh; | 227 | goto after_crh; |
228 | 228 | ||
229 | pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, | 229 | pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, |
@@ -280,7 +280,7 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd) | |||
280 | } | 280 | } |
281 | 281 | ||
282 | if (conflict) { | 282 | if (conflict) { |
283 | printk(KERN_ERR "Received legacy SPC-2 RESERVE/RELEASE" | 283 | pr_err("Received legacy SPC-2 RESERVE/RELEASE" |
284 | " while active SPC-3 registrations exist," | 284 | " while active SPC-3 registrations exist," |
285 | " returning RESERVATION_CONFLICT\n"); | 285 | " returning RESERVATION_CONFLICT\n"); |
286 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | 286 | return PYX_TRANSPORT_RESERVATION_CONFLICT; |
@@ -307,7 +307,7 @@ static int core_scsi3_pr_seq_non_holder( | |||
307 | u32 pr_reg_type) | 307 | u32 pr_reg_type) |
308 | { | 308 | { |
309 | struct se_dev_entry *se_deve; | 309 | struct se_dev_entry *se_deve; |
310 | struct se_session *se_sess = SE_SESS(cmd); | 310 | struct se_session *se_sess = cmd->se_sess; |
311 | int other_cdb = 0, ignore_reg; | 311 | int other_cdb = 0, ignore_reg; |
312 | int registered_nexus = 0, ret = 1; /* Conflict by default */ | 312 | int registered_nexus = 0, ret = 1; /* Conflict by default */ |
313 | int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */ | 313 | int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */ |
@@ -362,7 +362,7 @@ static int core_scsi3_pr_seq_non_holder( | |||
362 | registered_nexus = 1; | 362 | registered_nexus = 1; |
363 | break; | 363 | break; |
364 | default: | 364 | default: |
365 | return -1; | 365 | return -EINVAL; |
366 | } | 366 | } |
367 | /* | 367 | /* |
368 | * Referenced from spc4r17 table 45 for *NON* PR holder access | 368 | * Referenced from spc4r17 table 45 for *NON* PR holder access |
@@ -412,9 +412,9 @@ static int core_scsi3_pr_seq_non_holder( | |||
412 | ret = (registered_nexus) ? 0 : 1; | 412 | ret = (registered_nexus) ? 0 : 1; |
413 | break; | 413 | break; |
414 | default: | 414 | default: |
415 | printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service" | 415 | pr_err("Unknown PERSISTENT_RESERVE_OUT service" |
416 | " action: 0x%02x\n", cdb[1] & 0x1f); | 416 | " action: 0x%02x\n", cdb[1] & 0x1f); |
417 | return -1; | 417 | return -EINVAL; |
418 | } | 418 | } |
419 | break; | 419 | break; |
420 | case RELEASE: | 420 | case RELEASE: |
@@ -459,9 +459,9 @@ static int core_scsi3_pr_seq_non_holder( | |||
459 | ret = 0; /* Allowed */ | 459 | ret = 0; /* Allowed */ |
460 | break; | 460 | break; |
461 | default: | 461 | default: |
462 | printk(KERN_ERR "Unknown MI Service Action: 0x%02x\n", | 462 | pr_err("Unknown MI Service Action: 0x%02x\n", |
463 | (cdb[1] & 0x1f)); | 463 | (cdb[1] & 0x1f)); |
464 | return -1; | 464 | return -EINVAL; |
465 | } | 465 | } |
466 | break; | 466 | break; |
467 | case ACCESS_CONTROL_IN: | 467 | case ACCESS_CONTROL_IN: |
@@ -481,9 +481,9 @@ static int core_scsi3_pr_seq_non_holder( | |||
481 | * Case where the CDB is explicitly allowed in the above switch | 481 | * Case where the CDB is explicitly allowed in the above switch |
482 | * statement. | 482 | * statement. |
483 | */ | 483 | */ |
484 | if (!(ret) && !(other_cdb)) { | 484 | if (!ret && !other_cdb) { |
485 | #if 0 | 485 | #if 0 |
486 | printk(KERN_INFO "Allowing explict CDB: 0x%02x for %s" | 486 | pr_debug("Allowing explict CDB: 0x%02x for %s" |
487 | " reservation holder\n", cdb[0], | 487 | " reservation holder\n", cdb[0], |
488 | core_scsi3_pr_dump_type(pr_reg_type)); | 488 | core_scsi3_pr_dump_type(pr_reg_type)); |
489 | #endif | 489 | #endif |
@@ -498,7 +498,7 @@ static int core_scsi3_pr_seq_non_holder( | |||
498 | /* | 498 | /* |
499 | * Conflict for write exclusive | 499 | * Conflict for write exclusive |
500 | */ | 500 | */ |
501 | printk(KERN_INFO "%s Conflict for unregistered nexus" | 501 | pr_debug("%s Conflict for unregistered nexus" |
502 | " %s CDB: 0x%02x to %s reservation\n", | 502 | " %s CDB: 0x%02x to %s reservation\n", |
503 | transport_dump_cmd_direction(cmd), | 503 | transport_dump_cmd_direction(cmd), |
504 | se_sess->se_node_acl->initiatorname, cdb[0], | 504 | se_sess->se_node_acl->initiatorname, cdb[0], |
@@ -515,8 +515,8 @@ static int core_scsi3_pr_seq_non_holder( | |||
515 | * nexuses to issue CDBs. | 515 | * nexuses to issue CDBs. |
516 | */ | 516 | */ |
517 | #if 0 | 517 | #if 0 |
518 | if (!(registered_nexus)) { | 518 | if (!registered_nexus) { |
519 | printk(KERN_INFO "Allowing implict CDB: 0x%02x" | 519 | pr_debug("Allowing implict CDB: 0x%02x" |
520 | " for %s reservation on unregistered" | 520 | " for %s reservation on unregistered" |
521 | " nexus\n", cdb[0], | 521 | " nexus\n", cdb[0], |
522 | core_scsi3_pr_dump_type(pr_reg_type)); | 522 | core_scsi3_pr_dump_type(pr_reg_type)); |
@@ -531,14 +531,14 @@ static int core_scsi3_pr_seq_non_holder( | |||
531 | * allow commands from registered nexuses. | 531 | * allow commands from registered nexuses. |
532 | */ | 532 | */ |
533 | #if 0 | 533 | #if 0 |
534 | printk(KERN_INFO "Allowing implict CDB: 0x%02x for %s" | 534 | pr_debug("Allowing implict CDB: 0x%02x for %s" |
535 | " reservation\n", cdb[0], | 535 | " reservation\n", cdb[0], |
536 | core_scsi3_pr_dump_type(pr_reg_type)); | 536 | core_scsi3_pr_dump_type(pr_reg_type)); |
537 | #endif | 537 | #endif |
538 | return 0; | 538 | return 0; |
539 | } | 539 | } |
540 | } | 540 | } |
541 | printk(KERN_INFO "%s Conflict for %sregistered nexus %s CDB: 0x%2x" | 541 | pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x" |
542 | " for %s reservation\n", transport_dump_cmd_direction(cmd), | 542 | " for %s reservation\n", transport_dump_cmd_direction(cmd), |
543 | (registered_nexus) ? "" : "un", | 543 | (registered_nexus) ? "" : "un", |
544 | se_sess->se_node_acl->initiatorname, cdb[0], | 544 | se_sess->se_node_acl->initiatorname, cdb[0], |
@@ -549,7 +549,7 @@ static int core_scsi3_pr_seq_non_holder( | |||
549 | 549 | ||
550 | static u32 core_scsi3_pr_generation(struct se_device *dev) | 550 | static u32 core_scsi3_pr_generation(struct se_device *dev) |
551 | { | 551 | { |
552 | struct se_subsystem_dev *su_dev = SU_DEV(dev); | 552 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
553 | u32 prg; | 553 | u32 prg; |
554 | /* | 554 | /* |
555 | * PRGeneration field shall contain the value of a 32-bit wrapping | 555 | * PRGeneration field shall contain the value of a 32-bit wrapping |
@@ -561,7 +561,7 @@ static u32 core_scsi3_pr_generation(struct se_device *dev) | |||
561 | * See spc4r17 section 6.3.12 READ_KEYS service action | 561 | * See spc4r17 section 6.3.12 READ_KEYS service action |
562 | */ | 562 | */ |
563 | spin_lock(&dev->dev_reservation_lock); | 563 | spin_lock(&dev->dev_reservation_lock); |
564 | prg = T10_RES(su_dev)->pr_generation++; | 564 | prg = su_dev->t10_pr.pr_generation++; |
565 | spin_unlock(&dev->dev_reservation_lock); | 565 | spin_unlock(&dev->dev_reservation_lock); |
566 | 566 | ||
567 | return prg; | 567 | return prg; |
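
core_scsi3_pr_generation() implements the PRGeneration counter from spc4r17: a 32-bit value bumped under dev_reservation_lock and reported through READ KEYS / READ RESERVATION so initiators can spot intervening registration changes. The wrap-around that the spec requires comes for free from u32 arithmetic; the shape of the update is simply:

    spin_lock(&dev->dev_reservation_lock);
    prg = su_dev->t10_pr.pr_generation++;   /* u32: wraps to 0 on overflow */
    spin_unlock(&dev->dev_reservation_lock);
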
@@ -575,7 +575,7 @@ static int core_scsi3_pr_reservation_check( | |||
575 | struct se_session *sess = cmd->se_sess; | 575 | struct se_session *sess = cmd->se_sess; |
576 | int ret; | 576 | int ret; |
577 | 577 | ||
578 | if (!(sess)) | 578 | if (!sess) |
579 | return 0; | 579 | return 0; |
580 | /* | 580 | /* |
581 | * A legacy SPC-2 reservation is being held. | 581 | * A legacy SPC-2 reservation is being held. |
@@ -584,7 +584,7 @@ static int core_scsi3_pr_reservation_check( | |||
584 | return core_scsi2_reservation_check(cmd, pr_reg_type); | 584 | return core_scsi2_reservation_check(cmd, pr_reg_type); |
585 | 585 | ||
586 | spin_lock(&dev->dev_reservation_lock); | 586 | spin_lock(&dev->dev_reservation_lock); |
587 | if (!(dev->dev_pr_res_holder)) { | 587 | if (!dev->dev_pr_res_holder) { |
588 | spin_unlock(&dev->dev_reservation_lock); | 588 | spin_unlock(&dev->dev_reservation_lock); |
589 | return 0; | 589 | return 0; |
590 | } | 590 | } |
@@ -592,14 +592,14 @@ static int core_scsi3_pr_reservation_check( | |||
592 | cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key; | 592 | cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key; |
593 | if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) { | 593 | if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) { |
594 | spin_unlock(&dev->dev_reservation_lock); | 594 | spin_unlock(&dev->dev_reservation_lock); |
595 | return -1; | 595 | return -EINVAL; |
596 | } | 596 | } |
597 | if (!(dev->dev_pr_res_holder->isid_present_at_reg)) { | 597 | if (!dev->dev_pr_res_holder->isid_present_at_reg) { |
598 | spin_unlock(&dev->dev_reservation_lock); | 598 | spin_unlock(&dev->dev_reservation_lock); |
599 | return 0; | 599 | return 0; |
600 | } | 600 | } |
601 | ret = (dev->dev_pr_res_holder->pr_reg_bin_isid == | 601 | ret = (dev->dev_pr_res_holder->pr_reg_bin_isid == |
602 | sess->sess_bin_isid) ? 0 : -1; | 602 | sess->sess_bin_isid) ? 0 : -EINVAL; |
603 | /* | 603 | /* |
604 | * Use bit in *pr_reg_type to notify ISID mismatch in | 604 | * Use bit in *pr_reg_type to notify ISID mismatch in |
605 | * core_scsi3_pr_seq_non_holder(). | 605 | * core_scsi3_pr_seq_non_holder(). |
@@ -620,19 +620,19 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration( | |||
620 | int all_tg_pt, | 620 | int all_tg_pt, |
621 | int aptpl) | 621 | int aptpl) |
622 | { | 622 | { |
623 | struct se_subsystem_dev *su_dev = SU_DEV(dev); | 623 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
624 | struct t10_pr_registration *pr_reg; | 624 | struct t10_pr_registration *pr_reg; |
625 | 625 | ||
626 | pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC); | 626 | pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC); |
627 | if (!(pr_reg)) { | 627 | if (!pr_reg) { |
628 | printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n"); | 628 | pr_err("Unable to allocate struct t10_pr_registration\n"); |
629 | return NULL; | 629 | return NULL; |
630 | } | 630 | } |
631 | 631 | ||
632 | pr_reg->pr_aptpl_buf = kzalloc(T10_RES(su_dev)->pr_aptpl_buf_len, | 632 | pr_reg->pr_aptpl_buf = kzalloc(su_dev->t10_pr.pr_aptpl_buf_len, |
633 | GFP_ATOMIC); | 633 | GFP_ATOMIC); |
634 | if (!(pr_reg->pr_aptpl_buf)) { | 634 | if (!pr_reg->pr_aptpl_buf) { |
635 | printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n"); | 635 | pr_err("Unable to allocate pr_reg->pr_aptpl_buf\n"); |
636 | kmem_cache_free(t10_pr_reg_cache, pr_reg); | 636 | kmem_cache_free(t10_pr_reg_cache, pr_reg); |
637 | return NULL; | 637 | return NULL; |
638 | } | 638 | } |
@@ -692,12 +692,12 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( | |||
692 | */ | 692 | */ |
693 | pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid, | 693 | pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid, |
694 | sa_res_key, all_tg_pt, aptpl); | 694 | sa_res_key, all_tg_pt, aptpl); |
695 | if (!(pr_reg)) | 695 | if (!pr_reg) |
696 | return NULL; | 696 | return NULL; |
697 | /* | 697 | /* |
698 | * Return pointer to pr_reg for ALL_TG_PT=0 | 698 | * Return pointer to pr_reg for ALL_TG_PT=0 |
699 | */ | 699 | */ |
700 | if (!(all_tg_pt)) | 700 | if (!all_tg_pt) |
701 | return pr_reg; | 701 | return pr_reg; |
702 | /* | 702 | /* |
703 | * Create list of matching SCSI Initiator Port registrations | 703 | * Create list of matching SCSI Initiator Port registrations |
@@ -717,7 +717,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( | |||
717 | * that have not been make explict via a ConfigFS | 717 | * that have not been make explict via a ConfigFS |
718 | * MappedLUN group for the SCSI Initiator Node ACL. | 718 | * MappedLUN group for the SCSI Initiator Node ACL. |
719 | */ | 719 | */ |
720 | if (!(deve_tmp->se_lun_acl)) | 720 | if (!deve_tmp->se_lun_acl) |
721 | continue; | 721 | continue; |
722 | 722 | ||
723 | nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl; | 723 | nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl; |
@@ -751,7 +751,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( | |||
751 | */ | 751 | */ |
752 | ret = core_scsi3_lunacl_depend_item(deve_tmp); | 752 | ret = core_scsi3_lunacl_depend_item(deve_tmp); |
753 | if (ret < 0) { | 753 | if (ret < 0) { |
754 | printk(KERN_ERR "core_scsi3_lunacl_depend" | 754 | pr_err("core_scsi3_lunacl_depend" |
755 | "_item() failed\n"); | 755 | "_item() failed\n"); |
756 | atomic_dec(&port->sep_tg_pt_ref_cnt); | 756 | atomic_dec(&port->sep_tg_pt_ref_cnt); |
757 | smp_mb__after_atomic_dec(); | 757 | smp_mb__after_atomic_dec(); |
@@ -769,7 +769,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( | |||
769 | pr_reg_atp = __core_scsi3_do_alloc_registration(dev, | 769 | pr_reg_atp = __core_scsi3_do_alloc_registration(dev, |
770 | nacl_tmp, deve_tmp, NULL, | 770 | nacl_tmp, deve_tmp, NULL, |
771 | sa_res_key, all_tg_pt, aptpl); | 771 | sa_res_key, all_tg_pt, aptpl); |
772 | if (!(pr_reg_atp)) { | 772 | if (!pr_reg_atp) { |
773 | atomic_dec(&port->sep_tg_pt_ref_cnt); | 773 | atomic_dec(&port->sep_tg_pt_ref_cnt); |
774 | smp_mb__after_atomic_dec(); | 774 | smp_mb__after_atomic_dec(); |
775 | atomic_dec(&deve_tmp->pr_ref_count); | 775 | atomic_dec(&deve_tmp->pr_ref_count); |
@@ -803,7 +803,7 @@ out: | |||
803 | } | 803 | } |
804 | 804 | ||
805 | int core_scsi3_alloc_aptpl_registration( | 805 | int core_scsi3_alloc_aptpl_registration( |
806 | struct t10_reservation_template *pr_tmpl, | 806 | struct t10_reservation *pr_tmpl, |
807 | u64 sa_res_key, | 807 | u64 sa_res_key, |
808 | unsigned char *i_port, | 808 | unsigned char *i_port, |
809 | unsigned char *isid, | 809 | unsigned char *isid, |
@@ -817,15 +817,15 @@ int core_scsi3_alloc_aptpl_registration( | |||
817 | { | 817 | { |
818 | struct t10_pr_registration *pr_reg; | 818 | struct t10_pr_registration *pr_reg; |
819 | 819 | ||
820 | if (!(i_port) || !(t_port) || !(sa_res_key)) { | 820 | if (!i_port || !t_port || !sa_res_key) { |
821 | printk(KERN_ERR "Illegal parameters for APTPL registration\n"); | 821 | pr_err("Illegal parameters for APTPL registration\n"); |
822 | return -1; | 822 | return -EINVAL; |
823 | } | 823 | } |
824 | 824 | ||
825 | pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL); | 825 | pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL); |
826 | if (!(pr_reg)) { | 826 | if (!pr_reg) { |
827 | printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n"); | 827 | pr_err("Unable to allocate struct t10_pr_registration\n"); |
828 | return -1; | 828 | return -ENOMEM; |
829 | } | 829 | } |
830 | pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL); | 830 | pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL); |
831 | 831 | ||
@@ -869,7 +869,7 @@ int core_scsi3_alloc_aptpl_registration( | |||
869 | pr_reg->pr_res_holder = res_holder; | 869 | pr_reg->pr_res_holder = res_holder; |
870 | 870 | ||
871 | list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list); | 871 | list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list); |
872 | printk(KERN_INFO "SPC-3 PR APTPL Successfully added registration%s from" | 872 | pr_debug("SPC-3 PR APTPL Successfully added registration%s from" |
873 | " metadata\n", (res_holder) ? "+reservation" : ""); | 873 | " metadata\n", (res_holder) ? "+reservation" : ""); |
874 | return 0; | 874 | return 0; |
875 | } | 875 | } |
@@ -891,13 +891,13 @@ static void core_scsi3_aptpl_reserve( | |||
891 | dev->dev_pr_res_holder = pr_reg; | 891 | dev->dev_pr_res_holder = pr_reg; |
892 | spin_unlock(&dev->dev_reservation_lock); | 892 | spin_unlock(&dev->dev_reservation_lock); |
893 | 893 | ||
894 | printk(KERN_INFO "SPC-3 PR [%s] Service Action: APTPL RESERVE created" | 894 | pr_debug("SPC-3 PR [%s] Service Action: APTPL RESERVE created" |
895 | " new reservation holder TYPE: %s ALL_TG_PT: %d\n", | 895 | " new reservation holder TYPE: %s ALL_TG_PT: %d\n", |
896 | TPG_TFO(tpg)->get_fabric_name(), | 896 | tpg->se_tpg_tfo->get_fabric_name(), |
897 | core_scsi3_pr_dump_type(pr_reg->pr_res_type), | 897 | core_scsi3_pr_dump_type(pr_reg->pr_res_type), |
898 | (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); | 898 | (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); |
899 | printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n", | 899 | pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n", |
900 | TPG_TFO(tpg)->get_fabric_name(), node_acl->initiatorname, | 900 | tpg->se_tpg_tfo->get_fabric_name(), node_acl->initiatorname, |
901 | (prf_isid) ? &i_buf[0] : ""); | 901 | (prf_isid) ? &i_buf[0] : ""); |
902 | } | 902 | } |
903 | 903 | ||
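These hunks also retire the TPG_TFO(), SU_DEV() and T10_RES() accessor macros in favor of spelled-out member dereferences. A rough reconstruction of the removed helpers, assumed here for illustration only:

#define SU_DEV(dev)	((dev)->se_sub_dev)
#define TPG_TFO(tpg)	((tpg)->se_tpg_tfo)
#define T10_RES(su_dev)	(&(su_dev)->t10_reservation)

With the macros gone, TPG_TFO(tpg)->get_fabric_name() reads tpg->se_tpg_tfo->get_fabric_name(), and the renamed struct t10_reservation is reached as dev->se_sub_dev->t10_pr.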
@@ -913,7 +913,7 @@ static int __core_scsi3_check_aptpl_registration( | |||
913 | struct se_dev_entry *deve) | 913 | struct se_dev_entry *deve) |
914 | { | 914 | { |
915 | struct t10_pr_registration *pr_reg, *pr_reg_tmp; | 915 | struct t10_pr_registration *pr_reg, *pr_reg_tmp; |
916 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 916 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
917 | unsigned char i_port[PR_APTPL_MAX_IPORT_LEN]; | 917 | unsigned char i_port[PR_APTPL_MAX_IPORT_LEN]; |
918 | unsigned char t_port[PR_APTPL_MAX_TPORT_LEN]; | 918 | unsigned char t_port[PR_APTPL_MAX_TPORT_LEN]; |
919 | u16 tpgt; | 919 | u16 tpgt; |
@@ -925,8 +925,8 @@ static int __core_scsi3_check_aptpl_registration( | |||
925 | */ | 925 | */ |
926 | snprintf(i_port, PR_APTPL_MAX_IPORT_LEN, "%s", nacl->initiatorname); | 926 | snprintf(i_port, PR_APTPL_MAX_IPORT_LEN, "%s", nacl->initiatorname); |
927 | snprintf(t_port, PR_APTPL_MAX_TPORT_LEN, "%s", | 927 | snprintf(t_port, PR_APTPL_MAX_TPORT_LEN, "%s", |
928 | TPG_TFO(tpg)->tpg_get_wwn(tpg)); | 928 | tpg->se_tpg_tfo->tpg_get_wwn(tpg)); |
929 | tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg); | 929 | tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg); |
930 | /* | 930 | /* |
931 | * Look for the matching registrations+reservation from those | 931 | * Look for the matching registrations+reservation from those |
932 | * created from APTPL metadata. Note that multiple registrations | 932 | * created from APTPL metadata. Note that multiple registrations |
@@ -936,7 +936,7 @@ static int __core_scsi3_check_aptpl_registration( | |||
936 | spin_lock(&pr_tmpl->aptpl_reg_lock); | 936 | spin_lock(&pr_tmpl->aptpl_reg_lock); |
937 | list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list, | 937 | list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list, |
938 | pr_reg_aptpl_list) { | 938 | pr_reg_aptpl_list) { |
939 | if (!(strcmp(pr_reg->pr_iport, i_port)) && | 939 | if (!strcmp(pr_reg->pr_iport, i_port) && |
940 | (pr_reg->pr_res_mapped_lun == deve->mapped_lun) && | 940 | (pr_reg->pr_res_mapped_lun == deve->mapped_lun) && |
941 | !(strcmp(pr_reg->pr_tport, t_port)) && | 941 | !(strcmp(pr_reg->pr_tport, t_port)) && |
942 | (pr_reg->pr_reg_tpgt == tpgt) && | 942 | (pr_reg->pr_reg_tpgt == tpgt) && |
@@ -980,11 +980,11 @@ int core_scsi3_check_aptpl_registration( | |||
980 | struct se_lun *lun, | 980 | struct se_lun *lun, |
981 | struct se_lun_acl *lun_acl) | 981 | struct se_lun_acl *lun_acl) |
982 | { | 982 | { |
983 | struct se_subsystem_dev *su_dev = SU_DEV(dev); | 983 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
984 | struct se_node_acl *nacl = lun_acl->se_lun_nacl; | 984 | struct se_node_acl *nacl = lun_acl->se_lun_nacl; |
985 | struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun]; | 985 | struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun]; |
986 | 986 | ||
987 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | 987 | if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) |
988 | return 0; | 988 | return 0; |
989 | 989 | ||
990 | return __core_scsi3_check_aptpl_registration(dev, tpg, lun, | 990 | return __core_scsi3_check_aptpl_registration(dev, tpg, lun, |
@@ -1006,19 +1006,19 @@ static void __core_scsi3_dump_registration( | |||
1006 | prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], | 1006 | prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], |
1007 | PR_REG_ISID_ID_LEN); | 1007 | PR_REG_ISID_ID_LEN); |
1008 | 1008 | ||
1009 | printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER%s Initiator" | 1009 | pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator" |
1010 | " Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ? | 1010 | " Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ? |
1011 | "_AND_MOVE" : (register_type == 1) ? | 1011 | "_AND_MOVE" : (register_type == 1) ? |
1012 | "_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname, | 1012 | "_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname, |
1013 | (prf_isid) ? i_buf : ""); | 1013 | (prf_isid) ? i_buf : ""); |
1014 | printk(KERN_INFO "SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n", | 1014 | pr_debug("SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n", |
1015 | tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg), | 1015 | tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg), |
1016 | tfo->tpg_get_tag(se_tpg)); | 1016 | tfo->tpg_get_tag(se_tpg)); |
1017 | printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" | 1017 | pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" |
1018 | " Port(s)\n", tfo->get_fabric_name(), | 1018 | " Port(s)\n", tfo->get_fabric_name(), |
1019 | (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE", | 1019 | (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE", |
1020 | TRANSPORT(dev)->name); | 1020 | dev->transport->name); |
1021 | printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" | 1021 | pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" |
1022 | " 0x%08x APTPL: %d\n", tfo->get_fabric_name(), | 1022 | " 0x%08x APTPL: %d\n", tfo->get_fabric_name(), |
1023 | pr_reg->pr_res_key, pr_reg->pr_res_generation, | 1023 | pr_reg->pr_res_key, pr_reg->pr_res_generation, |
1024 | pr_reg->pr_reg_aptpl); | 1024 | pr_reg->pr_reg_aptpl); |
@@ -1035,10 +1035,10 @@ static void __core_scsi3_add_registration( | |||
1035 | int register_type, | 1035 | int register_type, |
1036 | int register_move) | 1036 | int register_move) |
1037 | { | 1037 | { |
1038 | struct se_subsystem_dev *su_dev = SU_DEV(dev); | 1038 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
1039 | struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; | 1039 | struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; |
1040 | struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; | 1040 | struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; |
1041 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 1041 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
1042 | 1042 | ||
1043 | /* | 1043 | /* |
1044 | * Increment PRgeneration counter for struct se_device upon a successful | 1044 | * Increment PRgeneration counter for struct se_device upon a successful |
@@ -1050,7 +1050,7 @@ static void __core_scsi3_add_registration( | |||
1050 | * for the REGISTER. | 1050 | * for the REGISTER. |
1051 | */ | 1051 | */ |
1052 | pr_reg->pr_res_generation = (register_move) ? | 1052 | pr_reg->pr_res_generation = (register_move) ? |
1053 | T10_RES(su_dev)->pr_generation++ : | 1053 | su_dev->t10_pr.pr_generation++ : |
1054 | core_scsi3_pr_generation(dev); | 1054 | core_scsi3_pr_generation(dev); |
1055 | 1055 | ||
1056 | spin_lock(&pr_tmpl->registration_lock); | 1056 | spin_lock(&pr_tmpl->registration_lock); |
@@ -1062,7 +1062,7 @@ static void __core_scsi3_add_registration( | |||
1062 | /* | 1062 | /* |
1063 | * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE. | 1063 | * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE. |
1064 | */ | 1064 | */ |
1065 | if (!(pr_reg->pr_reg_all_tg_pt) || (register_move)) | 1065 | if (!pr_reg->pr_reg_all_tg_pt || register_move) |
1066 | return; | 1066 | return; |
1067 | /* | 1067 | /* |
1068 | * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1 | 1068 | * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1 |
@@ -1106,8 +1106,8 @@ static int core_scsi3_alloc_registration( | |||
1106 | 1106 | ||
1107 | pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid, | 1107 | pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid, |
1108 | sa_res_key, all_tg_pt, aptpl); | 1108 | sa_res_key, all_tg_pt, aptpl); |
1109 | if (!(pr_reg)) | 1109 | if (!pr_reg) |
1110 | return -1; | 1110 | return -EPERM; |
1111 | 1111 | ||
1112 | __core_scsi3_add_registration(dev, nacl, pr_reg, | 1112 | __core_scsi3_add_registration(dev, nacl, pr_reg, |
1113 | register_type, register_move); | 1113 | register_type, register_move); |
@@ -1119,7 +1119,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg( | |||
1119 | struct se_node_acl *nacl, | 1119 | struct se_node_acl *nacl, |
1120 | unsigned char *isid) | 1120 | unsigned char *isid) |
1121 | { | 1121 | { |
1122 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 1122 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
1123 | struct t10_pr_registration *pr_reg, *pr_reg_tmp; | 1123 | struct t10_pr_registration *pr_reg, *pr_reg_tmp; |
1124 | struct se_portal_group *tpg; | 1124 | struct se_portal_group *tpg; |
1125 | 1125 | ||
@@ -1137,14 +1137,14 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg( | |||
1137 | * If this registration does NOT contain a fabric provided | 1137 | * If this registration does NOT contain a fabric provided |
1138 | * ISID, then we have found a match. | 1138 | * ISID, then we have found a match. |
1139 | */ | 1139 | */ |
1140 | if (!(pr_reg->isid_present_at_reg)) { | 1140 | if (!pr_reg->isid_present_at_reg) { |
1141 | /* | 1141 | /* |
1142 | * Determine if this SCSI device server requires that | 1142 | * Determine if this SCSI device server requires that |
1143 | * SCSI Initiator TransportID w/ ISIDs is enforced | 1143 | * SCSI Initiator TransportID w/ ISIDs is enforced |
1144 | * for fabric modules (iSCSI) requiring them. | 1144 | * for fabric modules (iSCSI) requiring them. |
1145 | */ | 1145 | */ |
1146 | if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) { | 1146 | if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { |
1147 | if (DEV_ATTRIB(dev)->enforce_pr_isids) | 1147 | if (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) |
1148 | continue; | 1148 | continue; |
1149 | } | 1149 | } |
1150 | atomic_inc(&pr_reg->pr_res_holders); | 1150 | atomic_inc(&pr_reg->pr_res_holders); |
@@ -1157,7 +1157,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg( | |||
1157 | * SCSI Initiator Port TransportIDs, then we expect a valid | 1157 | * SCSI Initiator Port TransportIDs, then we expect a valid |
1158 | * matching ISID to be provided by the local SCSI Initiator Port. | 1158 | * matching ISID to be provided by the local SCSI Initiator Port. |
1159 | */ | 1159 | */ |
1160 | if (!(isid)) | 1160 | if (!isid) |
1161 | continue; | 1161 | continue; |
1162 | if (strcmp(isid, pr_reg->pr_reg_isid)) | 1162 | if (strcmp(isid, pr_reg->pr_reg_isid)) |
1163 | continue; | 1163 | continue; |
@@ -1180,9 +1180,9 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg( | |||
1180 | struct se_portal_group *tpg = nacl->se_tpg; | 1180 | struct se_portal_group *tpg = nacl->se_tpg; |
1181 | unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL; | 1181 | unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL; |
1182 | 1182 | ||
1183 | if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) { | 1183 | if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { |
1184 | memset(&buf[0], 0, PR_REG_ISID_LEN); | 1184 | memset(&buf[0], 0, PR_REG_ISID_LEN); |
1185 | TPG_TFO(tpg)->sess_get_initiator_sid(sess, &buf[0], | 1185 | tpg->se_tpg_tfo->sess_get_initiator_sid(sess, &buf[0], |
1186 | PR_REG_ISID_LEN); | 1186 | PR_REG_ISID_LEN); |
1187 | isid_ptr = &buf[0]; | 1187 | isid_ptr = &buf[0]; |
1188 | } | 1188 | } |
@@ -1206,7 +1206,7 @@ static int core_scsi3_check_implict_release( | |||
1206 | 1206 | ||
1207 | spin_lock(&dev->dev_reservation_lock); | 1207 | spin_lock(&dev->dev_reservation_lock); |
1208 | pr_res_holder = dev->dev_pr_res_holder; | 1208 | pr_res_holder = dev->dev_pr_res_holder; |
1209 | if (!(pr_res_holder)) { | 1209 | if (!pr_res_holder) { |
1210 | spin_unlock(&dev->dev_reservation_lock); | 1210 | spin_unlock(&dev->dev_reservation_lock); |
1211 | return ret; | 1211 | return ret; |
1212 | } | 1212 | } |
@@ -1236,11 +1236,11 @@ static int core_scsi3_check_implict_release( | |||
1236 | (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname, | 1236 | (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname, |
1237 | pr_reg->pr_reg_nacl->initiatorname)) && | 1237 | pr_reg->pr_reg_nacl->initiatorname)) && |
1238 | (pr_res_holder->pr_res_key == pr_reg->pr_res_key)) { | 1238 | (pr_res_holder->pr_res_key == pr_reg->pr_res_key)) { |
1239 | printk(KERN_ERR "SPC-3 PR: Unable to perform ALL_TG_PT=1" | 1239 | pr_err("SPC-3 PR: Unable to perform ALL_TG_PT=1" |
1240 | " UNREGISTER while existing reservation with matching" | 1240 | " UNREGISTER while existing reservation with matching" |
1241 | " key 0x%016Lx is present from another SCSI Initiator" | 1241 | " key 0x%016Lx is present from another SCSI Initiator" |
1242 | " Port\n", pr_reg->pr_res_key); | 1242 | " Port\n", pr_reg->pr_res_key); |
1243 | ret = -1; | 1243 | ret = -EPERM; |
1244 | } | 1244 | } |
1245 | spin_unlock(&dev->dev_reservation_lock); | 1245 | spin_unlock(&dev->dev_reservation_lock); |
1246 | 1246 | ||
@@ -1248,7 +1248,7 @@ static int core_scsi3_check_implict_release( | |||
1248 | } | 1248 | } |
1249 | 1249 | ||
1250 | /* | 1250 | /* |
1251 | * Called with struct t10_reservation_template->registration_lock held. | 1251 | * Called with struct t10_reservation->registration_lock held. |
1252 | */ | 1252 | */ |
1253 | static void __core_scsi3_free_registration( | 1253 | static void __core_scsi3_free_registration( |
1254 | struct se_device *dev, | 1254 | struct se_device *dev, |
@@ -1258,7 +1258,7 @@ static void __core_scsi3_free_registration( | |||
1258 | { | 1258 | { |
1259 | struct target_core_fabric_ops *tfo = | 1259 | struct target_core_fabric_ops *tfo = |
1260 | pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; | 1260 | pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; |
1261 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 1261 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
1262 | char i_buf[PR_REG_ISID_ID_LEN]; | 1262 | char i_buf[PR_REG_ISID_ID_LEN]; |
1263 | int prf_isid; | 1263 | int prf_isid; |
1264 | 1264 | ||
@@ -1283,25 +1283,25 @@ static void __core_scsi3_free_registration( | |||
1283 | */ | 1283 | */ |
1284 | while (atomic_read(&pr_reg->pr_res_holders) != 0) { | 1284 | while (atomic_read(&pr_reg->pr_res_holders) != 0) { |
1285 | spin_unlock(&pr_tmpl->registration_lock); | 1285 | spin_unlock(&pr_tmpl->registration_lock); |
1286 | printk("SPC-3 PR [%s] waiting for pr_res_holders\n", | 1286 | pr_debug("SPC-3 PR [%s] waiting for pr_res_holders\n", |
1287 | tfo->get_fabric_name()); | 1287 | tfo->get_fabric_name()); |
1288 | cpu_relax(); | 1288 | cpu_relax(); |
1289 | spin_lock(&pr_tmpl->registration_lock); | 1289 | spin_lock(&pr_tmpl->registration_lock); |
1290 | } | 1290 | } |
1291 | 1291 | ||
1292 | printk(KERN_INFO "SPC-3 PR [%s] Service Action: UNREGISTER Initiator" | 1292 | pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator" |
1293 | " Node: %s%s\n", tfo->get_fabric_name(), | 1293 | " Node: %s%s\n", tfo->get_fabric_name(), |
1294 | pr_reg->pr_reg_nacl->initiatorname, | 1294 | pr_reg->pr_reg_nacl->initiatorname, |
1295 | (prf_isid) ? &i_buf[0] : ""); | 1295 | (prf_isid) ? &i_buf[0] : ""); |
1296 | printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" | 1296 | pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" |
1297 | " Port(s)\n", tfo->get_fabric_name(), | 1297 | " Port(s)\n", tfo->get_fabric_name(), |
1298 | (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE", | 1298 | (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE", |
1299 | TRANSPORT(dev)->name); | 1299 | dev->transport->name); |
1300 | printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" | 1300 | pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" |
1301 | " 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key, | 1301 | " 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key, |
1302 | pr_reg->pr_res_generation); | 1302 | pr_reg->pr_res_generation); |
1303 | 1303 | ||
1304 | if (!(preempt_and_abort_list)) { | 1304 | if (!preempt_and_abort_list) { |
1305 | pr_reg->pr_reg_deve = NULL; | 1305 | pr_reg->pr_reg_deve = NULL; |
1306 | pr_reg->pr_reg_nacl = NULL; | 1306 | pr_reg->pr_reg_nacl = NULL; |
1307 | kfree(pr_reg->pr_aptpl_buf); | 1307 | kfree(pr_reg->pr_aptpl_buf); |
@@ -1319,7 +1319,7 @@ void core_scsi3_free_pr_reg_from_nacl( | |||
1319 | struct se_device *dev, | 1319 | struct se_device *dev, |
1320 | struct se_node_acl *nacl) | 1320 | struct se_node_acl *nacl) |
1321 | { | 1321 | { |
1322 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 1322 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
1323 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; | 1323 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; |
1324 | /* | 1324 | /* |
1325 | * If the passed se_node_acl matches the reservation holder, | 1325 | * If the passed se_node_acl matches the reservation holder, |
@@ -1349,7 +1349,7 @@ void core_scsi3_free_pr_reg_from_nacl( | |||
1349 | void core_scsi3_free_all_registrations( | 1349 | void core_scsi3_free_all_registrations( |
1350 | struct se_device *dev) | 1350 | struct se_device *dev) |
1351 | { | 1351 | { |
1352 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 1352 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
1353 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; | 1353 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; |
1354 | 1354 | ||
1355 | spin_lock(&dev->dev_reservation_lock); | 1355 | spin_lock(&dev->dev_reservation_lock); |
@@ -1381,13 +1381,13 @@ void core_scsi3_free_all_registrations( | |||
1381 | 1381 | ||
1382 | static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg) | 1382 | static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg) |
1383 | { | 1383 | { |
1384 | return configfs_depend_item(TPG_TFO(tpg)->tf_subsys, | 1384 | return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, |
1385 | &tpg->tpg_group.cg_item); | 1385 | &tpg->tpg_group.cg_item); |
1386 | } | 1386 | } |
1387 | 1387 | ||
1388 | static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg) | 1388 | static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg) |
1389 | { | 1389 | { |
1390 | configfs_undepend_item(TPG_TFO(tpg)->tf_subsys, | 1390 | configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, |
1391 | &tpg->tpg_group.cg_item); | 1391 | &tpg->tpg_group.cg_item); |
1392 | 1392 | ||
1393 | atomic_dec(&tpg->tpg_pr_ref_count); | 1393 | atomic_dec(&tpg->tpg_pr_ref_count); |
@@ -1401,7 +1401,7 @@ static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) | |||
1401 | if (nacl->dynamic_node_acl) | 1401 | if (nacl->dynamic_node_acl) |
1402 | return 0; | 1402 | return 0; |
1403 | 1403 | ||
1404 | return configfs_depend_item(TPG_TFO(tpg)->tf_subsys, | 1404 | return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, |
1405 | &nacl->acl_group.cg_item); | 1405 | &nacl->acl_group.cg_item); |
1406 | } | 1406 | } |
1407 | 1407 | ||
@@ -1415,7 +1415,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl) | |||
1415 | return; | 1415 | return; |
1416 | } | 1416 | } |
1417 | 1417 | ||
1418 | configfs_undepend_item(TPG_TFO(tpg)->tf_subsys, | 1418 | configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, |
1419 | &nacl->acl_group.cg_item); | 1419 | &nacl->acl_group.cg_item); |
1420 | 1420 | ||
1421 | atomic_dec(&nacl->acl_pr_ref_count); | 1421 | atomic_dec(&nacl->acl_pr_ref_count); |
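The depend/undepend helpers above pin configfs items so a portal group, node ACL or LUN ACL cannot be unlinked while an in-flight PR operation still references it. A minimal sketch of the pairing, using the same calls as the code above:

int ret;

ret = configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
			   &tpg->tpg_group.cg_item);	/* pin the config group */
if (ret != 0)
	return ret;
/* ... safely dereference the portal group ... */
configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
		       &tpg->tpg_group.cg_item);	/* release the pin */
atomic_dec(&tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();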
@@ -1430,13 +1430,13 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) | |||
1430 | /* | 1430 | /* |
1431 | * For nacl->dynamic_node_acl=1 | 1431 | * For nacl->dynamic_node_acl=1 |
1432 | */ | 1432 | */ |
1433 | if (!(lun_acl)) | 1433 | if (!lun_acl) |
1434 | return 0; | 1434 | return 0; |
1435 | 1435 | ||
1436 | nacl = lun_acl->se_lun_nacl; | 1436 | nacl = lun_acl->se_lun_nacl; |
1437 | tpg = nacl->se_tpg; | 1437 | tpg = nacl->se_tpg; |
1438 | 1438 | ||
1439 | return configfs_depend_item(TPG_TFO(tpg)->tf_subsys, | 1439 | return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, |
1440 | &lun_acl->se_lun_group.cg_item); | 1440 | &lun_acl->se_lun_group.cg_item); |
1441 | } | 1441 | } |
1442 | 1442 | ||
@@ -1448,7 +1448,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) | |||
1448 | /* | 1448 | /* |
1449 | * For nacl->dynamic_node_acl=1 | 1449 | * For nacl->dynamic_node_acl=1 |
1450 | */ | 1450 | */ |
1451 | if (!(lun_acl)) { | 1451 | if (!lun_acl) { |
1452 | atomic_dec(&se_deve->pr_ref_count); | 1452 | atomic_dec(&se_deve->pr_ref_count); |
1453 | smp_mb__after_atomic_dec(); | 1453 | smp_mb__after_atomic_dec(); |
1454 | return; | 1454 | return; |
@@ -1456,7 +1456,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) | |||
1456 | nacl = lun_acl->se_lun_nacl; | 1456 | nacl = lun_acl->se_lun_nacl; |
1457 | tpg = nacl->se_tpg; | 1457 | tpg = nacl->se_tpg; |
1458 | 1458 | ||
1459 | configfs_undepend_item(TPG_TFO(tpg)->tf_subsys, | 1459 | configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, |
1460 | &lun_acl->se_lun_group.cg_item); | 1460 | &lun_acl->se_lun_group.cg_item); |
1461 | 1461 | ||
1462 | atomic_dec(&se_deve->pr_ref_count); | 1462 | atomic_dec(&se_deve->pr_ref_count); |
@@ -1471,10 +1471,10 @@ static int core_scsi3_decode_spec_i_port( | |||
1471 | int all_tg_pt, | 1471 | int all_tg_pt, |
1472 | int aptpl) | 1472 | int aptpl) |
1473 | { | 1473 | { |
1474 | struct se_device *dev = SE_DEV(cmd); | 1474 | struct se_device *dev = cmd->se_dev; |
1475 | struct se_port *tmp_port; | 1475 | struct se_port *tmp_port; |
1476 | struct se_portal_group *dest_tpg = NULL, *tmp_tpg; | 1476 | struct se_portal_group *dest_tpg = NULL, *tmp_tpg; |
1477 | struct se_session *se_sess = SE_SESS(cmd); | 1477 | struct se_session *se_sess = cmd->se_sess; |
1478 | struct se_node_acl *dest_node_acl = NULL; | 1478 | struct se_node_acl *dest_node_acl = NULL; |
1479 | struct se_dev_entry *dest_se_deve = NULL, *local_se_deve; | 1479 | struct se_dev_entry *dest_se_deve = NULL, *local_se_deve; |
1480 | struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e; | 1480 | struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e; |
@@ -1482,7 +1482,7 @@ static int core_scsi3_decode_spec_i_port( | |||
1482 | struct list_head tid_dest_list; | 1482 | struct list_head tid_dest_list; |
1483 | struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; | 1483 | struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; |
1484 | struct target_core_fabric_ops *tmp_tf_ops; | 1484 | struct target_core_fabric_ops *tmp_tf_ops; |
1485 | unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; | 1485 | unsigned char *buf; |
1486 | unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; | 1486 | unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; |
1487 | char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; | 1487 | char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; |
1488 | u32 tpdl, tid_len = 0; | 1488 | u32 tpdl, tid_len = 0; |
@@ -1500,8 +1500,8 @@ static int core_scsi3_decode_spec_i_port( | |||
1500 | * processing in the loop of tid_dest_list below. | 1500 | * processing in the loop of tid_dest_list below. |
1501 | */ | 1501 | */ |
1502 | tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL); | 1502 | tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL); |
1503 | if (!(tidh_new)) { | 1503 | if (!tidh_new) { |
1504 | printk(KERN_ERR "Unable to allocate tidh_new\n"); | 1504 | pr_err("Unable to allocate tidh_new\n"); |
1505 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 1505 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
1506 | } | 1506 | } |
1507 | INIT_LIST_HEAD(&tidh_new->dest_list); | 1507 | INIT_LIST_HEAD(&tidh_new->dest_list); |
@@ -1509,10 +1509,10 @@ static int core_scsi3_decode_spec_i_port( | |||
1509 | tidh_new->dest_node_acl = se_sess->se_node_acl; | 1509 | tidh_new->dest_node_acl = se_sess->se_node_acl; |
1510 | tidh_new->dest_se_deve = local_se_deve; | 1510 | tidh_new->dest_se_deve = local_se_deve; |
1511 | 1511 | ||
1512 | local_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd), | 1512 | local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev, |
1513 | se_sess->se_node_acl, local_se_deve, l_isid, | 1513 | se_sess->se_node_acl, local_se_deve, l_isid, |
1514 | sa_res_key, all_tg_pt, aptpl); | 1514 | sa_res_key, all_tg_pt, aptpl); |
1515 | if (!(local_pr_reg)) { | 1515 | if (!local_pr_reg) { |
1516 | kfree(tidh_new); | 1516 | kfree(tidh_new); |
1517 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 1517 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
1518 | } | 1518 | } |
@@ -1524,6 +1524,8 @@ static int core_scsi3_decode_spec_i_port( | |||
1524 | */ | 1524 | */ |
1525 | tidh_new->dest_local_nexus = 1; | 1525 | tidh_new->dest_local_nexus = 1; |
1526 | list_add_tail(&tidh_new->dest_list, &tid_dest_list); | 1526 | list_add_tail(&tidh_new->dest_list, &tid_dest_list); |
1527 | |||
1528 | buf = transport_kmap_first_data_page(cmd); | ||
1527 | /* | 1529 | /* |
1528 | * For a PERSISTENT RESERVE OUT specify initiator ports payload, | 1530 | * For a PERSISTENT RESERVE OUT specify initiator ports payload, |
1529 | * first extract TransportID Parameter Data Length, and make sure | 1531 | * first extract TransportID Parameter Data Length, and make sure |
@@ -1535,7 +1537,7 @@ static int core_scsi3_decode_spec_i_port( | |||
1535 | tpdl |= buf[27] & 0xff; | 1537 | tpdl |= buf[27] & 0xff; |
1536 | 1538 | ||
1537 | if ((tpdl + 28) != cmd->data_length) { | 1539 | if ((tpdl + 28) != cmd->data_length) { |
1538 | printk(KERN_ERR "SPC-3 PR: Illegal tpdl: %u + 28 byte header" | 1540 | pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header" |
1539 | " does not equal CDB data_length: %u\n", tpdl, | 1541 | " does not equal CDB data_length: %u\n", tpdl, |
1540 | cmd->data_length); | 1542 | cmd->data_length); |
1541 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; | 1543 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; |
@@ -1555,13 +1557,13 @@ static int core_scsi3_decode_spec_i_port( | |||
1555 | spin_lock(&dev->se_port_lock); | 1557 | spin_lock(&dev->se_port_lock); |
1556 | list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) { | 1558 | list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) { |
1557 | tmp_tpg = tmp_port->sep_tpg; | 1559 | tmp_tpg = tmp_port->sep_tpg; |
1558 | if (!(tmp_tpg)) | 1560 | if (!tmp_tpg) |
1559 | continue; | 1561 | continue; |
1560 | tmp_tf_ops = TPG_TFO(tmp_tpg); | 1562 | tmp_tf_ops = tmp_tpg->se_tpg_tfo; |
1561 | if (!(tmp_tf_ops)) | 1563 | if (!tmp_tf_ops) |
1562 | continue; | 1564 | continue; |
1563 | if (!(tmp_tf_ops->get_fabric_proto_ident) || | 1565 | if (!tmp_tf_ops->get_fabric_proto_ident || |
1564 | !(tmp_tf_ops->tpg_parse_pr_out_transport_id)) | 1566 | !tmp_tf_ops->tpg_parse_pr_out_transport_id) |
1565 | continue; | 1567 | continue; |
1566 | /* | 1568 | /* |
1567 | * Look for the matching proto_ident provided by | 1569 | * Look for the matching proto_ident provided by |
@@ -1575,7 +1577,7 @@ static int core_scsi3_decode_spec_i_port( | |||
1575 | i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id( | 1577 | i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id( |
1576 | tmp_tpg, (const char *)ptr, &tid_len, | 1578 | tmp_tpg, (const char *)ptr, &tid_len, |
1577 | &iport_ptr); | 1579 | &iport_ptr); |
1578 | if (!(i_str)) | 1580 | if (!i_str) |
1579 | continue; | 1581 | continue; |
1580 | 1582 | ||
1581 | atomic_inc(&tmp_tpg->tpg_pr_ref_count); | 1583 | atomic_inc(&tmp_tpg->tpg_pr_ref_count); |
@@ -1584,7 +1586,7 @@ static int core_scsi3_decode_spec_i_port( | |||
1584 | 1586 | ||
1585 | ret = core_scsi3_tpg_depend_item(tmp_tpg); | 1587 | ret = core_scsi3_tpg_depend_item(tmp_tpg); |
1586 | if (ret != 0) { | 1588 | if (ret != 0) { |
1587 | printk(KERN_ERR " core_scsi3_tpg_depend_item()" | 1589 | pr_err(" core_scsi3_tpg_depend_item()" |
1588 | " for tmp_tpg\n"); | 1590 | " for tmp_tpg\n"); |
1589 | atomic_dec(&tmp_tpg->tpg_pr_ref_count); | 1591 | atomic_dec(&tmp_tpg->tpg_pr_ref_count); |
1590 | smp_mb__after_atomic_dec(); | 1592 | smp_mb__after_atomic_dec(); |
@@ -1605,7 +1607,7 @@ static int core_scsi3_decode_spec_i_port( | |||
1605 | } | 1607 | } |
1606 | spin_unlock_bh(&tmp_tpg->acl_node_lock); | 1608 | spin_unlock_bh(&tmp_tpg->acl_node_lock); |
1607 | 1609 | ||
1608 | if (!(dest_node_acl)) { | 1610 | if (!dest_node_acl) { |
1609 | core_scsi3_tpg_undepend_item(tmp_tpg); | 1611 | core_scsi3_tpg_undepend_item(tmp_tpg); |
1610 | spin_lock(&dev->se_port_lock); | 1612 | spin_lock(&dev->se_port_lock); |
1611 | continue; | 1613 | continue; |
@@ -1613,7 +1615,7 @@ static int core_scsi3_decode_spec_i_port( | |||
1613 | 1615 | ||
1614 | ret = core_scsi3_nodeacl_depend_item(dest_node_acl); | 1616 | ret = core_scsi3_nodeacl_depend_item(dest_node_acl); |
1615 | if (ret != 0) { | 1617 | if (ret != 0) { |
1616 | printk(KERN_ERR "configfs_depend_item() failed" | 1618 | pr_err("configfs_depend_item() failed" |
1617 | " for dest_node_acl->acl_group\n"); | 1619 | " for dest_node_acl->acl_group\n"); |
1618 | atomic_dec(&dest_node_acl->acl_pr_ref_count); | 1620 | atomic_dec(&dest_node_acl->acl_pr_ref_count); |
1619 | smp_mb__after_atomic_dec(); | 1621 | smp_mb__after_atomic_dec(); |
@@ -1623,9 +1625,9 @@ static int core_scsi3_decode_spec_i_port( | |||
1623 | } | 1625 | } |
1624 | 1626 | ||
1625 | dest_tpg = tmp_tpg; | 1627 | dest_tpg = tmp_tpg; |
1626 | printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node:" | 1628 | pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node:" |
1627 | " %s Port RTPI: %hu\n", | 1629 | " %s Port RTPI: %hu\n", |
1628 | TPG_TFO(dest_tpg)->get_fabric_name(), | 1630 | dest_tpg->se_tpg_tfo->get_fabric_name(), |
1629 | dest_node_acl->initiatorname, dest_rtpi); | 1631 | dest_node_acl->initiatorname, dest_rtpi); |
1630 | 1632 | ||
1631 | spin_lock(&dev->se_port_lock); | 1633 | spin_lock(&dev->se_port_lock); |
@@ -1633,20 +1635,20 @@ static int core_scsi3_decode_spec_i_port( | |||
1633 | } | 1635 | } |
1634 | spin_unlock(&dev->se_port_lock); | 1636 | spin_unlock(&dev->se_port_lock); |
1635 | 1637 | ||
1636 | if (!(dest_tpg)) { | 1638 | if (!dest_tpg) { |
1637 | printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Unable to locate" | 1639 | pr_err("SPC-3 PR SPEC_I_PT: Unable to locate" |
1638 | " dest_tpg\n"); | 1640 | " dest_tpg\n"); |
1639 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; | 1641 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; |
1640 | goto out; | 1642 | goto out; |
1641 | } | 1643 | } |
1642 | #if 0 | 1644 | #if 0 |
1643 | printk("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u" | 1645 | pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u" |
1644 | " tid_len: %d for %s + %s\n", | 1646 | " tid_len: %d for %s + %s\n", |
1645 | TPG_TFO(dest_tpg)->get_fabric_name(), cmd->data_length, | 1647 | dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length, |
1646 | tpdl, tid_len, i_str, iport_ptr); | 1648 | tpdl, tid_len, i_str, iport_ptr); |
1647 | #endif | 1649 | #endif |
1648 | if (tid_len > tpdl) { | 1650 | if (tid_len > tpdl) { |
1649 | printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Illegal tid_len:" | 1651 | pr_err("SPC-3 PR SPEC_I_PT: Illegal tid_len:" |
1650 | " %u for Transport ID: %s\n", tid_len, ptr); | 1652 | " %u for Transport ID: %s\n", tid_len, ptr); |
1651 | core_scsi3_nodeacl_undepend_item(dest_node_acl); | 1653 | core_scsi3_nodeacl_undepend_item(dest_node_acl); |
1652 | core_scsi3_tpg_undepend_item(dest_tpg); | 1654 | core_scsi3_tpg_undepend_item(dest_tpg); |
@@ -1660,10 +1662,10 @@ static int core_scsi3_decode_spec_i_port( | |||
1660 | */ | 1662 | */ |
1661 | dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, | 1663 | dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, |
1662 | dest_rtpi); | 1664 | dest_rtpi); |
1663 | if (!(dest_se_deve)) { | 1665 | if (!dest_se_deve) { |
1664 | printk(KERN_ERR "Unable to locate %s dest_se_deve" | 1666 | pr_err("Unable to locate %s dest_se_deve" |
1665 | " from destination RTPI: %hu\n", | 1667 | " from destination RTPI: %hu\n", |
1666 | TPG_TFO(dest_tpg)->get_fabric_name(), | 1668 | dest_tpg->se_tpg_tfo->get_fabric_name(), |
1667 | dest_rtpi); | 1669 | dest_rtpi); |
1668 | 1670 | ||
1669 | core_scsi3_nodeacl_undepend_item(dest_node_acl); | 1671 | core_scsi3_nodeacl_undepend_item(dest_node_acl); |
@@ -1674,7 +1676,7 @@ static int core_scsi3_decode_spec_i_port( | |||
1674 | 1676 | ||
1675 | ret = core_scsi3_lunacl_depend_item(dest_se_deve); | 1677 | ret = core_scsi3_lunacl_depend_item(dest_se_deve); |
1676 | if (ret < 0) { | 1678 | if (ret < 0) { |
1677 | printk(KERN_ERR "core_scsi3_lunacl_depend_item()" | 1679 | pr_err("core_scsi3_lunacl_depend_item()" |
1678 | " failed\n"); | 1680 | " failed\n"); |
1679 | atomic_dec(&dest_se_deve->pr_ref_count); | 1681 | atomic_dec(&dest_se_deve->pr_ref_count); |
1680 | smp_mb__after_atomic_dec(); | 1682 | smp_mb__after_atomic_dec(); |
@@ -1684,9 +1686,9 @@ static int core_scsi3_decode_spec_i_port( | |||
1684 | goto out; | 1686 | goto out; |
1685 | } | 1687 | } |
1686 | #if 0 | 1688 | #if 0 |
1687 | printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node: %s" | 1689 | pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s" |
1688 | " dest_se_deve mapped_lun: %u\n", | 1690 | " dest_se_deve mapped_lun: %u\n", |
1689 | TPG_TFO(dest_tpg)->get_fabric_name(), | 1691 | dest_tpg->se_tpg_tfo->get_fabric_name(), |
1690 | dest_node_acl->initiatorname, dest_se_deve->mapped_lun); | 1692 | dest_node_acl->initiatorname, dest_se_deve->mapped_lun); |
1691 | #endif | 1693 | #endif |
1692 | /* | 1694 | /* |
@@ -1712,8 +1714,8 @@ static int core_scsi3_decode_spec_i_port( | |||
1712 | */ | 1714 | */ |
1713 | tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), | 1715 | tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), |
1714 | GFP_KERNEL); | 1716 | GFP_KERNEL); |
1715 | if (!(tidh_new)) { | 1717 | if (!tidh_new) { |
1716 | printk(KERN_ERR "Unable to allocate tidh_new\n"); | 1718 | pr_err("Unable to allocate tidh_new\n"); |
1717 | core_scsi3_lunacl_undepend_item(dest_se_deve); | 1719 | core_scsi3_lunacl_undepend_item(dest_se_deve); |
1718 | core_scsi3_nodeacl_undepend_item(dest_node_acl); | 1720 | core_scsi3_nodeacl_undepend_item(dest_node_acl); |
1719 | core_scsi3_tpg_undepend_item(dest_tpg); | 1721 | core_scsi3_tpg_undepend_item(dest_tpg); |
@@ -1741,10 +1743,10 @@ static int core_scsi3_decode_spec_i_port( | |||
1741 | * and then call __core_scsi3_add_registration() in the | 1743 | * and then call __core_scsi3_add_registration() in the |
1742 | * 2nd loop which will never fail. | 1744 | * 2nd loop which will never fail. |
1743 | */ | 1745 | */ |
1744 | dest_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd), | 1746 | dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev, |
1745 | dest_node_acl, dest_se_deve, iport_ptr, | 1747 | dest_node_acl, dest_se_deve, iport_ptr, |
1746 | sa_res_key, all_tg_pt, aptpl); | 1748 | sa_res_key, all_tg_pt, aptpl); |
1747 | if (!(dest_pr_reg)) { | 1749 | if (!dest_pr_reg) { |
1748 | core_scsi3_lunacl_undepend_item(dest_se_deve); | 1750 | core_scsi3_lunacl_undepend_item(dest_se_deve); |
1749 | core_scsi3_nodeacl_undepend_item(dest_node_acl); | 1751 | core_scsi3_nodeacl_undepend_item(dest_node_acl); |
1750 | core_scsi3_tpg_undepend_item(dest_tpg); | 1752 | core_scsi3_tpg_undepend_item(dest_tpg); |
@@ -1760,6 +1762,9 @@ static int core_scsi3_decode_spec_i_port( | |||
1760 | tid_len = 0; | 1762 | tid_len = 0; |
1761 | 1763 | ||
1762 | } | 1764 | } |
1765 | |||
1766 | transport_kunmap_first_data_page(cmd); | ||
1767 | |||
1763 | /* | 1768 | /* |
1764 | * Go ahead and create registrations from tid_dest_list for the | 1769 | * Go ahead and create registrations from tid_dest_list for the |
1765 | * SPEC_I_PT provided TransportID for the *tidh referenced dest_node_acl | 1770 | * SPEC_I_PT provided TransportID for the *tidh referenced dest_node_acl |
@@ -1787,12 +1792,12 @@ static int core_scsi3_decode_spec_i_port( | |||
1787 | prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0], | 1792 | prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0], |
1788 | PR_REG_ISID_ID_LEN); | 1793 | PR_REG_ISID_ID_LEN); |
1789 | 1794 | ||
1790 | __core_scsi3_add_registration(SE_DEV(cmd), dest_node_acl, | 1795 | __core_scsi3_add_registration(cmd->se_dev, dest_node_acl, |
1791 | dest_pr_reg, 0, 0); | 1796 | dest_pr_reg, 0, 0); |
1792 | 1797 | ||
1793 | printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully" | 1798 | pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully" |
1794 | " registered Transport ID for Node: %s%s Mapped LUN:" | 1799 | " registered Transport ID for Node: %s%s Mapped LUN:" |
1795 | " %u\n", TPG_TFO(dest_tpg)->get_fabric_name(), | 1800 | " %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(), |
1796 | dest_node_acl->initiatorname, (prf_isid) ? | 1801 | dest_node_acl->initiatorname, (prf_isid) ? |
1797 | &i_buf[0] : "", dest_se_deve->mapped_lun); | 1802 | &i_buf[0] : "", dest_se_deve->mapped_lun); |
1798 | 1803 | ||
@@ -1806,6 +1811,7 @@ static int core_scsi3_decode_spec_i_port( | |||
1806 | 1811 | ||
1807 | return 0; | 1812 | return 0; |
1808 | out: | 1813 | out: |
1814 | transport_kunmap_first_data_page(cmd); | ||
1809 | /* | 1815 | /* |
1810 | * For the failure case, release everything from tid_dest_list | 1816 | * For the failure case, release everything from tid_dest_list |
1811 | * including *dest_pr_reg and the configfs dependencies.. | 1817 | * including *dest_pr_reg and the configfs dependencies.. |
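Since control CDB payloads are now scatter-gather (see the "Make all control CDBs scatter-gather" commit in this merge), the SPEC_I_PT parser can no longer read a contiguous t_task_buf; it brackets its work with the new kmap helpers instead, and the unmap must run on this error path as well as on success:

buf = transport_kmap_first_data_page(cmd);	/* CPU mapping of the first SG page */
/* ... decode the TransportID parameter data from buf ... */
transport_kunmap_first_data_page(cmd);		/* paired unmap on every exit path */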
@@ -1855,7 +1861,7 @@ static int __core_scsi3_update_aptpl_buf( | |||
1855 | { | 1861 | { |
1856 | struct se_lun *lun; | 1862 | struct se_lun *lun; |
1857 | struct se_portal_group *tpg; | 1863 | struct se_portal_group *tpg; |
1858 | struct se_subsystem_dev *su_dev = SU_DEV(dev); | 1864 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
1859 | struct t10_pr_registration *pr_reg; | 1865 | struct t10_pr_registration *pr_reg; |
1860 | unsigned char tmp[512], isid_buf[32]; | 1866 | unsigned char tmp[512], isid_buf[32]; |
1861 | ssize_t len = 0; | 1867 | ssize_t len = 0; |
@@ -1873,8 +1879,8 @@ static int __core_scsi3_update_aptpl_buf( | |||
1873 | /* | 1879 | /* |
1874 | * Walk the registration list.. | 1880 | * Walk the registration list.. |
1875 | */ | 1881 | */ |
1876 | spin_lock(&T10_RES(su_dev)->registration_lock); | 1882 | spin_lock(&su_dev->t10_pr.registration_lock); |
1877 | list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list, | 1883 | list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list, |
1878 | pr_reg_list) { | 1884 | pr_reg_list) { |
1879 | 1885 | ||
1880 | tmp[0] = '\0'; | 1886 | tmp[0] = '\0'; |
@@ -1900,7 +1906,7 @@ static int __core_scsi3_update_aptpl_buf( | |||
1900 | "res_holder=1\nres_type=%02x\n" | 1906 | "res_holder=1\nres_type=%02x\n" |
1901 | "res_scope=%02x\nres_all_tg_pt=%d\n" | 1907 | "res_scope=%02x\nres_all_tg_pt=%d\n" |
1902 | "mapped_lun=%u\n", reg_count, | 1908 | "mapped_lun=%u\n", reg_count, |
1903 | TPG_TFO(tpg)->get_fabric_name(), | 1909 | tpg->se_tpg_tfo->get_fabric_name(), |
1904 | pr_reg->pr_reg_nacl->initiatorname, isid_buf, | 1910 | pr_reg->pr_reg_nacl->initiatorname, isid_buf, |
1905 | pr_reg->pr_res_key, pr_reg->pr_res_type, | 1911 | pr_reg->pr_res_key, pr_reg->pr_res_type, |
1906 | pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt, | 1912 | pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt, |
@@ -1910,17 +1916,17 @@ static int __core_scsi3_update_aptpl_buf( | |||
1910 | "initiator_fabric=%s\ninitiator_node=%s\n%s" | 1916 | "initiator_fabric=%s\ninitiator_node=%s\n%s" |
1911 | "sa_res_key=%llu\nres_holder=0\n" | 1917 | "sa_res_key=%llu\nres_holder=0\n" |
1912 | "res_all_tg_pt=%d\nmapped_lun=%u\n", | 1918 | "res_all_tg_pt=%d\nmapped_lun=%u\n", |
1913 | reg_count, TPG_TFO(tpg)->get_fabric_name(), | 1919 | reg_count, tpg->se_tpg_tfo->get_fabric_name(), |
1914 | pr_reg->pr_reg_nacl->initiatorname, isid_buf, | 1920 | pr_reg->pr_reg_nacl->initiatorname, isid_buf, |
1915 | pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt, | 1921 | pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt, |
1916 | pr_reg->pr_res_mapped_lun); | 1922 | pr_reg->pr_res_mapped_lun); |
1917 | } | 1923 | } |
1918 | 1924 | ||
1919 | if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { | 1925 | if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { |
1920 | printk(KERN_ERR "Unable to update renaming" | 1926 | pr_err("Unable to update renaming" |
1921 | " APTPL metadata\n"); | 1927 | " APTPL metadata\n"); |
1922 | spin_unlock(&T10_RES(su_dev)->registration_lock); | 1928 | spin_unlock(&su_dev->t10_pr.registration_lock); |
1923 | return -1; | 1929 | return -EMSGSIZE; |
1924 | } | 1930 | } |
1925 | len += sprintf(buf+len, "%s", tmp); | 1931 | len += sprintf(buf+len, "%s", tmp); |
1926 | 1932 | ||
@@ -1929,23 +1935,23 @@ static int __core_scsi3_update_aptpl_buf( | |||
1929 | */ | 1935 | */ |
1930 | snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n" | 1936 | snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n" |
1931 | "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:" | 1937 | "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:" |
1932 | " %d\n", TPG_TFO(tpg)->get_fabric_name(), | 1938 | " %d\n", tpg->se_tpg_tfo->get_fabric_name(), |
1933 | TPG_TFO(tpg)->tpg_get_wwn(tpg), | 1939 | tpg->se_tpg_tfo->tpg_get_wwn(tpg), |
1934 | TPG_TFO(tpg)->tpg_get_tag(tpg), | 1940 | tpg->se_tpg_tfo->tpg_get_tag(tpg), |
1935 | lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count); | 1941 | lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count); |
1936 | 1942 | ||
1937 | if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { | 1943 | if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { |
1938 | printk(KERN_ERR "Unable to update renaming" | 1944 | pr_err("Unable to update renaming" |
1939 | " APTPL metadata\n"); | 1945 | " APTPL metadata\n"); |
1940 | spin_unlock(&T10_RES(su_dev)->registration_lock); | 1946 | spin_unlock(&su_dev->t10_pr.registration_lock); |
1941 | return -1; | 1947 | return -EMSGSIZE; |
1942 | } | 1948 | } |
1943 | len += sprintf(buf+len, "%s", tmp); | 1949 | len += sprintf(buf+len, "%s", tmp); |
1944 | reg_count++; | 1950 | reg_count++; |
1945 | } | 1951 | } |
1946 | spin_unlock(&T10_RES(su_dev)->registration_lock); | 1952 | spin_unlock(&su_dev->t10_pr.registration_lock); |
1947 | 1953 | ||
1948 | if (!(reg_count)) | 1954 | if (!reg_count) |
1949 | len += sprintf(buf+len, "No Registrations or Reservations"); | 1955 | len += sprintf(buf+len, "No Registrations or Reservations"); |
1950 | 1956 | ||
1951 | return 0; | 1957 | return 0; |
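The overflow checks above follow one bounded-append idiom: render each record into a fixed tmp[512], then copy it only if it still fits. A hypothetical helper distilling the pattern (aptpl_append() and its parameters are illustrative, not part of the tree):

static int aptpl_append(unsigned char *buf, u32 buf_len,
			ssize_t *len, const char *rec)
{
	if (*len + strlen(rec) >= buf_len)
		return -EMSGSIZE;	/* record would overflow the caller's buffer */
	*len += sprintf(buf + *len, "%s", rec);
	return 0;
}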
@@ -1975,7 +1981,7 @@ static int __core_scsi3_write_aptpl_to_file( | |||
1975 | unsigned char *buf, | 1981 | unsigned char *buf, |
1976 | u32 pr_aptpl_buf_len) | 1982 | u32 pr_aptpl_buf_len) |
1977 | { | 1983 | { |
1978 | struct t10_wwn *wwn = &SU_DEV(dev)->t10_wwn; | 1984 | struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; |
1979 | struct file *file; | 1985 | struct file *file; |
1980 | struct iovec iov[1]; | 1986 | struct iovec iov[1]; |
1981 | mm_segment_t old_fs; | 1987 | mm_segment_t old_fs; |
@@ -1987,21 +1993,21 @@ static int __core_scsi3_write_aptpl_to_file( | |||
1987 | memset(path, 0, 512); | 1993 | memset(path, 0, 512); |
1988 | 1994 | ||
1989 | if (strlen(&wwn->unit_serial[0]) >= 512) { | 1995 | if (strlen(&wwn->unit_serial[0]) >= 512) { |
1990 | printk(KERN_ERR "WWN value for struct se_device does not fit" | 1996 | pr_err("WWN value for struct se_device does not fit" |
1991 | " into path buffer\n"); | 1997 | " into path buffer\n"); |
1992 | return -1; | 1998 | return -EMSGSIZE; |
1993 | } | 1999 | } |
1994 | 2000 | ||
1995 | snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]); | 2001 | snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]); |
1996 | file = filp_open(path, flags, 0600); | 2002 | file = filp_open(path, flags, 0600); |
1997 | if (IS_ERR(file) || !file || !file->f_dentry) { | 2003 | if (IS_ERR(file) || !file || !file->f_dentry) { |
1998 | printk(KERN_ERR "filp_open(%s) for APTPL metadata" | 2004 | pr_err("filp_open(%s) for APTPL metadata" |
1999 | " failed\n", path); | 2005 | " failed\n", path); |
2000 | return -1; | 2006 | return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT); |
2001 | } | 2007 | } |
2002 | 2008 | ||
2003 | iov[0].iov_base = &buf[0]; | 2009 | iov[0].iov_base = &buf[0]; |
2004 | if (!(pr_aptpl_buf_len)) | 2010 | if (!pr_aptpl_buf_len) |
2005 | iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */ | 2011 | iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */ |
2006 | else | 2012 | else |
2007 | iov[0].iov_len = pr_aptpl_buf_len; | 2013 | iov[0].iov_len = pr_aptpl_buf_len; |
@@ -2012,9 +2018,9 @@ static int __core_scsi3_write_aptpl_to_file( | |||
2012 | set_fs(old_fs); | 2018 | set_fs(old_fs); |
2013 | 2019 | ||
2014 | if (ret < 0) { | 2020 | if (ret < 0) { |
2015 | printk("Error writing APTPL metadata file: %s\n", path); | 2021 | pr_debug("Error writing APTPL metadata file: %s\n", path); |
2016 | filp_close(file, NULL); | 2022 | filp_close(file, NULL); |
2017 | return -1; | 2023 | return -EIO; |
2018 | } | 2024 | } |
2019 | filp_close(file, NULL); | 2025 | filp_close(file, NULL); |
2020 | 2026 | ||
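For reference, the surrounding function writes the APTPL buffer with the 2011-era in-kernel file I/O pattern: widen the address limit so vfs_writev() will accept a kernel pointer, write, then restore it. A condensed sketch of those unchanged lines (error handling trimmed):

struct iovec iov = { .iov_base = &buf[0], .iov_len = strlen(buf) + 1 };
mm_segment_t old_fs = get_fs();
ssize_t ret;

set_fs(get_ds());			/* allow kernel-space buffers */
ret = vfs_writev(file, &iov, 1, &file->f_pos);
set_fs(old_fs);				/* restore the original address limit */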
@@ -2032,7 +2038,7 @@ static int core_scsi3_update_and_write_aptpl( | |||
2032 | /* | 2038 | /* |
2033 | * Can be called with a NULL pointer from PROUT service action CLEAR | 2039 | * Can be called with a NULL pointer from PROUT service action CLEAR |
2034 | */ | 2040 | */ |
2035 | if (!(in_buf)) { | 2041 | if (!in_buf) { |
2036 | memset(null_buf, 0, 64); | 2042 | memset(null_buf, 0, 64); |
2037 | buf = &null_buf[0]; | 2043 | buf = &null_buf[0]; |
2038 | /* | 2044 | /* |
@@ -2049,14 +2055,14 @@ static int core_scsi3_update_and_write_aptpl( | |||
2049 | ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len, | 2055 | ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len, |
2050 | clear_aptpl_metadata); | 2056 | clear_aptpl_metadata); |
2051 | if (ret != 0) | 2057 | if (ret != 0) |
2052 | return -1; | 2058 | return ret; |
2053 | /* | 2059 | /* |
2054 | * __core_scsi3_write_aptpl_to_file() will call strlen() | 2060 | * __core_scsi3_write_aptpl_to_file() will call strlen() |
2055 | * on the passed buf to determine pr_aptpl_buf_len. | 2061 | * on the passed buf to determine pr_aptpl_buf_len. |
2056 | */ | 2062 | */ |
2057 | ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0); | 2063 | ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0); |
2058 | if (ret != 0) | 2064 | if (ret != 0) |
2059 | return -1; | 2065 | return ret; |
2060 | 2066 | ||
2061 | return ret; | 2067 | return ret; |
2062 | } | 2068 | } |
@@ -2070,28 +2076,28 @@ static int core_scsi3_emulate_pro_register( | |||
2070 | int spec_i_pt, | 2076 | int spec_i_pt, |
2071 | int ignore_key) | 2077 | int ignore_key) |
2072 | { | 2078 | { |
2073 | struct se_session *se_sess = SE_SESS(cmd); | 2079 | struct se_session *se_sess = cmd->se_sess; |
2074 | struct se_device *dev = SE_DEV(cmd); | 2080 | struct se_device *dev = cmd->se_dev; |
2075 | struct se_dev_entry *se_deve; | 2081 | struct se_dev_entry *se_deve; |
2076 | struct se_lun *se_lun = SE_LUN(cmd); | 2082 | struct se_lun *se_lun = cmd->se_lun; |
2077 | struct se_portal_group *se_tpg; | 2083 | struct se_portal_group *se_tpg; |
2078 | struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e; | 2084 | struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e; |
2079 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 2085 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
2080 | /* Used for APTPL metadata w/ UNREGISTER */ | 2086 | /* Used for APTPL metadata w/ UNREGISTER */ |
2081 | unsigned char *pr_aptpl_buf = NULL; | 2087 | unsigned char *pr_aptpl_buf = NULL; |
2082 | unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; | 2088 | unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; |
2083 | int pr_holder = 0, ret = 0, type; | 2089 | int pr_holder = 0, ret = 0, type; |
2084 | 2090 | ||
2085 | if (!(se_sess) || !(se_lun)) { | 2091 | if (!se_sess || !se_lun) { |
2086 | printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); | 2092 | pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); |
2087 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 2093 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
2088 | } | 2094 | } |
2089 | se_tpg = se_sess->se_tpg; | 2095 | se_tpg = se_sess->se_tpg; |
2090 | se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; | 2096 | se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; |
2091 | 2097 | ||
2092 | if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) { | 2098 | if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) { |
2093 | memset(&isid_buf[0], 0, PR_REG_ISID_LEN); | 2099 | memset(&isid_buf[0], 0, PR_REG_ISID_LEN); |
2094 | TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, &isid_buf[0], | 2100 | se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, &isid_buf[0], |
2095 | PR_REG_ISID_LEN); | 2101 | PR_REG_ISID_LEN); |
2096 | isid_ptr = &isid_buf[0]; | 2102 | isid_ptr = &isid_buf[0]; |
2097 | } | 2103 | } |
@@ -2099,30 +2105,30 @@ static int core_scsi3_emulate_pro_register( | |||
2099 | * Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47 | 2105 | * Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47 |
2100 | */ | 2106 | */ |
2101 | pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess); | 2107 | pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess); |
2102 | if (!(pr_reg_e)) { | 2108 | if (!pr_reg_e) { |
2103 | if (res_key) { | 2109 | if (res_key) { |
2104 | printk(KERN_WARNING "SPC-3 PR: Reservation Key non-zero" | 2110 | pr_warn("SPC-3 PR: Reservation Key non-zero" |
2105 | " for SA REGISTER, returning CONFLICT\n"); | 2111 | " for SA REGISTER, returning CONFLICT\n"); |
2106 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | 2112 | return PYX_TRANSPORT_RESERVATION_CONFLICT; |
2107 | } | 2113 | } |
2108 | /* | 2114 | /* |
2109 | * Do nothing but return GOOD status. | 2115 | * Do nothing but return GOOD status. |
2110 | */ | 2116 | */ |
2111 | if (!(sa_res_key)) | 2117 | if (!sa_res_key) |
2112 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | 2118 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; |
2113 | 2119 | ||
2114 | if (!(spec_i_pt)) { | 2120 | if (!spec_i_pt) { |
2115 | /* | 2121 | /* |
2116 | * Perform the Service Action REGISTER on the Initiator | 2122 | * Perform the Service Action REGISTER on the Initiator |
2117 | * Port Endpoint that the PRO was received from on the | 2123 | * Port Endpoint that the PRO was received from on the |
2118 | * Logical Unit of the SCSI device server. | 2124 | * Logical Unit of the SCSI device server. |
2119 | */ | 2125 | */ |
2120 | ret = core_scsi3_alloc_registration(SE_DEV(cmd), | 2126 | ret = core_scsi3_alloc_registration(cmd->se_dev, |
2121 | se_sess->se_node_acl, se_deve, isid_ptr, | 2127 | se_sess->se_node_acl, se_deve, isid_ptr, |
2122 | sa_res_key, all_tg_pt, aptpl, | 2128 | sa_res_key, all_tg_pt, aptpl, |
2123 | ignore_key, 0); | 2129 | ignore_key, 0); |
2124 | if (ret != 0) { | 2130 | if (ret != 0) { |
2125 | printk(KERN_ERR "Unable to allocate" | 2131 | pr_err("Unable to allocate" |
2126 | " struct t10_pr_registration\n"); | 2132 | " struct t10_pr_registration\n"); |
2127 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; | 2133 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; |
2128 | } | 2134 | } |
@@ -2143,10 +2149,10 @@ static int core_scsi3_emulate_pro_register( | |||
2143 | /* | 2149 | /* |
2144 | * Nothing left to do for the APTPL=0 case. | 2150 | * Nothing left to do for the APTPL=0 case. |
2145 | */ | 2151 | */ |
2146 | if (!(aptpl)) { | 2152 | if (!aptpl) { |
2147 | pr_tmpl->pr_aptpl_active = 0; | 2153 | pr_tmpl->pr_aptpl_active = 0; |
2148 | core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0); | 2154 | core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); |
2149 | printk("SPC-3 PR: Set APTPL Bit Deactivated for" | 2155 | pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for" |
2150 | " REGISTER\n"); | 2156 | " REGISTER\n"); |
2151 | return 0; | 2157 | return 0; |
2152 | } | 2158 | } |
@@ -2155,15 +2161,15 @@ static int core_scsi3_emulate_pro_register( | |||
2155 | * update the APTPL metadata information using its | 2161 | * update the APTPL metadata information using its |
2156 | * preallocated *pr_reg->pr_aptpl_buf. | 2162 | * preallocated *pr_reg->pr_aptpl_buf. |
2157 | */ | 2163 | */ |
2158 | pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), | 2164 | pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, |
2159 | se_sess->se_node_acl, se_sess); | 2165 | se_sess->se_node_acl, se_sess); |
2160 | 2166 | ||
2161 | ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), | 2167 | ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, |
2162 | &pr_reg->pr_aptpl_buf[0], | 2168 | &pr_reg->pr_aptpl_buf[0], |
2163 | pr_tmpl->pr_aptpl_buf_len); | 2169 | pr_tmpl->pr_aptpl_buf_len); |
2164 | if (!(ret)) { | 2170 | if (!ret) { |
2165 | pr_tmpl->pr_aptpl_active = 1; | 2171 | pr_tmpl->pr_aptpl_active = 1; |
2166 | printk("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n"); | 2172 | pr_debug("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n"); |
2167 | } | 2173 | } |
2168 | 2174 | ||
2169 | core_scsi3_put_pr_reg(pr_reg); | 2175 | core_scsi3_put_pr_reg(pr_reg); |
@@ -2175,9 +2181,9 @@ static int core_scsi3_emulate_pro_register( | |||
2175 | pr_reg = pr_reg_e; | 2181 | pr_reg = pr_reg_e; |
2176 | type = pr_reg->pr_res_type; | 2182 | type = pr_reg->pr_res_type; |
2177 | 2183 | ||
2178 | if (!(ignore_key)) { | 2184 | if (!ignore_key) { |
2179 | if (res_key != pr_reg->pr_res_key) { | 2185 | if (res_key != pr_reg->pr_res_key) { |
2180 | printk(KERN_ERR "SPC-3 PR REGISTER: Received" | 2186 | pr_err("SPC-3 PR REGISTER: Received" |
2181 | " res_key: 0x%016Lx does not match" | 2187 | " res_key: 0x%016Lx does not match" |
2182 | " existing SA REGISTER res_key:" | 2188 | " existing SA REGISTER res_key:" |
2183 | " 0x%016Lx\n", res_key, | 2189 | " 0x%016Lx\n", res_key, |
@@ -2187,7 +2193,7 @@ static int core_scsi3_emulate_pro_register( | |||
2187 | } | 2193 | } |
2188 | } | 2194 | } |
2189 | if (spec_i_pt) { | 2195 | if (spec_i_pt) { |
2190 | printk(KERN_ERR "SPC-3 PR UNREGISTER: SPEC_I_PT" | 2196 | pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT" |
2191 | " set while sa_res_key=0\n"); | 2197 | " set while sa_res_key=0\n"); |
2192 | core_scsi3_put_pr_reg(pr_reg); | 2198 | core_scsi3_put_pr_reg(pr_reg); |
2193 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; | 2199 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; |
@@ -2197,7 +2203,7 @@ static int core_scsi3_emulate_pro_register( | |||
2197 | * must also set ALL_TG_PT=1 in the incoming PROUT. | 2203 | * must also set ALL_TG_PT=1 in the incoming PROUT. |
2198 | */ | 2204 | */ |
2199 | if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) { | 2205 | if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) { |
2200 | printk(KERN_ERR "SPC-3 PR UNREGISTER: ALL_TG_PT=1" | 2206 | pr_err("SPC-3 PR UNREGISTER: ALL_TG_PT=1" |
2201 | " registration exists, but ALL_TG_PT=1 bit not" | 2207 | " registration exists, but ALL_TG_PT=1 bit not" |
2202 | " present in received PROUT\n"); | 2208 | " present in received PROUT\n"); |
2203 | core_scsi3_put_pr_reg(pr_reg); | 2209 | core_scsi3_put_pr_reg(pr_reg); |
@@ -2209,8 +2215,8 @@ static int core_scsi3_emulate_pro_register( | |||
2209 | if (aptpl) { | 2215 | if (aptpl) { |
2210 | pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, | 2216 | pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, |
2211 | GFP_KERNEL); | 2217 | GFP_KERNEL); |
2212 | if (!(pr_aptpl_buf)) { | 2218 | if (!pr_aptpl_buf) { |
2213 | printk(KERN_ERR "Unable to allocate" | 2219 | pr_err("Unable to allocate" |
2214 | " pr_aptpl_buf\n"); | 2220 | " pr_aptpl_buf\n"); |
2215 | core_scsi3_put_pr_reg(pr_reg); | 2221 | core_scsi3_put_pr_reg(pr_reg); |
2216 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 2222 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
@@ -2221,9 +2227,9 @@ static int core_scsi3_emulate_pro_register( | |||
2221 | * Nexus sa_res_key=1 Change Reservation Key for registered I_T | 2227 | * Nexus sa_res_key=1 Change Reservation Key for registered I_T |
2222 | * Nexus. | 2228 | * Nexus. |
2223 | */ | 2229 | */ |
2224 | if (!(sa_res_key)) { | 2230 | if (!sa_res_key) { |
2225 | pr_holder = core_scsi3_check_implict_release( | 2231 | pr_holder = core_scsi3_check_implict_release( |
2226 | SE_DEV(cmd), pr_reg); | 2232 | cmd->se_dev, pr_reg); |
2227 | if (pr_holder < 0) { | 2233 | if (pr_holder < 0) { |
2228 | kfree(pr_aptpl_buf); | 2234 | kfree(pr_aptpl_buf); |
2229 | core_scsi3_put_pr_reg(pr_reg); | 2235 | core_scsi3_put_pr_reg(pr_reg); |
@@ -2240,7 +2246,7 @@ static int core_scsi3_emulate_pro_register( | |||
2240 | &pr_tmpl->registration_list, | 2246 | &pr_tmpl->registration_list, |
2241 | pr_reg_list) { | 2247 | pr_reg_list) { |
2242 | 2248 | ||
2243 | if (!(pr_reg_p->pr_reg_all_tg_pt)) | 2249 | if (!pr_reg_p->pr_reg_all_tg_pt) |
2244 | continue; | 2250 | continue; |
2245 | 2251 | ||
2246 | if (pr_reg_p->pr_res_key != res_key) | 2252 | if (pr_reg_p->pr_res_key != res_key) |
@@ -2260,7 +2266,7 @@ static int core_scsi3_emulate_pro_register( | |||
2260 | /* | 2266 | /* |
2261 | * Release the calling I_T Nexus registration now.. | 2267 | * Release the calling I_T Nexus registration now.. |
2262 | */ | 2268 | */ |
2263 | __core_scsi3_free_registration(SE_DEV(cmd), pr_reg, | 2269 | __core_scsi3_free_registration(cmd->se_dev, pr_reg, |
2264 | NULL, 1); | 2270 | NULL, 1); |
2265 | /* | 2271 | /* |
2266 | * From spc4r17, section 5.7.11.3 Unregistering | 2272 | * From spc4r17, section 5.7.11.3 Unregistering |
@@ -2289,10 +2295,10 @@ static int core_scsi3_emulate_pro_register( | |||
2289 | } | 2295 | } |
2290 | spin_unlock(&pr_tmpl->registration_lock); | 2296 | spin_unlock(&pr_tmpl->registration_lock); |
2291 | 2297 | ||
2292 | if (!(aptpl)) { | 2298 | if (!aptpl) { |
2293 | pr_tmpl->pr_aptpl_active = 0; | 2299 | pr_tmpl->pr_aptpl_active = 0; |
2294 | core_scsi3_update_and_write_aptpl(dev, NULL, 0); | 2300 | core_scsi3_update_and_write_aptpl(dev, NULL, 0); |
2295 | printk("SPC-3 PR: Set APTPL Bit Deactivated" | 2301 | pr_debug("SPC-3 PR: Set APTPL Bit Deactivated" |
2296 | " for UNREGISTER\n"); | 2302 | " for UNREGISTER\n"); |
2297 | return 0; | 2303 | return 0; |
2298 | } | 2304 | } |
@@ -2300,9 +2306,9 @@ static int core_scsi3_emulate_pro_register( | |||
2300 | ret = core_scsi3_update_and_write_aptpl(dev, | 2306 | ret = core_scsi3_update_and_write_aptpl(dev, |
2301 | &pr_aptpl_buf[0], | 2307 | &pr_aptpl_buf[0], |
2302 | pr_tmpl->pr_aptpl_buf_len); | 2308 | pr_tmpl->pr_aptpl_buf_len); |
2303 | if (!(ret)) { | 2309 | if (!ret) { |
2304 | pr_tmpl->pr_aptpl_active = 1; | 2310 | pr_tmpl->pr_aptpl_active = 1; |
2305 | printk("SPC-3 PR: Set APTPL Bit Activated" | 2311 | pr_debug("SPC-3 PR: Set APTPL Bit Activated" |
2306 | " for UNREGISTER\n"); | 2312 | " for UNREGISTER\n"); |
2307 | } | 2313 | } |
2308 | 2314 | ||
@@ -2315,20 +2321,20 @@ static int core_scsi3_emulate_pro_register( | |||
2315 | * READ_KEYS service action. | 2321 | * READ_KEYS service action. |
2316 | */ | 2322 | */ |
2317 | pr_reg->pr_res_generation = core_scsi3_pr_generation( | 2323 | pr_reg->pr_res_generation = core_scsi3_pr_generation( |
2318 | SE_DEV(cmd)); | 2324 | cmd->se_dev); |
2319 | pr_reg->pr_res_key = sa_res_key; | 2325 | pr_reg->pr_res_key = sa_res_key; |
2320 | printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation" | 2326 | pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation" |
2321 | " Key for %s to: 0x%016Lx PRgeneration:" | 2327 | " Key for %s to: 0x%016Lx PRgeneration:" |
2322 | " 0x%08x\n", CMD_TFO(cmd)->get_fabric_name(), | 2328 | " 0x%08x\n", cmd->se_tfo->get_fabric_name(), |
2323 | (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "", | 2329 | (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "", |
2324 | pr_reg->pr_reg_nacl->initiatorname, | 2330 | pr_reg->pr_reg_nacl->initiatorname, |
2325 | pr_reg->pr_res_key, pr_reg->pr_res_generation); | 2331 | pr_reg->pr_res_key, pr_reg->pr_res_generation); |
2326 | 2332 | ||
2327 | if (!(aptpl)) { | 2333 | if (!aptpl) { |
2328 | pr_tmpl->pr_aptpl_active = 0; | 2334 | pr_tmpl->pr_aptpl_active = 0; |
2329 | core_scsi3_update_and_write_aptpl(dev, NULL, 0); | 2335 | core_scsi3_update_and_write_aptpl(dev, NULL, 0); |
2330 | core_scsi3_put_pr_reg(pr_reg); | 2336 | core_scsi3_put_pr_reg(pr_reg); |
2331 | printk("SPC-3 PR: Set APTPL Bit Deactivated" | 2337 | pr_debug("SPC-3 PR: Set APTPL Bit Deactivated" |
2332 | " for REGISTER\n"); | 2338 | " for REGISTER\n"); |
2333 | return 0; | 2339 | return 0; |
2334 | } | 2340 | } |
@@ -2336,9 +2342,9 @@ static int core_scsi3_emulate_pro_register( | |||
2336 | ret = core_scsi3_update_and_write_aptpl(dev, | 2342 | ret = core_scsi3_update_and_write_aptpl(dev, |
2337 | &pr_aptpl_buf[0], | 2343 | &pr_aptpl_buf[0], |
2338 | pr_tmpl->pr_aptpl_buf_len); | 2344 | pr_tmpl->pr_aptpl_buf_len); |
2339 | if (!(ret)) { | 2345 | if (!ret) { |
2340 | pr_tmpl->pr_aptpl_active = 1; | 2346 | pr_tmpl->pr_aptpl_active = 1; |
2341 | printk("SPC-3 PR: Set APTPL Bit Activated" | 2347 | pr_debug("SPC-3 PR: Set APTPL Bit Activated" |
2342 | " for REGISTER\n"); | 2348 | " for REGISTER\n"); |
2343 | } | 2349 | } |
2344 | 2350 | ||
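Note on the hunks above: the dominant change is the tree-wide printk(KERN_ERR/KERN_INFO) to pr_err()/pr_warn()/pr_debug() conversion. A minimal sketch of what those helpers do, per the standard kernel convention rather than anything specific to this series:

	/* Sketch only: pr_err() expands to printk(KERN_ERR pr_fmt(fmt), ...);
	 * pr_debug() compiles away unless DEBUG or CONFIG_DYNAMIC_DEBUG is set. */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/printk.h>
	#include <linux/types.h>

	static void sketch_key_mismatch(u64 res_key, u64 reg_key)
	{
		if (res_key != reg_key)
			pr_err("res_key: 0x%016llx does not match 0x%016llx\n",
				res_key, reg_key);
		else
			pr_debug("res_key matches\n");	/* gone in normal builds */
	}

Note the semantic shift: the former KERN_INFO and bare printk() messages become pr_debug(), so they no longer appear in normal builds unless DEBUG or dynamic debug is enabled.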
@@ -2378,19 +2384,19 @@ static int core_scsi3_pro_reserve( | |||
2378 | int scope, | 2384 | int scope, |
2379 | u64 res_key) | 2385 | u64 res_key) |
2380 | { | 2386 | { |
2381 | struct se_session *se_sess = SE_SESS(cmd); | 2387 | struct se_session *se_sess = cmd->se_sess; |
2382 | struct se_dev_entry *se_deve; | 2388 | struct se_dev_entry *se_deve; |
2383 | struct se_lun *se_lun = SE_LUN(cmd); | 2389 | struct se_lun *se_lun = cmd->se_lun; |
2384 | struct se_portal_group *se_tpg; | 2390 | struct se_portal_group *se_tpg; |
2385 | struct t10_pr_registration *pr_reg, *pr_res_holder; | 2391 | struct t10_pr_registration *pr_reg, *pr_res_holder; |
2386 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 2392 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
2387 | char i_buf[PR_REG_ISID_ID_LEN]; | 2393 | char i_buf[PR_REG_ISID_ID_LEN]; |
2388 | int ret, prf_isid; | 2394 | int ret, prf_isid; |
2389 | 2395 | ||
2390 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); | 2396 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); |
2391 | 2397 | ||
2392 | if (!(se_sess) || !(se_lun)) { | 2398 | if (!se_sess || !se_lun) { |
2393 | printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); | 2399 | pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); |
2394 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 2400 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
2395 | } | 2401 | } |
2396 | se_tpg = se_sess->se_tpg; | 2402 | se_tpg = se_sess->se_tpg; |
@@ -2398,10 +2404,10 @@ static int core_scsi3_pro_reserve( | |||
2398 | /* | 2404 | /* |
2399 | * Locate the existing *pr_reg via struct se_node_acl pointers | 2405 | * Locate the existing *pr_reg via struct se_node_acl pointers |
2400 | */ | 2406 | */ |
2401 | pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl, | 2407 | pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, |
2402 | se_sess); | 2408 | se_sess); |
2403 | if (!(pr_reg)) { | 2409 | if (!pr_reg) { |
2404 | printk(KERN_ERR "SPC-3 PR: Unable to locate" | 2410 | pr_err("SPC-3 PR: Unable to locate" |
2405 | " PR_REGISTERED *pr_reg for RESERVE\n"); | 2411 | " PR_REGISTERED *pr_reg for RESERVE\n"); |
2406 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 2412 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
2407 | } | 2413 | } |
@@ -2415,7 +2421,7 @@ static int core_scsi3_pro_reserve( | |||
2415 | * registered with the logical unit for the I_T nexus; and | 2421 | * registered with the logical unit for the I_T nexus; and |
2416 | */ | 2422 | */ |
2417 | if (res_key != pr_reg->pr_res_key) { | 2423 | if (res_key != pr_reg->pr_res_key) { |
2418 | printk(KERN_ERR "SPC-3 PR RESERVE: Received res_key: 0x%016Lx" | 2424 | pr_err("SPC-3 PR RESERVE: Received res_key: 0x%016Lx" |
2419 | " does not match existing SA REGISTER res_key:" | 2425 | " does not match existing SA REGISTER res_key:" |
2420 | " 0x%016Lx\n", res_key, pr_reg->pr_res_key); | 2426 | " 0x%016Lx\n", res_key, pr_reg->pr_res_key); |
2421 | core_scsi3_put_pr_reg(pr_reg); | 2427 | core_scsi3_put_pr_reg(pr_reg); |
@@ -2432,7 +2438,7 @@ static int core_scsi3_pro_reserve( | |||
2432 | * and that persistent reservation has a scope of LU_SCOPE. | 2438 | * and that persistent reservation has a scope of LU_SCOPE. |
2433 | */ | 2439 | */ |
2434 | if (scope != PR_SCOPE_LU_SCOPE) { | 2440 | if (scope != PR_SCOPE_LU_SCOPE) { |
2435 | printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); | 2441 | pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); |
2436 | core_scsi3_put_pr_reg(pr_reg); | 2442 | core_scsi3_put_pr_reg(pr_reg); |
2437 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; | 2443 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; |
2438 | } | 2444 | } |
@@ -2456,12 +2462,12 @@ static int core_scsi3_pro_reserve( | |||
2456 | */ | 2462 | */ |
2457 | if (pr_res_holder != pr_reg) { | 2463 | if (pr_res_holder != pr_reg) { |
2458 | struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; | 2464 | struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; |
2459 | printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from" | 2465 | pr_err("SPC-3 PR: Attempted RESERVE from" |
2460 | " [%s]: %s while reservation already held by" | 2466 | " [%s]: %s while reservation already held by" |
2461 | " [%s]: %s, returning RESERVATION_CONFLICT\n", | 2467 | " [%s]: %s, returning RESERVATION_CONFLICT\n", |
2462 | CMD_TFO(cmd)->get_fabric_name(), | 2468 | cmd->se_tfo->get_fabric_name(), |
2463 | se_sess->se_node_acl->initiatorname, | 2469 | se_sess->se_node_acl->initiatorname, |
2464 | TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(), | 2470 | pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), |
2465 | pr_res_holder->pr_reg_nacl->initiatorname); | 2471 | pr_res_holder->pr_reg_nacl->initiatorname); |
2466 | 2472 | ||
2467 | spin_unlock(&dev->dev_reservation_lock); | 2473 | spin_unlock(&dev->dev_reservation_lock); |
@@ -2478,13 +2484,13 @@ static int core_scsi3_pro_reserve( | |||
2478 | if ((pr_res_holder->pr_res_type != type) || | 2484 | if ((pr_res_holder->pr_res_type != type) || |
2479 | (pr_res_holder->pr_res_scope != scope)) { | 2485 | (pr_res_holder->pr_res_scope != scope)) { |
2480 | struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; | 2486 | struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; |
2481 | printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from" | 2487 | pr_err("SPC-3 PR: Attempted RESERVE from" |
2482 | " [%s]: %s trying to change TYPE and/or SCOPE," | 2488 | " [%s]: %s trying to change TYPE and/or SCOPE," |
2483 | " while reservation already held by [%s]: %s," | 2489 | " while reservation already held by [%s]: %s," |
2484 | " returning RESERVATION_CONFLICT\n", | 2490 | " returning RESERVATION_CONFLICT\n", |
2485 | CMD_TFO(cmd)->get_fabric_name(), | 2491 | cmd->se_tfo->get_fabric_name(), |
2486 | se_sess->se_node_acl->initiatorname, | 2492 | se_sess->se_node_acl->initiatorname, |
2487 | TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(), | 2493 | pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), |
2488 | pr_res_holder->pr_reg_nacl->initiatorname); | 2494 | pr_res_holder->pr_reg_nacl->initiatorname); |
2489 | 2495 | ||
2490 | spin_unlock(&dev->dev_reservation_lock); | 2496 | spin_unlock(&dev->dev_reservation_lock); |
@@ -2516,22 +2522,22 @@ static int core_scsi3_pro_reserve( | |||
2516 | prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], | 2522 | prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], |
2517 | PR_REG_ISID_ID_LEN); | 2523 | PR_REG_ISID_ID_LEN); |
2518 | 2524 | ||
2519 | printk(KERN_INFO "SPC-3 PR [%s] Service Action: RESERVE created new" | 2525 | pr_debug("SPC-3 PR [%s] Service Action: RESERVE created new" |
2520 | " reservation holder TYPE: %s ALL_TG_PT: %d\n", | 2526 | " reservation holder TYPE: %s ALL_TG_PT: %d\n", |
2521 | CMD_TFO(cmd)->get_fabric_name(), core_scsi3_pr_dump_type(type), | 2527 | cmd->se_tfo->get_fabric_name(), core_scsi3_pr_dump_type(type), |
2522 | (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); | 2528 | (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); |
2523 | printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n", | 2529 | pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n", |
2524 | CMD_TFO(cmd)->get_fabric_name(), | 2530 | cmd->se_tfo->get_fabric_name(), |
2525 | se_sess->se_node_acl->initiatorname, | 2531 | se_sess->se_node_acl->initiatorname, |
2526 | (prf_isid) ? &i_buf[0] : ""); | 2532 | (prf_isid) ? &i_buf[0] : ""); |
2527 | spin_unlock(&dev->dev_reservation_lock); | 2533 | spin_unlock(&dev->dev_reservation_lock); |
2528 | 2534 | ||
2529 | if (pr_tmpl->pr_aptpl_active) { | 2535 | if (pr_tmpl->pr_aptpl_active) { |
2530 | ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), | 2536 | ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, |
2531 | &pr_reg->pr_aptpl_buf[0], | 2537 | &pr_reg->pr_aptpl_buf[0], |
2532 | pr_tmpl->pr_aptpl_buf_len); | 2538 | pr_tmpl->pr_aptpl_buf_len); |
2533 | if (!(ret)) | 2539 | if (!ret) |
2534 | printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata" | 2540 | pr_debug("SPC-3 PR: Updated APTPL metadata" |
2535 | " for RESERVE\n"); | 2541 | " for RESERVE\n"); |
2536 | } | 2542 | } |
2537 | 2543 | ||
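The REGISTER, UNREGISTER and RESERVE paths above all follow one APTPL convention: a NULL buffer with length 0 deactivates the saved metadata, while a populated buffer rewrites it and marks it active. A hedged sketch of that shape, using only names visible in the hunks (illustrative, not code from this commit):

	static void sketch_update_aptpl(struct se_device *dev,
					struct t10_reservation *pr_tmpl,
					unsigned char *pr_aptpl_buf, int aptpl)
	{
		if (!aptpl) {
			/* APTPL=0: clear the metadata file and the active flag */
			pr_tmpl->pr_aptpl_active = 0;
			core_scsi3_update_and_write_aptpl(dev, NULL, 0);
			return;
		}
		/* APTPL=1: rewrite metadata; flag active only on success */
		if (!core_scsi3_update_and_write_aptpl(dev, &pr_aptpl_buf[0],
						       pr_tmpl->pr_aptpl_buf_len))
			pr_tmpl->pr_aptpl_active = 1;
	}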
@@ -2558,7 +2564,7 @@ static int core_scsi3_emulate_pro_reserve( | |||
2558 | ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key); | 2564 | ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key); |
2559 | break; | 2565 | break; |
2560 | default: | 2566 | default: |
2561 | printk(KERN_ERR "SPC-3 PR: Unknown Service Action RESERVE Type:" | 2567 | pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:" |
2562 | " 0x%02x\n", type); | 2568 | " 0x%02x\n", type); |
2563 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | 2569 | return PYX_TRANSPORT_INVALID_CDB_FIELD; |
2564 | } | 2570 | } |
@@ -2587,12 +2593,12 @@ static void __core_scsi3_complete_pro_release( | |||
2587 | */ | 2593 | */ |
2588 | dev->dev_pr_res_holder = NULL; | 2594 | dev->dev_pr_res_holder = NULL; |
2589 | 2595 | ||
2590 | printk(KERN_INFO "SPC-3 PR [%s] Service Action: %s RELEASE cleared" | 2596 | pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared" |
2591 | " reservation holder TYPE: %s ALL_TG_PT: %d\n", | 2597 | " reservation holder TYPE: %s ALL_TG_PT: %d\n", |
2592 | tfo->get_fabric_name(), (explict) ? "explict" : "implict", | 2598 | tfo->get_fabric_name(), (explict) ? "explict" : "implict", |
2593 | core_scsi3_pr_dump_type(pr_reg->pr_res_type), | 2599 | core_scsi3_pr_dump_type(pr_reg->pr_res_type), |
2594 | (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); | 2600 | (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); |
2595 | printk(KERN_INFO "SPC-3 PR [%s] RELEASE Node: %s%s\n", | 2601 | pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n", |
2596 | tfo->get_fabric_name(), se_nacl->initiatorname, | 2602 | tfo->get_fabric_name(), se_nacl->initiatorname, |
2597 | (prf_isid) ? &i_buf[0] : ""); | 2603 | (prf_isid) ? &i_buf[0] : ""); |
2598 | /* | 2604 | /* |
@@ -2608,22 +2614,22 @@ static int core_scsi3_emulate_pro_release( | |||
2608 | u64 res_key) | 2614 | u64 res_key) |
2609 | { | 2615 | { |
2610 | struct se_device *dev = cmd->se_dev; | 2616 | struct se_device *dev = cmd->se_dev; |
2611 | struct se_session *se_sess = SE_SESS(cmd); | 2617 | struct se_session *se_sess = cmd->se_sess; |
2612 | struct se_lun *se_lun = SE_LUN(cmd); | 2618 | struct se_lun *se_lun = cmd->se_lun; |
2613 | struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder; | 2619 | struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder; |
2614 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 2620 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
2615 | int ret, all_reg = 0; | 2621 | int ret, all_reg = 0; |
2616 | 2622 | ||
2617 | if (!(se_sess) || !(se_lun)) { | 2623 | if (!se_sess || !se_lun) { |
2618 | printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); | 2624 | pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); |
2619 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 2625 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
2620 | } | 2626 | } |
2621 | /* | 2627 | /* |
2622 | * Locate the existing *pr_reg via struct se_node_acl pointers | 2628 | * Locate the existing *pr_reg via struct se_node_acl pointers |
2623 | */ | 2629 | */ |
2624 | pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess); | 2630 | pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess); |
2625 | if (!(pr_reg)) { | 2631 | if (!pr_reg) { |
2626 | printk(KERN_ERR "SPC-3 PR: Unable to locate" | 2632 | pr_err("SPC-3 PR: Unable to locate" |
2627 | " PR_REGISTERED *pr_reg for RELEASE\n"); | 2633 | " PR_REGISTERED *pr_reg for RELEASE\n"); |
2628 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 2634 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
2629 | } | 2635 | } |
@@ -2641,7 +2647,7 @@ static int core_scsi3_emulate_pro_release( | |||
2641 | */ | 2647 | */ |
2642 | spin_lock(&dev->dev_reservation_lock); | 2648 | spin_lock(&dev->dev_reservation_lock); |
2643 | pr_res_holder = dev->dev_pr_res_holder; | 2649 | pr_res_holder = dev->dev_pr_res_holder; |
2644 | if (!(pr_res_holder)) { | 2650 | if (!pr_res_holder) { |
2645 | /* | 2651 | /* |
2646 | * No persistent reservation, return GOOD status. | 2652 | * No persistent reservation, return GOOD status. |
2647 | */ | 2653 | */ |
@@ -2678,7 +2684,7 @@ static int core_scsi3_emulate_pro_release( | |||
2678 | * that is registered with the logical unit for the I_T nexus; | 2684 | * that is registered with the logical unit for the I_T nexus; |
2679 | */ | 2685 | */ |
2680 | if (res_key != pr_reg->pr_res_key) { | 2686 | if (res_key != pr_reg->pr_res_key) { |
2681 | printk(KERN_ERR "SPC-3 PR RELEASE: Received res_key: 0x%016Lx" | 2687 | pr_err("SPC-3 PR RELEASE: Received res_key: 0x%016Lx" |
2682 | " does not match existing SA REGISTER res_key:" | 2688 | " does not match existing SA REGISTER res_key:" |
2683 | " 0x%016Lx\n", res_key, pr_reg->pr_res_key); | 2689 | " 0x%016Lx\n", res_key, pr_reg->pr_res_key); |
2684 | spin_unlock(&dev->dev_reservation_lock); | 2690 | spin_unlock(&dev->dev_reservation_lock); |
@@ -2694,13 +2700,13 @@ static int core_scsi3_emulate_pro_release( | |||
2694 | if ((pr_res_holder->pr_res_type != type) || | 2700 | if ((pr_res_holder->pr_res_type != type) || |
2695 | (pr_res_holder->pr_res_scope != scope)) { | 2701 | (pr_res_holder->pr_res_scope != scope)) { |
2696 | struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; | 2702 | struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; |
2697 | printk(KERN_ERR "SPC-3 PR RELEASE: Attempted to release" | 2703 | pr_err("SPC-3 PR RELEASE: Attempted to release" |
2698 | " reservation from [%s]: %s with different TYPE " | 2704 | " reservation from [%s]: %s with different TYPE " |
2699 | "and/or SCOPE while reservation already held by" | 2705 | "and/or SCOPE while reservation already held by" |
2700 | " [%s]: %s, returning RESERVATION_CONFLICT\n", | 2706 | " [%s]: %s, returning RESERVATION_CONFLICT\n", |
2701 | CMD_TFO(cmd)->get_fabric_name(), | 2707 | cmd->se_tfo->get_fabric_name(), |
2702 | se_sess->se_node_acl->initiatorname, | 2708 | se_sess->se_node_acl->initiatorname, |
2703 | TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(), | 2709 | pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), |
2704 | pr_res_holder->pr_reg_nacl->initiatorname); | 2710 | pr_res_holder->pr_reg_nacl->initiatorname); |
2705 | 2711 | ||
2706 | spin_unlock(&dev->dev_reservation_lock); | 2712 | spin_unlock(&dev->dev_reservation_lock); |
@@ -2758,11 +2764,11 @@ static int core_scsi3_emulate_pro_release( | |||
2758 | 2764 | ||
2759 | write_aptpl: | 2765 | write_aptpl: |
2760 | if (pr_tmpl->pr_aptpl_active) { | 2766 | if (pr_tmpl->pr_aptpl_active) { |
2761 | ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), | 2767 | ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, |
2762 | &pr_reg->pr_aptpl_buf[0], | 2768 | &pr_reg->pr_aptpl_buf[0], |
2763 | pr_tmpl->pr_aptpl_buf_len); | 2769 | pr_tmpl->pr_aptpl_buf_len); |
2764 | if (!(ret)) | 2770 | if (!ret) |
2765 | printk("SPC-3 PR: Updated APTPL metadata for RELEASE\n"); | 2771 | pr_debug("SPC-3 PR: Updated APTPL metadata for RELEASE\n"); |
2766 | } | 2772 | } |
2767 | 2773 | ||
2768 | core_scsi3_put_pr_reg(pr_reg); | 2774 | core_scsi3_put_pr_reg(pr_reg); |
@@ -2775,18 +2781,18 @@ static int core_scsi3_emulate_pro_clear( | |||
2775 | { | 2781 | { |
2776 | struct se_device *dev = cmd->se_dev; | 2782 | struct se_device *dev = cmd->se_dev; |
2777 | struct se_node_acl *pr_reg_nacl; | 2783 | struct se_node_acl *pr_reg_nacl; |
2778 | struct se_session *se_sess = SE_SESS(cmd); | 2784 | struct se_session *se_sess = cmd->se_sess; |
2779 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 2785 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
2780 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; | 2786 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; |
2781 | u32 pr_res_mapped_lun = 0; | 2787 | u32 pr_res_mapped_lun = 0; |
2782 | int calling_it_nexus = 0; | 2788 | int calling_it_nexus = 0; |
2783 | /* | 2789 | /* |
2784 | * Locate the existing *pr_reg via struct se_node_acl pointers | 2790 | * Locate the existing *pr_reg via struct se_node_acl pointers |
2785 | */ | 2791 | */ |
2786 | pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd), | 2792 | pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, |
2787 | se_sess->se_node_acl, se_sess); | 2793 | se_sess->se_node_acl, se_sess); |
2788 | if (!(pr_reg_n)) { | 2794 | if (!pr_reg_n) { |
2789 | printk(KERN_ERR "SPC-3 PR: Unable to locate" | 2795 | pr_err("SPC-3 PR: Unable to locate" |
2790 | " PR_REGISTERED *pr_reg for CLEAR\n"); | 2796 | " PR_REGISTERED *pr_reg for CLEAR\n"); |
2791 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 2797 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
2792 | } | 2798 | } |
@@ -2802,7 +2808,7 @@ static int core_scsi3_emulate_pro_clear( | |||
2802 | * that is registered with the logical unit for the I_T nexus. | 2808 | * that is registered with the logical unit for the I_T nexus. |
2803 | */ | 2809 | */ |
2804 | if (res_key != pr_reg_n->pr_res_key) { | 2810 | if (res_key != pr_reg_n->pr_res_key) { |
2805 | printk(KERN_ERR "SPC-3 PR REGISTER: Received" | 2811 | pr_err("SPC-3 PR REGISTER: Received" |
2806 | " res_key: 0x%016Lx does not match" | 2812 | " res_key: 0x%016Lx does not match" |
2807 | " existing SA REGISTER res_key:" | 2813 | " existing SA REGISTER res_key:" |
2808 | " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key); | 2814 | " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key); |
@@ -2839,18 +2845,18 @@ static int core_scsi3_emulate_pro_clear( | |||
2839 | * command with CLEAR service action was received, with the | 2845 | * command with CLEAR service action was received, with the |
2840 | * additional sense code set to RESERVATIONS PREEMPTED. | 2846 | * additional sense code set to RESERVATIONS PREEMPTED. |
2841 | */ | 2847 | */ |
2842 | if (!(calling_it_nexus)) | 2848 | if (!calling_it_nexus) |
2843 | core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, | 2849 | core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, |
2844 | 0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED); | 2850 | 0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED); |
2845 | } | 2851 | } |
2846 | spin_unlock(&pr_tmpl->registration_lock); | 2852 | spin_unlock(&pr_tmpl->registration_lock); |
2847 | 2853 | ||
2848 | printk(KERN_INFO "SPC-3 PR [%s] Service Action: CLEAR complete\n", | 2854 | pr_debug("SPC-3 PR [%s] Service Action: CLEAR complete\n", |
2849 | CMD_TFO(cmd)->get_fabric_name()); | 2855 | cmd->se_tfo->get_fabric_name()); |
2850 | 2856 | ||
2851 | if (pr_tmpl->pr_aptpl_active) { | 2857 | if (pr_tmpl->pr_aptpl_active) { |
2852 | core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0); | 2858 | core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); |
2853 | printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata" | 2859 | pr_debug("SPC-3 PR: Updated APTPL metadata" |
2854 | " for CLEAR\n"); | 2860 | " for CLEAR\n"); |
2855 | } | 2861 | } |
2856 | 2862 | ||
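Alongside the printk cleanup, these hunks retire the accessor macros in favour of direct member access. The mapping, as evidenced by the replacements above:

	SE_DEV(cmd)                    ->  cmd->se_dev
	SE_SESS(cmd)                   ->  cmd->se_sess
	SE_LUN(cmd)                    ->  cmd->se_lun
	CMD_TFO(cmd)                   ->  cmd->se_tfo
	TPG_TFO(se_tpg)                ->  se_tpg->se_tpg_tfo
	&SU_DEV(dev)->t10_reservation  ->  &dev->se_sub_dev->t10_pr
	T_TASK(cmd)->t_task_buf        ->  transport_kmap_first_data_page(cmd)

The struct rename from t10_reservation_template to t10_reservation rides along with the last conversion.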
@@ -2889,12 +2895,12 @@ static void __core_scsi3_complete_pro_preempt( | |||
2889 | pr_reg->pr_res_type = type; | 2895 | pr_reg->pr_res_type = type; |
2890 | pr_reg->pr_res_scope = scope; | 2896 | pr_reg->pr_res_scope = scope; |
2891 | 2897 | ||
2892 | printk(KERN_INFO "SPC-3 PR [%s] Service Action: PREEMPT%s created new" | 2898 | pr_debug("SPC-3 PR [%s] Service Action: PREEMPT%s created new" |
2893 | " reservation holder TYPE: %s ALL_TG_PT: %d\n", | 2899 | " reservation holder TYPE: %s ALL_TG_PT: %d\n", |
2894 | tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "", | 2900 | tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "", |
2895 | core_scsi3_pr_dump_type(type), | 2901 | core_scsi3_pr_dump_type(type), |
2896 | (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); | 2902 | (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); |
2897 | printk(KERN_INFO "SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n", | 2903 | pr_debug("SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n", |
2898 | tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "", | 2904 | tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "", |
2899 | nacl->initiatorname, (prf_isid) ? &i_buf[0] : ""); | 2905 | nacl->initiatorname, (prf_isid) ? &i_buf[0] : ""); |
2900 | /* | 2906 | /* |
@@ -2920,7 +2926,7 @@ static void core_scsi3_release_preempt_and_abort( | |||
2920 | if (pr_reg_holder == pr_reg) | 2926 | if (pr_reg_holder == pr_reg) |
2921 | continue; | 2927 | continue; |
2922 | if (pr_reg->pr_res_holder) { | 2928 | if (pr_reg->pr_res_holder) { |
2923 | printk(KERN_WARNING "pr_reg->pr_res_holder still set\n"); | 2929 | pr_warn("pr_reg->pr_res_holder still set\n"); |
2924 | continue; | 2930 | continue; |
2925 | } | 2931 | } |
2926 | 2932 | ||
@@ -2954,25 +2960,25 @@ static int core_scsi3_pro_preempt( | |||
2954 | u64 sa_res_key, | 2960 | u64 sa_res_key, |
2955 | int abort) | 2961 | int abort) |
2956 | { | 2962 | { |
2957 | struct se_device *dev = SE_DEV(cmd); | 2963 | struct se_device *dev = cmd->se_dev; |
2958 | struct se_dev_entry *se_deve; | 2964 | struct se_dev_entry *se_deve; |
2959 | struct se_node_acl *pr_reg_nacl; | 2965 | struct se_node_acl *pr_reg_nacl; |
2960 | struct se_session *se_sess = SE_SESS(cmd); | 2966 | struct se_session *se_sess = cmd->se_sess; |
2961 | struct list_head preempt_and_abort_list; | 2967 | struct list_head preempt_and_abort_list; |
2962 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; | 2968 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; |
2963 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 2969 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
2964 | u32 pr_res_mapped_lun = 0; | 2970 | u32 pr_res_mapped_lun = 0; |
2965 | int all_reg = 0, calling_it_nexus = 0, released_regs = 0; | 2971 | int all_reg = 0, calling_it_nexus = 0, released_regs = 0; |
2966 | int prh_type = 0, prh_scope = 0, ret; | 2972 | int prh_type = 0, prh_scope = 0, ret; |
2967 | 2973 | ||
2968 | if (!(se_sess)) | 2974 | if (!se_sess) |
2969 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 2975 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
2970 | 2976 | ||
2971 | se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; | 2977 | se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; |
2972 | pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl, | 2978 | pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, |
2973 | se_sess); | 2979 | se_sess); |
2974 | if (!(pr_reg_n)) { | 2980 | if (!pr_reg_n) { |
2975 | printk(KERN_ERR "SPC-3 PR: Unable to locate" | 2981 | pr_err("SPC-3 PR: Unable to locate" |
2976 | " PR_REGISTERED *pr_reg for PREEMPT%s\n", | 2982 | " PR_REGISTERED *pr_reg for PREEMPT%s\n", |
2977 | (abort) ? "_AND_ABORT" : ""); | 2983 | (abort) ? "_AND_ABORT" : ""); |
2978 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | 2984 | return PYX_TRANSPORT_RESERVATION_CONFLICT; |
@@ -2982,7 +2988,7 @@ static int core_scsi3_pro_preempt( | |||
2982 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | 2988 | return PYX_TRANSPORT_RESERVATION_CONFLICT; |
2983 | } | 2989 | } |
2984 | if (scope != PR_SCOPE_LU_SCOPE) { | 2990 | if (scope != PR_SCOPE_LU_SCOPE) { |
2985 | printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); | 2991 | pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); |
2986 | core_scsi3_put_pr_reg(pr_reg_n); | 2992 | core_scsi3_put_pr_reg(pr_reg_n); |
2987 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; | 2993 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; |
2988 | } | 2994 | } |
@@ -2995,7 +3001,7 @@ static int core_scsi3_pro_preempt( | |||
2995 | (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))) | 3001 | (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))) |
2996 | all_reg = 1; | 3002 | all_reg = 1; |
2997 | 3003 | ||
2998 | if (!(all_reg) && !(sa_res_key)) { | 3004 | if (!all_reg && !sa_res_key) { |
2999 | spin_unlock(&dev->dev_reservation_lock); | 3005 | spin_unlock(&dev->dev_reservation_lock); |
3000 | core_scsi3_put_pr_reg(pr_reg_n); | 3006 | core_scsi3_put_pr_reg(pr_reg_n); |
3001 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; | 3007 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; |
@@ -3009,7 +3015,7 @@ static int core_scsi3_pro_preempt( | |||
3009 | * server shall perform a preempt by doing the following in an | 3015 | * server shall perform a preempt by doing the following in an |
3010 | * uninterrupted series of actions. (See below..) | 3016 | * uninterrupted series of actions. (See below..) |
3011 | */ | 3017 | */ |
3012 | if (!(pr_res_holder) || (pr_res_holder->pr_res_key != sa_res_key)) { | 3018 | if (!pr_res_holder || (pr_res_holder->pr_res_key != sa_res_key)) { |
3013 | /* | 3019 | /* |
3014 | * No existing or SA Reservation Key matching reservations.. | 3020 | * No existing or SA Reservation Key matching reservations.. |
3015 | * | 3021 | * |
@@ -3036,7 +3042,7 @@ static int core_scsi3_pro_preempt( | |||
3036 | * was received, with the additional sense code set | 3042 | * was received, with the additional sense code set |
3037 | * to REGISTRATIONS PREEMPTED. | 3043 | * to REGISTRATIONS PREEMPTED. |
3038 | */ | 3044 | */ |
3039 | if (!(all_reg)) { | 3045 | if (!all_reg) { |
3040 | if (pr_reg->pr_res_key != sa_res_key) | 3046 | if (pr_reg->pr_res_key != sa_res_key) |
3041 | continue; | 3047 | continue; |
3042 | 3048 | ||
@@ -3076,7 +3082,7 @@ static int core_scsi3_pro_preempt( | |||
3076 | NULL, 0); | 3082 | NULL, 0); |
3077 | released_regs++; | 3083 | released_regs++; |
3078 | } | 3084 | } |
3079 | if (!(calling_it_nexus)) | 3085 | if (!calling_it_nexus) |
3080 | core_scsi3_ua_allocate(pr_reg_nacl, | 3086 | core_scsi3_ua_allocate(pr_reg_nacl, |
3081 | pr_res_mapped_lun, 0x2A, | 3087 | pr_res_mapped_lun, 0x2A, |
3082 | ASCQ_2AH_RESERVATIONS_PREEMPTED); | 3088 | ASCQ_2AH_RESERVATIONS_PREEMPTED); |
@@ -3089,7 +3095,7 @@ static int core_scsi3_pro_preempt( | |||
3089 | * registered reservation key, then the device server shall | 3095 | * registered reservation key, then the device server shall |
3090 | * complete the command with RESERVATION CONFLICT status. | 3096 | * complete the command with RESERVATION CONFLICT status. |
3091 | */ | 3097 | */ |
3092 | if (!(released_regs)) { | 3098 | if (!released_regs) { |
3093 | spin_unlock(&dev->dev_reservation_lock); | 3099 | spin_unlock(&dev->dev_reservation_lock); |
3094 | core_scsi3_put_pr_reg(pr_reg_n); | 3100 | core_scsi3_put_pr_reg(pr_reg_n); |
3095 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | 3101 | return PYX_TRANSPORT_RESERVATION_CONFLICT; |
@@ -3111,17 +3117,17 @@ static int core_scsi3_pro_preempt( | |||
3111 | spin_unlock(&dev->dev_reservation_lock); | 3117 | spin_unlock(&dev->dev_reservation_lock); |
3112 | 3118 | ||
3113 | if (pr_tmpl->pr_aptpl_active) { | 3119 | if (pr_tmpl->pr_aptpl_active) { |
3114 | ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), | 3120 | ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, |
3115 | &pr_reg_n->pr_aptpl_buf[0], | 3121 | &pr_reg_n->pr_aptpl_buf[0], |
3116 | pr_tmpl->pr_aptpl_buf_len); | 3122 | pr_tmpl->pr_aptpl_buf_len); |
3117 | if (!(ret)) | 3123 | if (!ret) |
3118 | printk(KERN_INFO "SPC-3 PR: Updated APTPL" | 3124 | pr_debug("SPC-3 PR: Updated APTPL" |
3119 | " metadata for PREEMPT%s\n", (abort) ? | 3125 | " metadata for PREEMPT%s\n", (abort) ? |
3120 | "_AND_ABORT" : ""); | 3126 | "_AND_ABORT" : ""); |
3121 | } | 3127 | } |
3122 | 3128 | ||
3123 | core_scsi3_put_pr_reg(pr_reg_n); | 3129 | core_scsi3_put_pr_reg(pr_reg_n); |
3124 | core_scsi3_pr_generation(SE_DEV(cmd)); | 3130 | core_scsi3_pr_generation(cmd->se_dev); |
3125 | return 0; | 3131 | return 0; |
3126 | } | 3132 | } |
3127 | /* | 3133 | /* |
@@ -3247,16 +3253,16 @@ static int core_scsi3_pro_preempt( | |||
3247 | } | 3253 | } |
3248 | 3254 | ||
3249 | if (pr_tmpl->pr_aptpl_active) { | 3255 | if (pr_tmpl->pr_aptpl_active) { |
3250 | ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), | 3256 | ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, |
3251 | &pr_reg_n->pr_aptpl_buf[0], | 3257 | &pr_reg_n->pr_aptpl_buf[0], |
3252 | pr_tmpl->pr_aptpl_buf_len); | 3258 | pr_tmpl->pr_aptpl_buf_len); |
3253 | if (!(ret)) | 3259 | if (!ret) |
3254 | printk("SPC-3 PR: Updated APTPL metadata for PREEMPT" | 3260 | pr_debug("SPC-3 PR: Updated APTPL metadata for PREEMPT" |
3255 | "%s\n", (abort) ? "_AND_ABORT" : ""); | 3261 | "%s\n", (abort) ? "_AND_ABORT" : ""); |
3256 | } | 3262 | } |
3257 | 3263 | ||
3258 | core_scsi3_put_pr_reg(pr_reg_n); | 3264 | core_scsi3_put_pr_reg(pr_reg_n); |
3259 | core_scsi3_pr_generation(SE_DEV(cmd)); | 3265 | core_scsi3_pr_generation(cmd->se_dev); |
3260 | return 0; | 3266 | return 0; |
3261 | } | 3267 | } |
3262 | 3268 | ||
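PREEMPT, like CLEAR above, must post a RESERVATIONS PREEMPTED unit attention (ASC 0x2A) to every registered I_T nexus except the caller. An illustrative loop shape built from calls visible in the hunks; pr_res_mapped_lun is assumed here to be a field of t10_pr_registration:

	/* Sketch only: walk the registrations and queue a UA on the
	 * non-calling nexuses. */
	spin_lock(&pr_tmpl->registration_lock);
	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
				 &pr_tmpl->registration_list, pr_reg_list) {
		calling_it_nexus = (pr_reg == pr_reg_n) ? 1 : 0;
		if (!calling_it_nexus)
			core_scsi3_ua_allocate(pr_reg->pr_reg_nacl,
					pr_reg->pr_res_mapped_lun, /* assumed */
					0x2A,
					ASCQ_2AH_RESERVATIONS_PREEMPTED);
	}
	spin_unlock(&pr_tmpl->registration_lock);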
@@ -3281,7 +3287,7 @@ static int core_scsi3_emulate_pro_preempt( | |||
3281 | res_key, sa_res_key, abort); | 3287 | res_key, sa_res_key, abort); |
3282 | break; | 3288 | break; |
3283 | default: | 3289 | default: |
3284 | printk(KERN_ERR "SPC-3 PR: Unknown Service Action PREEMPT%s" | 3290 | pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s" |
3285 | " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type); | 3291 | " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type); |
3286 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | 3292 | return PYX_TRANSPORT_INVALID_CDB_FIELD; |
3287 | } | 3293 | } |
@@ -3297,17 +3303,17 @@ static int core_scsi3_emulate_pro_register_and_move( | |||
3297 | int aptpl, | 3303 | int aptpl, |
3298 | int unreg) | 3304 | int unreg) |
3299 | { | 3305 | { |
3300 | struct se_session *se_sess = SE_SESS(cmd); | 3306 | struct se_session *se_sess = cmd->se_sess; |
3301 | struct se_device *dev = SE_DEV(cmd); | 3307 | struct se_device *dev = cmd->se_dev; |
3302 | struct se_dev_entry *se_deve, *dest_se_deve = NULL; | 3308 | struct se_dev_entry *se_deve, *dest_se_deve = NULL; |
3303 | struct se_lun *se_lun = SE_LUN(cmd); | 3309 | struct se_lun *se_lun = cmd->se_lun; |
3304 | struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL; | 3310 | struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL; |
3305 | struct se_port *se_port; | 3311 | struct se_port *se_port; |
3306 | struct se_portal_group *se_tpg, *dest_se_tpg = NULL; | 3312 | struct se_portal_group *se_tpg, *dest_se_tpg = NULL; |
3307 | struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; | 3313 | struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; |
3308 | struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; | 3314 | struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; |
3309 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 3315 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
3310 | unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; | 3316 | unsigned char *buf; |
3311 | unsigned char *initiator_str; | 3317 | unsigned char *initiator_str; |
3312 | char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; | 3318 | char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; |
3313 | u32 tid_len, tmp_tid_len; | 3319 | u32 tid_len, tmp_tid_len; |
@@ -3315,14 +3321,14 @@ static int core_scsi3_emulate_pro_register_and_move( | |||
3315 | unsigned short rtpi; | 3321 | unsigned short rtpi; |
3316 | unsigned char proto_ident; | 3322 | unsigned char proto_ident; |
3317 | 3323 | ||
3318 | if (!(se_sess) || !(se_lun)) { | 3324 | if (!se_sess || !se_lun) { |
3319 | printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); | 3325 | pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); |
3320 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 3326 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
3321 | } | 3327 | } |
3322 | memset(dest_iport, 0, 64); | 3328 | memset(dest_iport, 0, 64); |
3323 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); | 3329 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); |
3324 | se_tpg = se_sess->se_tpg; | 3330 | se_tpg = se_sess->se_tpg; |
3325 | tf_ops = TPG_TFO(se_tpg); | 3331 | tf_ops = se_tpg->se_tpg_tfo; |
3326 | se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; | 3332 | se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; |
3327 | /* | 3333 | /* |
3328 | * Follow logic from spc4r17 Section 5.7.8, Table 50 -- | 3334 | * Follow logic from spc4r17 Section 5.7.8, Table 50 -- |
@@ -3330,10 +3336,10 @@ static int core_scsi3_emulate_pro_register_and_move( | |||
3330 | * | 3336 | * |
3331 | * Locate the existing *pr_reg via struct se_node_acl pointers | 3337 | * Locate the existing *pr_reg via struct se_node_acl pointers |
3332 | */ | 3338 | */ |
3333 | pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl, | 3339 | pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, |
3334 | se_sess); | 3340 | se_sess); |
3335 | if (!(pr_reg)) { | 3341 | if (!pr_reg) { |
3336 | printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED" | 3342 | pr_err("SPC-3 PR: Unable to locate PR_REGISTERED" |
3337 | " *pr_reg for REGISTER_AND_MOVE\n"); | 3343 | " *pr_reg for REGISTER_AND_MOVE\n"); |
3338 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 3344 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
3339 | } | 3345 | } |
@@ -3342,7 +3348,7 @@ static int core_scsi3_emulate_pro_register_and_move( | |||
3342 | * provided during this initiator's I_T nexus registration. | 3348 | * provided during this initiator's I_T nexus registration. |
3343 | */ | 3349 | */ |
3344 | if (res_key != pr_reg->pr_res_key) { | 3350 | if (res_key != pr_reg->pr_res_key) { |
3345 | printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received" | 3351 | pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received" |
3346 | " res_key: 0x%016Lx does not match existing SA REGISTER" | 3352 | " res_key: 0x%016Lx does not match existing SA REGISTER" |
3347 | " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key); | 3353 | " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key); |
3348 | core_scsi3_put_pr_reg(pr_reg); | 3354 | core_scsi3_put_pr_reg(pr_reg); |
@@ -3351,26 +3357,30 @@ static int core_scsi3_emulate_pro_register_and_move( | |||
3351 | /* | 3357 | /* |
3353 | * The service action reservation key needs to be non-zero | 3359 | * The service action reservation key needs to be non-zero |
3353 | */ | 3359 | */ |
3354 | if (!(sa_res_key)) { | 3360 | if (!sa_res_key) { |
3355 | printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received zero" | 3361 | pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero" |
3356 | " sa_res_key\n"); | 3362 | " sa_res_key\n"); |
3357 | core_scsi3_put_pr_reg(pr_reg); | 3363 | core_scsi3_put_pr_reg(pr_reg); |
3358 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; | 3364 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; |
3359 | } | 3365 | } |
3366 | |||
3360 | /* | 3367 | /* |
3361 | * Determine the Relative Target Port Identifier where the reservation | 3368 | * Determine the Relative Target Port Identifier where the reservation |
3362 | * will be moved to for the TransportID containing SCSI initiator WWN | 3369 | * will be moved to for the TransportID containing SCSI initiator WWN |
3363 | * information. | 3370 | * information. |
3364 | */ | 3371 | */ |
3372 | buf = transport_kmap_first_data_page(cmd); | ||
3365 | rtpi = (buf[18] & 0xff) << 8; | 3373 | rtpi = (buf[18] & 0xff) << 8; |
3366 | rtpi |= buf[19] & 0xff; | 3374 | rtpi |= buf[19] & 0xff; |
3367 | tid_len = (buf[20] & 0xff) << 24; | 3375 | tid_len = (buf[20] & 0xff) << 24; |
3368 | tid_len |= (buf[21] & 0xff) << 16; | 3376 | tid_len |= (buf[21] & 0xff) << 16; |
3369 | tid_len |= (buf[22] & 0xff) << 8; | 3377 | tid_len |= (buf[22] & 0xff) << 8; |
3370 | tid_len |= buf[23] & 0xff; | 3378 | tid_len |= buf[23] & 0xff; |
3379 | transport_kunmap_first_data_page(cmd); | ||
3380 | buf = NULL; | ||
3371 | 3381 | ||
3372 | if ((tid_len + 24) != cmd->data_length) { | 3382 | if ((tid_len + 24) != cmd->data_length) { |
3373 | printk(KERN_ERR "SPC-3 PR: Illegal tid_len: %u + 24 byte header" | 3383 | pr_err("SPC-3 PR: Illegal tid_len: %u + 24 byte header" |
3374 | " does not equal CDB data_length: %u\n", tid_len, | 3384 | " does not equal CDB data_length: %u\n", tid_len, |
3375 | cmd->data_length); | 3385 | cmd->data_length); |
3376 | core_scsi3_put_pr_reg(pr_reg); | 3386 | core_scsi3_put_pr_reg(pr_reg); |
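The RTPI (bytes 18-19) and TransportID length (bytes 20-23) are parsed big-endian with open-coded shifts in the hunk above. For comparison, a sketch using the kernel's unaligned helpers, which is not what this series does:

	#include <asm/unaligned.h>
	#include <linux/types.h>

	/* Equivalent parse of the REGISTER_AND_MOVE parameter list header. */
	static void sketch_parse_move_header(const unsigned char *buf,
					     unsigned short *rtpi, u32 *tid_len)
	{
		*rtpi = get_unaligned_be16(&buf[18]);		/* bytes 18-19 */
		*tid_len = get_unaligned_be32(&buf[20]);	/* bytes 20-23 */
	}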
@@ -3382,10 +3392,10 @@ static int core_scsi3_emulate_pro_register_and_move( | |||
3382 | if (se_port->sep_rtpi != rtpi) | 3392 | if (se_port->sep_rtpi != rtpi) |
3383 | continue; | 3393 | continue; |
3384 | dest_se_tpg = se_port->sep_tpg; | 3394 | dest_se_tpg = se_port->sep_tpg; |
3385 | if (!(dest_se_tpg)) | 3395 | if (!dest_se_tpg) |
3386 | continue; | 3396 | continue; |
3387 | dest_tf_ops = TPG_TFO(dest_se_tpg); | 3397 | dest_tf_ops = dest_se_tpg->se_tpg_tfo; |
3388 | if (!(dest_tf_ops)) | 3398 | if (!dest_tf_ops) |
3389 | continue; | 3399 | continue; |
3390 | 3400 | ||
3391 | atomic_inc(&dest_se_tpg->tpg_pr_ref_count); | 3401 | atomic_inc(&dest_se_tpg->tpg_pr_ref_count); |
@@ -3394,7 +3404,7 @@ static int core_scsi3_emulate_pro_register_and_move( | |||
3394 | 3404 | ||
3395 | ret = core_scsi3_tpg_depend_item(dest_se_tpg); | 3405 | ret = core_scsi3_tpg_depend_item(dest_se_tpg); |
3396 | if (ret != 0) { | 3406 | if (ret != 0) { |
3397 | printk(KERN_ERR "core_scsi3_tpg_depend_item() failed" | 3407 | pr_err("core_scsi3_tpg_depend_item() failed" |
3398 | " for dest_se_tpg\n"); | 3408 | " for dest_se_tpg\n"); |
3399 | atomic_dec(&dest_se_tpg->tpg_pr_ref_count); | 3409 | atomic_dec(&dest_se_tpg->tpg_pr_ref_count); |
3400 | smp_mb__after_atomic_dec(); | 3410 | smp_mb__after_atomic_dec(); |
@@ -3407,20 +3417,22 @@ static int core_scsi3_emulate_pro_register_and_move( | |||
3407 | } | 3417 | } |
3408 | spin_unlock(&dev->se_port_lock); | 3418 | spin_unlock(&dev->se_port_lock); |
3409 | 3419 | ||
3410 | if (!(dest_se_tpg) || (!dest_tf_ops)) { | 3420 | if (!dest_se_tpg || !dest_tf_ops) { |
3411 | printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate" | 3421 | pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" |
3412 | " fabric ops from Relative Target Port Identifier:" | 3422 | " fabric ops from Relative Target Port Identifier:" |
3413 | " %hu\n", rtpi); | 3423 | " %hu\n", rtpi); |
3414 | core_scsi3_put_pr_reg(pr_reg); | 3424 | core_scsi3_put_pr_reg(pr_reg); |
3415 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; | 3425 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; |
3416 | } | 3426 | } |
3427 | |||
3428 | buf = transport_kmap_first_data_page(cmd); | ||
3417 | proto_ident = (buf[24] & 0x0f); | 3429 | proto_ident = (buf[24] & 0x0f); |
3418 | #if 0 | 3430 | #if 0 |
3419 | printk("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:" | 3431 | pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:" |
3420 | " 0x%02x\n", proto_ident); | 3432 | " 0x%02x\n", proto_ident); |
3421 | #endif | 3433 | #endif |
3422 | if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) { | 3434 | if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) { |
3423 | printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Received" | 3435 | pr_err("SPC-3 PR REGISTER_AND_MOVE: Received" |
3424 | " proto_ident: 0x%02x does not match ident: 0x%02x" | 3436 | " proto_ident: 0x%02x does not match ident: 0x%02x" |
3425 | " from fabric: %s\n", proto_ident, | 3437 | " from fabric: %s\n", proto_ident, |
3426 | dest_tf_ops->get_fabric_proto_ident(dest_se_tpg), | 3438 | dest_tf_ops->get_fabric_proto_ident(dest_se_tpg), |
@@ -3429,7 +3441,7 @@ static int core_scsi3_emulate_pro_register_and_move( | |||
3429 | goto out; | 3441 | goto out; |
3430 | } | 3442 | } |
3431 | if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) { | 3443 | if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) { |
3432 | printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Fabric does not" | 3444 | pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not" |
3433 | " containg a valid tpg_parse_pr_out_transport_id" | 3445 | " containg a valid tpg_parse_pr_out_transport_id" |
3434 | " function pointer\n"); | 3446 | " function pointer\n"); |
3435 | ret = PYX_TRANSPORT_LU_COMM_FAILURE; | 3447 | ret = PYX_TRANSPORT_LU_COMM_FAILURE; |
@@ -3437,14 +3449,17 @@ static int core_scsi3_emulate_pro_register_and_move( | |||
3437 | } | 3449 | } |
3438 | initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg, | 3450 | initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg, |
3439 | (const char *)&buf[24], &tmp_tid_len, &iport_ptr); | 3451 | (const char *)&buf[24], &tmp_tid_len, &iport_ptr); |
3440 | if (!(initiator_str)) { | 3452 | if (!initiator_str) { |
3441 | printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate" | 3453 | pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" |
3442 | " initiator_str from Transport ID\n"); | 3454 | " initiator_str from Transport ID\n"); |
3443 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; | 3455 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; |
3444 | goto out; | 3456 | goto out; |
3445 | } | 3457 | } |
3446 | 3458 | ||
3447 | printk(KERN_INFO "SPC-3 PR [%s] Extracted initiator %s identifier: %s" | 3459 | transport_kunmap_first_data_page(cmd); |
3460 | buf = NULL; | ||
3461 | |||
3462 | pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s" | ||
3448 | " %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ? | 3463 | " %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ? |
3449 | "port" : "device", initiator_str, (iport_ptr != NULL) ? | 3464 | "port" : "device", initiator_str, (iport_ptr != NULL) ? |
3450 | iport_ptr : ""); | 3465 | iport_ptr : ""); |
@@ -3459,18 +3474,18 @@ static int core_scsi3_emulate_pro_register_and_move( | |||
3459 | pr_reg_nacl = pr_reg->pr_reg_nacl; | 3474 | pr_reg_nacl = pr_reg->pr_reg_nacl; |
3460 | matching_iname = (!strcmp(initiator_str, | 3475 | matching_iname = (!strcmp(initiator_str, |
3461 | pr_reg_nacl->initiatorname)) ? 1 : 0; | 3476 | pr_reg_nacl->initiatorname)) ? 1 : 0; |
3462 | if (!(matching_iname)) | 3477 | if (!matching_iname) |
3463 | goto after_iport_check; | 3478 | goto after_iport_check; |
3464 | 3479 | ||
3465 | if (!(iport_ptr) || !(pr_reg->isid_present_at_reg)) { | 3480 | if (!iport_ptr || !pr_reg->isid_present_at_reg) { |
3466 | printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s" | 3481 | pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s" |
3467 | " matches: %s on received I_T Nexus\n", initiator_str, | 3482 | " matches: %s on received I_T Nexus\n", initiator_str, |
3468 | pr_reg_nacl->initiatorname); | 3483 | pr_reg_nacl->initiatorname); |
3469 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; | 3484 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; |
3470 | goto out; | 3485 | goto out; |
3471 | } | 3486 | } |
3472 | if (!(strcmp(iport_ptr, pr_reg->pr_reg_isid))) { | 3487 | if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) { |
3473 | printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s" | 3488 | pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s" |
3474 | " matches: %s %s on received I_T Nexus\n", | 3489 | " matches: %s %s on received I_T Nexus\n", |
3475 | initiator_str, iport_ptr, pr_reg_nacl->initiatorname, | 3490 | initiator_str, iport_ptr, pr_reg_nacl->initiatorname, |
3476 | pr_reg->pr_reg_isid); | 3491 | pr_reg->pr_reg_isid); |
@@ -3490,8 +3505,8 @@ after_iport_check: | |||
3490 | } | 3505 | } |
3491 | spin_unlock_bh(&dest_se_tpg->acl_node_lock); | 3506 | spin_unlock_bh(&dest_se_tpg->acl_node_lock); |
3492 | 3507 | ||
3493 | if (!(dest_node_acl)) { | 3508 | if (!dest_node_acl) { |
3494 | printk(KERN_ERR "Unable to locate %s dest_node_acl for" | 3509 | pr_err("Unable to locate %s dest_node_acl for" |
3495 | " TransportID%s\n", dest_tf_ops->get_fabric_name(), | 3510 | " TransportID%s\n", dest_tf_ops->get_fabric_name(), |
3496 | initiator_str); | 3511 | initiator_str); |
3497 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; | 3512 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; |
@@ -3499,7 +3514,7 @@ after_iport_check: | |||
3499 | } | 3514 | } |
3500 | ret = core_scsi3_nodeacl_depend_item(dest_node_acl); | 3515 | ret = core_scsi3_nodeacl_depend_item(dest_node_acl); |
3501 | if (ret != 0) { | 3516 | if (ret != 0) { |
3502 | printk(KERN_ERR "core_scsi3_nodeacl_depend_item() for" | 3517 | pr_err("core_scsi3_nodeacl_depend_item() for" |
3503 | " dest_node_acl\n"); | 3518 | " dest_node_acl\n"); |
3504 | atomic_dec(&dest_node_acl->acl_pr_ref_count); | 3519 | atomic_dec(&dest_node_acl->acl_pr_ref_count); |
3505 | smp_mb__after_atomic_dec(); | 3520 | smp_mb__after_atomic_dec(); |
@@ -3508,7 +3523,7 @@ after_iport_check: | |||
3508 | goto out; | 3523 | goto out; |
3509 | } | 3524 | } |
3510 | #if 0 | 3525 | #if 0 |
3511 | printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:" | 3526 | pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:" |
3512 | " %s from TransportID\n", dest_tf_ops->get_fabric_name(), | 3527 | " %s from TransportID\n", dest_tf_ops->get_fabric_name(), |
3513 | dest_node_acl->initiatorname); | 3528 | dest_node_acl->initiatorname); |
3514 | #endif | 3529 | #endif |
@@ -3517,8 +3532,8 @@ after_iport_check: | |||
3517 | * PORT IDENTIFIER. | 3532 | * PORT IDENTIFIER. |
3518 | */ | 3533 | */ |
3519 | dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi); | 3534 | dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi); |
3520 | if (!(dest_se_deve)) { | 3535 | if (!dest_se_deve) { |
3521 | printk(KERN_ERR "Unable to locate %s dest_se_deve from RTPI:" | 3536 | pr_err("Unable to locate %s dest_se_deve from RTPI:" |
3522 | " %hu\n", dest_tf_ops->get_fabric_name(), rtpi); | 3537 | " %hu\n", dest_tf_ops->get_fabric_name(), rtpi); |
3523 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; | 3538 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; |
3524 | goto out; | 3539 | goto out; |
@@ -3526,7 +3541,7 @@ after_iport_check: | |||
3526 | 3541 | ||
3527 | ret = core_scsi3_lunacl_depend_item(dest_se_deve); | 3542 | ret = core_scsi3_lunacl_depend_item(dest_se_deve); |
3528 | if (ret < 0) { | 3543 | if (ret < 0) { |
3529 | printk(KERN_ERR "core_scsi3_lunacl_depend_item() failed\n"); | 3544 | pr_err("core_scsi3_lunacl_depend_item() failed\n"); |
3530 | atomic_dec(&dest_se_deve->pr_ref_count); | 3545 | atomic_dec(&dest_se_deve->pr_ref_count); |
3531 | smp_mb__after_atomic_dec(); | 3546 | smp_mb__after_atomic_dec(); |
3532 | dest_se_deve = NULL; | 3547 | dest_se_deve = NULL; |
@@ -3534,7 +3549,7 @@ after_iport_check: | |||
3534 | goto out; | 3549 | goto out; |
3535 | } | 3550 | } |
3536 | #if 0 | 3551 | #if 0 |
3537 | printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN" | 3552 | pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN" |
3538 | " ACL for dest_se_deve->mapped_lun: %u\n", | 3553 | " ACL for dest_se_deve->mapped_lun: %u\n", |
3539 | dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname, | 3554 | dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname, |
3540 | dest_se_deve->mapped_lun); | 3555 | dest_se_deve->mapped_lun); |
@@ -3545,8 +3560,8 @@ after_iport_check: | |||
3545 | */ | 3560 | */ |
3546 | spin_lock(&dev->dev_reservation_lock); | 3561 | spin_lock(&dev->dev_reservation_lock); |
3547 | pr_res_holder = dev->dev_pr_res_holder; | 3562 | pr_res_holder = dev->dev_pr_res_holder; |
3548 | if (!(pr_res_holder)) { | 3563 | if (!pr_res_holder) { |
3549 | printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: No reservation" | 3564 | pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation" |
3550 | " currently held\n"); | 3565 | " currently held\n"); |
3551 | spin_unlock(&dev->dev_reservation_lock); | 3566 | spin_unlock(&dev->dev_reservation_lock); |
3552 | ret = PYX_TRANSPORT_INVALID_CDB_FIELD; | 3567 | ret = PYX_TRANSPORT_INVALID_CDB_FIELD; |
@@ -3559,7 +3574,7 @@ after_iport_check: | |||
3559 | * Register behaviors for a REGISTER AND MOVE service action | 3574 | * Register behaviors for a REGISTER AND MOVE service action |
3560 | */ | 3575 | */ |
3561 | if (pr_res_holder != pr_reg) { | 3576 | if (pr_res_holder != pr_reg) { |
3562 | printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Calling I_T" | 3577 | pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T" |
3563 | " Nexus is not reservation holder\n"); | 3578 | " Nexus is not reservation holder\n"); |
3564 | spin_unlock(&dev->dev_reservation_lock); | 3579 | spin_unlock(&dev->dev_reservation_lock); |
3565 | ret = PYX_TRANSPORT_RESERVATION_CONFLICT; | 3580 | ret = PYX_TRANSPORT_RESERVATION_CONFLICT; |
@@ -3576,7 +3591,7 @@ after_iport_check: | |||
3576 | */ | 3591 | */ |
3577 | if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || | 3592 | if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || |
3578 | (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) { | 3593 | (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) { |
3579 | printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Unable to move" | 3594 | pr_warn("SPC-3 PR REGISTER_AND_MOVE: Unable to move" |
3580 | " reservation for type: %s\n", | 3595 | " reservation for type: %s\n", |
3581 | core_scsi3_pr_dump_type(pr_res_holder->pr_res_type)); | 3596 | core_scsi3_pr_dump_type(pr_res_holder->pr_res_type)); |
3582 | spin_unlock(&dev->dev_reservation_lock); | 3597 | spin_unlock(&dev->dev_reservation_lock); |
@@ -3611,8 +3626,8 @@ after_iport_check: | |||
3611 | */ | 3626 | */ |
3612 | dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, | 3627 | dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, |
3613 | iport_ptr); | 3628 | iport_ptr); |
3614 | if (!(dest_pr_reg)) { | 3629 | if (!dest_pr_reg) { |
3615 | ret = core_scsi3_alloc_registration(SE_DEV(cmd), | 3630 | ret = core_scsi3_alloc_registration(cmd->se_dev, |
3616 | dest_node_acl, dest_se_deve, iport_ptr, | 3631 | dest_node_acl, dest_se_deve, iport_ptr, |
3617 | sa_res_key, 0, aptpl, 2, 1); | 3632 | sa_res_key, 0, aptpl, 2, 1); |
3618 | if (ret != 0) { | 3633 | if (ret != 0) { |
@@ -3644,16 +3659,16 @@ after_iport_check: | |||
3644 | /* | 3659 | /* |
3645 | * Increment PRGeneration for existing registrations.. | 3660 | * Increment PRGeneration for existing registrations.. |
3646 | */ | 3661 | */ |
3647 | if (!(new_reg)) | 3662 | if (!new_reg) |
3648 | dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++; | 3663 | dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++; |
3649 | spin_unlock(&dev->dev_reservation_lock); | 3664 | spin_unlock(&dev->dev_reservation_lock); |
3650 | 3665 | ||
3651 | printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE" | 3666 | pr_debug("SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE" |
3652 | " created new reservation holder TYPE: %s on object RTPI:" | 3667 | " created new reservation holder TYPE: %s on object RTPI:" |
3653 | " %hu PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(), | 3668 | " %hu PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(), |
3654 | core_scsi3_pr_dump_type(type), rtpi, | 3669 | core_scsi3_pr_dump_type(type), rtpi, |
3655 | dest_pr_reg->pr_res_generation); | 3670 | dest_pr_reg->pr_res_generation); |
3656 | printk(KERN_INFO "SPC-3 PR Successfully moved reservation from" | 3671 | pr_debug("SPC-3 PR Successfully moved reservation from" |
3657 | " %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n", | 3672 | " %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n", |
3658 | tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname, | 3673 | tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname, |
3659 | (prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(), | 3674 | (prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(), |
@@ -3681,24 +3696,28 @@ after_iport_check: | |||
3681 | * Clear the APTPL metadata if APTPL has been disabled, otherwise | 3696 | * Clear the APTPL metadata if APTPL has been disabled, otherwise |
3682 | * write out the updated metadata to struct file for this SCSI device. | 3697 | * write out the updated metadata to struct file for this SCSI device. |
3683 | */ | 3698 | */ |
3684 | if (!(aptpl)) { | 3699 | if (!aptpl) { |
3685 | pr_tmpl->pr_aptpl_active = 0; | 3700 | pr_tmpl->pr_aptpl_active = 0; |
3686 | core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0); | 3701 | core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); |
3687 | printk("SPC-3 PR: Set APTPL Bit Deactivated for" | 3702 | pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for" |
3688 | " REGISTER_AND_MOVE\n"); | 3703 | " REGISTER_AND_MOVE\n"); |
3689 | } else { | 3704 | } else { |
3690 | pr_tmpl->pr_aptpl_active = 1; | 3705 | pr_tmpl->pr_aptpl_active = 1; |
3691 | ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), | 3706 | ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, |
3692 | &dest_pr_reg->pr_aptpl_buf[0], | 3707 | &dest_pr_reg->pr_aptpl_buf[0], |
3693 | pr_tmpl->pr_aptpl_buf_len); | 3708 | pr_tmpl->pr_aptpl_buf_len); |
3694 | if (!(ret)) | 3709 | if (!ret) |
3695 | printk("SPC-3 PR: Set APTPL Bit Activated for" | 3710 | pr_debug("SPC-3 PR: Set APTPL Bit Activated for" |
3696 | " REGISTER_AND_MOVE\n"); | 3711 | " REGISTER_AND_MOVE\n"); |
3697 | } | 3712 | } |
3698 | 3713 | ||
3714 | transport_kunmap_first_data_page(cmd); | ||
3715 | |||
3699 | core_scsi3_put_pr_reg(dest_pr_reg); | 3716 | core_scsi3_put_pr_reg(dest_pr_reg); |
3700 | return 0; | 3717 | return 0; |
3701 | out: | 3718 | out: |
3719 | if (buf) | ||
3720 | transport_kunmap_first_data_page(cmd); | ||
3702 | if (dest_se_deve) | 3721 | if (dest_se_deve) |
3703 | core_scsi3_lunacl_undepend_item(dest_se_deve); | 3722 | core_scsi3_lunacl_undepend_item(dest_se_deve); |
3704 | if (dest_node_acl) | 3723 | if (dest_node_acl) |
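[Note: the new map/unmap discipline is the heart of this hunk: the payload is reached via transport_kmap_first_data_page() and every exit path must drop the mapping exactly once. A condensed sketch of the pattern, where parse_payload() is a stand-in for the real field extraction:

    unsigned char *buf = transport_kmap_first_data_page(cmd);

    if (parse_payload(buf) < 0)
            goto out;
    transport_kunmap_first_data_page(cmd);
    buf = NULL;                     /* mark the mapping as dropped */
    /* ... work that can still fail ... */
    return 0;
    out:
    if (buf)                        /* unmap only if still held */
            transport_kunmap_first_data_page(cmd);
    return ret;

The `if (buf)` guard lets one error label serve call sites both before and after the normal-path unmap.]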
@@ -3723,7 +3742,7 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb) | |||
3723 | */ | 3742 | */ |
3724 | static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) | 3743 | static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) |
3725 | { | 3744 | { |
3726 | unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; | 3745 | unsigned char *buf; |
3727 | u64 res_key, sa_res_key; | 3746 | u64 res_key, sa_res_key; |
3728 | int sa, scope, type, aptpl; | 3747 | int sa, scope, type, aptpl; |
3729 | int spec_i_pt = 0, all_tg_pt = 0, unreg = 0; | 3748 | int spec_i_pt = 0, all_tg_pt = 0, unreg = 0; |
@@ -3731,11 +3750,11 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) | |||
3731 | * FIXME: A NULL struct se_session pointer means this is not coming from | 3750 | * FIXME: A NULL struct se_session pointer means this is not coming from |
3732 | * a $FABRIC_MOD's nexus, but from internal passthrough ops. | 3751 | * a $FABRIC_MOD's nexus, but from internal passthrough ops. |
3733 | */ | 3752 | */ |
3734 | if (!(SE_SESS(cmd))) | 3753 | if (!cmd->se_sess) |
3735 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 3754 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
3736 | 3755 | ||
3737 | if (cmd->data_length < 24) { | 3756 | if (cmd->data_length < 24) { |
3738 | printk(KERN_WARNING "SPC-PR: Received PR OUT parameter list" | 3757 | pr_warn("SPC-PR: Received PR OUT parameter list" |
3739 | " length too small: %u\n", cmd->data_length); | 3758 | " length too small: %u\n", cmd->data_length); |
3740 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; | 3759 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; |
3741 | } | 3760 | } |
@@ -3745,6 +3764,8 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) | |||
3745 | sa = (cdb[1] & 0x1f); | 3764 | sa = (cdb[1] & 0x1f); |
3746 | scope = (cdb[2] & 0xf0); | 3765 | scope = (cdb[2] & 0xf0); |
3747 | type = (cdb[2] & 0x0f); | 3766 | type = (cdb[2] & 0x0f); |
3767 | |||
3768 | buf = transport_kmap_first_data_page(cmd); | ||
3748 | /* | 3769 | /* |
3749 | * From PERSISTENT_RESERVE_OUT parameter list (payload) | 3770 | * From PERSISTENT_RESERVE_OUT parameter list (payload) |
3750 | */ | 3771 | */ |
@@ -3762,6 +3783,9 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) | |||
3762 | aptpl = (buf[17] & 0x01); | 3783 | aptpl = (buf[17] & 0x01); |
3763 | unreg = (buf[17] & 0x02); | 3784 | unreg = (buf[17] & 0x02); |
3764 | } | 3785 | } |
3786 | transport_kunmap_first_data_page(cmd); | ||
3787 | buf = NULL; | ||
3788 | |||
3765 | /* | 3789 | /* |
3766 | * SPEC_I_PT=1 is only valid for Service action: REGISTER | 3790 | * SPEC_I_PT=1 is only valid for Service action: REGISTER |
3767 | */ | 3791 | */ |
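[Note: the fields pulled out of the mapped buffer follow the SPC-3 PR OUT parameter-list layout: an 8-byte RESERVATION KEY at offset 0, an 8-byte SERVICE ACTION RESERVATION KEY at offset 8, and the APTPL/flag bits in byte 17. The open-coded shifts in this function are equivalent to the generic unaligned helpers; a sketch:

    #include <asm/unaligned.h>

    res_key    = get_unaligned_be64(&buf[0]);
    sa_res_key = get_unaligned_be64(&buf[8]);
    aptpl      = buf[17] & 0x01;

Since the page is unmapped (and buf NULLed) as soon as parsing finishes, the decoded values must live in plain stack copies.]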
@@ -3776,9 +3800,9 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) | |||
3776 | * the sense key set to ILLEGAL REQUEST, and the additional sense | 3800 | * the sense key set to ILLEGAL REQUEST, and the additional sense |
3777 | * code set to PARAMETER LIST LENGTH ERROR. | 3801 | * code set to PARAMETER LIST LENGTH ERROR. |
3778 | */ | 3802 | */ |
3779 | if (!(spec_i_pt) && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) && | 3803 | if (!spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) && |
3780 | (cmd->data_length != 24)) { | 3804 | (cmd->data_length != 24)) { |
3781 | printk(KERN_WARNING "SPC-PR: Received PR OUT illegal parameter" | 3805 | pr_warn("SPC-PR: Received PR OUT illegal parameter" |
3782 | " list length: %u\n", cmd->data_length); | 3806 | " list length: %u\n", cmd->data_length); |
3783 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; | 3807 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; |
3784 | } | 3808 | } |
@@ -3812,7 +3836,7 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) | |||
3812 | return core_scsi3_emulate_pro_register_and_move(cmd, res_key, | 3836 | return core_scsi3_emulate_pro_register_and_move(cmd, res_key, |
3813 | sa_res_key, aptpl, unreg); | 3837 | sa_res_key, aptpl, unreg); |
3814 | default: | 3838 | default: |
3815 | printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service" | 3839 | pr_err("Unknown PERSISTENT_RESERVE_OUT service" |
3816 | " action: 0x%02x\n", cdb[1] & 0x1f); | 3840 | " action: 0x%02x\n", cdb[1] & 0x1f); |
3817 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | 3841 | return PYX_TRANSPORT_INVALID_CDB_FIELD; |
3818 | } | 3842 | } |
@@ -3827,25 +3851,26 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) | |||
3827 | */ | 3851 | */ |
3828 | static int core_scsi3_pri_read_keys(struct se_cmd *cmd) | 3852 | static int core_scsi3_pri_read_keys(struct se_cmd *cmd) |
3829 | { | 3853 | { |
3830 | struct se_device *se_dev = SE_DEV(cmd); | 3854 | struct se_device *se_dev = cmd->se_dev; |
3831 | struct se_subsystem_dev *su_dev = SU_DEV(se_dev); | 3855 | struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; |
3832 | struct t10_pr_registration *pr_reg; | 3856 | struct t10_pr_registration *pr_reg; |
3833 | unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; | 3857 | unsigned char *buf; |
3834 | u32 add_len = 0, off = 8; | 3858 | u32 add_len = 0, off = 8; |
3835 | 3859 | ||
3836 | if (cmd->data_length < 8) { | 3860 | if (cmd->data_length < 8) { |
3837 | printk(KERN_ERR "PRIN SA READ_KEYS SCSI Data Length: %u" | 3861 | pr_err("PRIN SA READ_KEYS SCSI Data Length: %u" |
3838 | " too small\n", cmd->data_length); | 3862 | " too small\n", cmd->data_length); |
3839 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | 3863 | return PYX_TRANSPORT_INVALID_CDB_FIELD; |
3840 | } | 3864 | } |
3841 | 3865 | ||
3842 | buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff); | 3866 | buf = transport_kmap_first_data_page(cmd); |
3843 | buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff); | 3867 | buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); |
3844 | buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff); | 3868 | buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); |
3845 | buf[3] = (T10_RES(su_dev)->pr_generation & 0xff); | 3869 | buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); |
3870 | buf[3] = (su_dev->t10_pr.pr_generation & 0xff); | ||
3846 | 3871 | ||
3847 | spin_lock(&T10_RES(su_dev)->registration_lock); | 3872 | spin_lock(&su_dev->t10_pr.registration_lock); |
3848 | list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list, | 3873 | list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list, |
3849 | pr_reg_list) { | 3874 | pr_reg_list) { |
3850 | /* | 3875 | /* |
3851 | * Check for overflow of 8byte PRI READ_KEYS payload and | 3876 | * Check for overflow of 8byte PRI READ_KEYS payload and |
@@ -3865,13 +3890,15 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) | |||
3865 | 3890 | ||
3866 | add_len += 8; | 3891 | add_len += 8; |
3867 | } | 3892 | } |
3868 | spin_unlock(&T10_RES(su_dev)->registration_lock); | 3893 | spin_unlock(&su_dev->t10_pr.registration_lock); |
3869 | 3894 | ||
3870 | buf[4] = ((add_len >> 24) & 0xff); | 3895 | buf[4] = ((add_len >> 24) & 0xff); |
3871 | buf[5] = ((add_len >> 16) & 0xff); | 3896 | buf[5] = ((add_len >> 16) & 0xff); |
3872 | buf[6] = ((add_len >> 8) & 0xff); | 3897 | buf[6] = ((add_len >> 8) & 0xff); |
3873 | buf[7] = (add_len & 0xff); | 3898 | buf[7] = (add_len & 0xff); |
3874 | 3899 | ||
3900 | transport_kunmap_first_data_page(cmd); | ||
3901 | |||
3875 | return 0; | 3902 | return 0; |
3876 | } | 3903 | } |
3877 | 3904 | ||
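[Note: the four shift-and-mask stores above are a hand-rolled big-endian 32-bit write. put_unaligned_be32() from asm/unaligned.h expresses the same encoding in one line each; a possible cleanup, not something this commit does:

    #include <asm/unaligned.h>

    put_unaligned_be32(su_dev->t10_pr.pr_generation, &buf[0]);
    put_unaligned_be32(add_len, &buf[4]);]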
@@ -3882,23 +3909,24 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) | |||
3882 | */ | 3909 | */ |
3883 | static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) | 3910 | static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) |
3884 | { | 3911 | { |
3885 | struct se_device *se_dev = SE_DEV(cmd); | 3912 | struct se_device *se_dev = cmd->se_dev; |
3886 | struct se_subsystem_dev *su_dev = SU_DEV(se_dev); | 3913 | struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; |
3887 | struct t10_pr_registration *pr_reg; | 3914 | struct t10_pr_registration *pr_reg; |
3888 | unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; | 3915 | unsigned char *buf; |
3889 | u64 pr_res_key; | 3916 | u64 pr_res_key; |
3890 | u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */ | 3917 | u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */ |
3891 | 3918 | ||
3892 | if (cmd->data_length < 8) { | 3919 | if (cmd->data_length < 8) { |
3893 | printk(KERN_ERR "PRIN SA READ_RESERVATIONS SCSI Data Length: %u" | 3920 | pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u" |
3894 | " too small\n", cmd->data_length); | 3921 | " too small\n", cmd->data_length); |
3895 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | 3922 | return PYX_TRANSPORT_INVALID_CDB_FIELD; |
3896 | } | 3923 | } |
3897 | 3924 | ||
3898 | buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff); | 3925 | buf = transport_kmap_first_data_page(cmd); |
3899 | buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff); | 3926 | buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); |
3900 | buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff); | 3927 | buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); |
3901 | buf[3] = (T10_RES(su_dev)->pr_generation & 0xff); | 3928 | buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); |
3929 | buf[3] = (su_dev->t10_pr.pr_generation & 0xff); | ||
3902 | 3930 | ||
3903 | spin_lock(&se_dev->dev_reservation_lock); | 3931 | spin_lock(&se_dev->dev_reservation_lock); |
3904 | pr_reg = se_dev->dev_pr_res_holder; | 3932 | pr_reg = se_dev->dev_pr_res_holder; |
@@ -3911,10 +3939,9 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) | |||
3911 | buf[6] = ((add_len >> 8) & 0xff); | 3939 | buf[6] = ((add_len >> 8) & 0xff); |
3912 | buf[7] = (add_len & 0xff); | 3940 | buf[7] = (add_len & 0xff); |
3913 | 3941 | ||
3914 | if (cmd->data_length < 22) { | 3942 | if (cmd->data_length < 22) |
3915 | spin_unlock(&se_dev->dev_reservation_lock); | 3943 | goto err; |
3916 | return 0; | 3944 | |
3917 | } | ||
3918 | /* | 3945 | /* |
3919 | * Set the Reservation key. | 3946 | * Set the Reservation key. |
3920 | * | 3947 | * |
@@ -3951,7 +3978,10 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) | |||
3951 | buf[21] = (pr_reg->pr_res_scope & 0xf0) | | 3978 | buf[21] = (pr_reg->pr_res_scope & 0xf0) | |
3952 | (pr_reg->pr_res_type & 0x0f); | 3979 | (pr_reg->pr_res_type & 0x0f); |
3953 | } | 3980 | } |
3981 | |||
3982 | err: | ||
3954 | spin_unlock(&se_dev->dev_reservation_lock); | 3983 | spin_unlock(&se_dev->dev_reservation_lock); |
3984 | transport_kunmap_first_data_page(cmd); | ||
3955 | 3985 | ||
3956 | return 0; | 3986 | return 0; |
3957 | } | 3987 | } |
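[Note: the short-buffer bailout now funnels through a single err: label, so the reservation lock is released and the page unmapped at exactly one site regardless of how much of the payload fits. Condensed:

    spin_lock(&se_dev->dev_reservation_lock);
    if (cmd->data_length < 22)
            goto err;               /* room for the 8-byte header only */
    /* ... fill reservation key, scope and type ... */
    err:
    spin_unlock(&se_dev->dev_reservation_lock);
    transport_kunmap_first_data_page(cmd);
    return 0;]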
@@ -3963,17 +3993,19 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) | |||
3963 | */ | 3993 | */ |
3964 | static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) | 3994 | static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) |
3965 | { | 3995 | { |
3966 | struct se_device *dev = SE_DEV(cmd); | 3996 | struct se_device *dev = cmd->se_dev; |
3967 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 3997 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
3968 | unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; | 3998 | unsigned char *buf; |
3969 | u16 add_len = 8; /* Hardcoded to 8. */ | 3999 | u16 add_len = 8; /* Hardcoded to 8. */ |
3970 | 4000 | ||
3971 | if (cmd->data_length < 6) { | 4001 | if (cmd->data_length < 6) { |
3972 | printk(KERN_ERR "PRIN SA REPORT_CAPABILITIES SCSI Data Length:" | 4002 | pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:" |
3973 | " %u too small\n", cmd->data_length); | 4003 | " %u too small\n", cmd->data_length); |
3974 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | 4004 | return PYX_TRANSPORT_INVALID_CDB_FIELD; |
3975 | } | 4005 | } |
3976 | 4006 | ||
4007 | buf = transport_kmap_first_data_page(cmd); | ||
4008 | |||
3977 | buf[0] = ((add_len << 8) & 0xff); | 4009 | buf[0] = ((add_len << 8) & 0xff); |
3978 | buf[1] = (add_len & 0xff); | 4010 | buf[1] = (add_len & 0xff); |
3979 | buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */ | 4011 | buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */ |
@@ -4004,6 +4036,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) | |||
4004 | buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */ | 4036 | buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */ |
4005 | buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */ | 4037 | buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */ |
4006 | 4038 | ||
4039 | transport_kunmap_first_data_page(cmd); | ||
4040 | |||
4007 | return 0; | 4041 | return 0; |
4008 | } | 4042 | } |
4009 | 4043 | ||
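[Note: one oddity worth flagging in the context above: buf[0] is computed as ((add_len << 8) & 0xff), which is always zero; the intended big-endian split is almost certainly a right shift. It happens to be harmless because add_len is hardcoded to 8, so the high byte is zero either way, but the intended encoding would read:

    u16 add_len = 8;

    buf[0] = (add_len >> 8) & 0xff;         /* high byte */
    buf[1] = add_len & 0xff;                /* low byte */
    /* or simply: put_unaligned_be16(add_len, &buf[0]); */]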
@@ -4014,27 +4048,29 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) | |||
4014 | */ | 4048 | */ |
4015 | static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) | 4049 | static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) |
4016 | { | 4050 | { |
4017 | struct se_device *se_dev = SE_DEV(cmd); | 4051 | struct se_device *se_dev = cmd->se_dev; |
4018 | struct se_node_acl *se_nacl; | 4052 | struct se_node_acl *se_nacl; |
4019 | struct se_subsystem_dev *su_dev = SU_DEV(se_dev); | 4053 | struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; |
4020 | struct se_portal_group *se_tpg; | 4054 | struct se_portal_group *se_tpg; |
4021 | struct t10_pr_registration *pr_reg, *pr_reg_tmp; | 4055 | struct t10_pr_registration *pr_reg, *pr_reg_tmp; |
4022 | struct t10_reservation_template *pr_tmpl = &SU_DEV(se_dev)->t10_reservation; | 4056 | struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr; |
4023 | unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; | 4057 | unsigned char *buf; |
4024 | u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len; | 4058 | u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len; |
4025 | u32 off = 8; /* off into first Full Status descriptor */ | 4059 | u32 off = 8; /* off into first Full Status descriptor */ |
4026 | int format_code = 0; | 4060 | int format_code = 0; |
4027 | 4061 | ||
4028 | if (cmd->data_length < 8) { | 4062 | if (cmd->data_length < 8) { |
4029 | printk(KERN_ERR "PRIN SA READ_FULL_STATUS SCSI Data Length: %u" | 4063 | pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u" |
4030 | " too small\n", cmd->data_length); | 4064 | " too small\n", cmd->data_length); |
4031 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | 4065 | return PYX_TRANSPORT_INVALID_CDB_FIELD; |
4032 | } | 4066 | } |
4033 | 4067 | ||
4034 | buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff); | 4068 | buf = transport_kmap_first_data_page(cmd); |
4035 | buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff); | 4069 | |
4036 | buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff); | 4070 | buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); |
4037 | buf[3] = (T10_RES(su_dev)->pr_generation & 0xff); | 4071 | buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); |
4072 | buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); | ||
4073 | buf[3] = (su_dev->t10_pr.pr_generation & 0xff); | ||
4038 | 4074 | ||
4039 | spin_lock(&pr_tmpl->registration_lock); | 4075 | spin_lock(&pr_tmpl->registration_lock); |
4040 | list_for_each_entry_safe(pr_reg, pr_reg_tmp, | 4076 | list_for_each_entry_safe(pr_reg, pr_reg_tmp, |
@@ -4051,11 +4087,11 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) | |||
4051 | * Determine expected length of $FABRIC_MOD specific | 4087 | * Determine expected length of $FABRIC_MOD specific |
4052 | * TransportID full status descriptor.. | 4088 | * TransportID full status descriptor.. |
4053 | */ | 4089 | */ |
4054 | exp_desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id_len( | 4090 | exp_desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id_len( |
4055 | se_tpg, se_nacl, pr_reg, &format_code); | 4091 | se_tpg, se_nacl, pr_reg, &format_code); |
4056 | 4092 | ||
4057 | if ((exp_desc_len + add_len) > cmd->data_length) { | 4093 | if ((exp_desc_len + add_len) > cmd->data_length) { |
4058 | printk(KERN_WARNING "SPC-3 PRIN READ_FULL_STATUS ran" | 4094 | pr_warn("SPC-3 PRIN READ_FULL_STATUS ran" |
4059 | " out of buffer: %d\n", cmd->data_length); | 4095 | " out of buffer: %d\n", cmd->data_length); |
4060 | spin_lock(&pr_tmpl->registration_lock); | 4096 | spin_lock(&pr_tmpl->registration_lock); |
4061 | atomic_dec(&pr_reg->pr_res_holders); | 4097 | atomic_dec(&pr_reg->pr_res_holders); |
@@ -4105,7 +4141,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) | |||
4105 | * bit is set to one, the contents of the RELATIVE TARGET PORT | 4141 | * bit is set to one, the contents of the RELATIVE TARGET PORT |
4106 | * IDENTIFIER field are not defined by this standard. | 4142 | * IDENTIFIER field are not defined by this standard. |
4107 | */ | 4143 | */ |
4108 | if (!(pr_reg->pr_reg_all_tg_pt)) { | 4144 | if (!pr_reg->pr_reg_all_tg_pt) { |
4109 | struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep; | 4145 | struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep; |
4110 | 4146 | ||
4111 | buf[off++] = ((port->sep_rtpi >> 8) & 0xff); | 4147 | buf[off++] = ((port->sep_rtpi >> 8) & 0xff); |
@@ -4116,7 +4152,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) | |||
4116 | /* | 4152 | /* |
4117 | * Now, have the $FABRIC_MOD fill in the protocol identifier | 4153 | * Now, have the $FABRIC_MOD fill in the protocol identifier |
4118 | */ | 4154 | */ |
4119 | desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id(se_tpg, | 4155 | desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id(se_tpg, |
4120 | se_nacl, pr_reg, &format_code, &buf[off+4]); | 4156 | se_nacl, pr_reg, &format_code, &buf[off+4]); |
4121 | 4157 | ||
4122 | spin_lock(&pr_tmpl->registration_lock); | 4158 | spin_lock(&pr_tmpl->registration_lock); |
@@ -4150,6 +4186,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) | |||
4150 | buf[6] = ((add_len >> 8) & 0xff); | 4186 | buf[6] = ((add_len >> 8) & 0xff); |
4151 | buf[7] = (add_len & 0xff); | 4187 | buf[7] = (add_len & 0xff); |
4152 | 4188 | ||
4189 | transport_kunmap_first_data_page(cmd); | ||
4190 | |||
4153 | return 0; | 4191 | return 0; |
4154 | } | 4192 | } |
4155 | 4193 | ||
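[Note: READ_FULL_STATUS emits one variable-length descriptor per registration, so before formatting each one the loop asks the fabric module for the expected TransportID length and stops once the initiator's ALLOCATION LENGTH would be exceeded. The guard, reduced to its essentials (a sketch, not the verbatim loop):

    exp_desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id_len(
                            se_tpg, se_nacl, pr_reg, &format_code);
    if (exp_desc_len + add_len > cmd->data_length)
            break;          /* report only the descriptors that fit */]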
@@ -4165,7 +4203,7 @@ static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb) | |||
4165 | case PRI_READ_FULL_STATUS: | 4203 | case PRI_READ_FULL_STATUS: |
4166 | return core_scsi3_pri_read_full_status(cmd); | 4204 | return core_scsi3_pri_read_full_status(cmd); |
4167 | default: | 4205 | default: |
4168 | printk(KERN_ERR "Unknown PERSISTENT_RESERVE_IN service" | 4206 | pr_err("Unknown PERSISTENT_RESERVE_IN service" |
4169 | " action: 0x%02x\n", cdb[1] & 0x1f); | 4207 | " action: 0x%02x\n", cdb[1] & 0x1f); |
4170 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | 4208 | return PYX_TRANSPORT_INVALID_CDB_FIELD; |
4171 | } | 4209 | } |
@@ -4174,7 +4212,7 @@ static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb) | |||
4174 | 4212 | ||
4175 | int core_scsi3_emulate_pr(struct se_cmd *cmd) | 4213 | int core_scsi3_emulate_pr(struct se_cmd *cmd) |
4176 | { | 4214 | { |
4177 | unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0]; | 4215 | unsigned char *cdb = &cmd->t_task_cdb[0]; |
4178 | struct se_device *dev = cmd->se_dev; | 4216 | struct se_device *dev = cmd->se_dev; |
4179 | /* | 4217 | /* |
4180 | * Following spc2r20 5.5.1 Reservations overview: | 4218 | * Following spc2r20 5.5.1 Reservations overview: |
@@ -4186,7 +4224,7 @@ int core_scsi3_emulate_pr(struct se_cmd *cmd) | |||
4186 | * CONFLICT status. | 4224 | * CONFLICT status. |
4187 | */ | 4225 | */ |
4188 | if (dev->dev_flags & DF_SPC2_RESERVATIONS) { | 4226 | if (dev->dev_flags & DF_SPC2_RESERVATIONS) { |
4189 | printk(KERN_ERR "Received PERSISTENT_RESERVE CDB while legacy" | 4227 | pr_err("Received PERSISTENT_RESERVE CDB while legacy" |
4190 | " SPC-2 reservation is held, returning" | 4228 | " SPC-2 reservation is held, returning" |
4191 | " RESERVATION_CONFLICT\n"); | 4229 | " RESERVATION_CONFLICT\n"); |
4192 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | 4230 | return PYX_TRANSPORT_RESERVATION_CONFLICT; |
@@ -4213,39 +4251,39 @@ static int core_pt_seq_non_holder( | |||
4213 | int core_setup_reservations(struct se_device *dev, int force_pt) | 4251 | int core_setup_reservations(struct se_device *dev, int force_pt) |
4214 | { | 4252 | { |
4215 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; | 4253 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
4216 | struct t10_reservation_template *rest = &su_dev->t10_reservation; | 4254 | struct t10_reservation *rest = &su_dev->t10_pr; |
4217 | /* | 4255 | /* |
4218 | * If this device is from Target_Core_Mod/pSCSI, use the reservations | 4256 | * If this device is from Target_Core_Mod/pSCSI, use the reservations |
4219 | * of the Underlying SCSI hardware. In Linux/SCSI terms, this can | 4257 | * of the Underlying SCSI hardware. In Linux/SCSI terms, this can |
4220 | * cause a problem because libata and some SATA RAID HBAs appear | 4258 | * cause a problem because libata and some SATA RAID HBAs appear |
4221 | * under Linux/SCSI, but emulate reservations themselves. | 4259 | * under Linux/SCSI, but emulate reservations themselves. |
4222 | */ | 4260 | */ |
4223 | if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) && | 4261 | if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) && |
4224 | !(DEV_ATTRIB(dev)->emulate_reservations)) || force_pt) { | 4262 | !(dev->se_sub_dev->se_dev_attrib.emulate_reservations)) || force_pt) { |
4225 | rest->res_type = SPC_PASSTHROUGH; | 4263 | rest->res_type = SPC_PASSTHROUGH; |
4226 | rest->pr_ops.t10_reservation_check = &core_pt_reservation_check; | 4264 | rest->pr_ops.t10_reservation_check = &core_pt_reservation_check; |
4227 | rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder; | 4265 | rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder; |
4228 | printk(KERN_INFO "%s: Using SPC_PASSTHROUGH, no reservation" | 4266 | pr_debug("%s: Using SPC_PASSTHROUGH, no reservation" |
4229 | " emulation\n", TRANSPORT(dev)->name); | 4267 | " emulation\n", dev->transport->name); |
4230 | return 0; | 4268 | return 0; |
4231 | } | 4269 | } |
4232 | /* | 4270 | /* |
4233 | * If SPC-3 or above is reported by real or emulated struct se_device, | 4271 | * If SPC-3 or above is reported by real or emulated struct se_device, |
4234 | * use emulated Persistent Reservations. | 4272 | * use emulated Persistent Reservations. |
4235 | */ | 4273 | */ |
4236 | if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) { | 4274 | if (dev->transport->get_device_rev(dev) >= SCSI_3) { |
4237 | rest->res_type = SPC3_PERSISTENT_RESERVATIONS; | 4275 | rest->res_type = SPC3_PERSISTENT_RESERVATIONS; |
4238 | rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check; | 4276 | rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check; |
4239 | rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder; | 4277 | rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder; |
4240 | printk(KERN_INFO "%s: Using SPC3_PERSISTENT_RESERVATIONS" | 4278 | pr_debug("%s: Using SPC3_PERSISTENT_RESERVATIONS" |
4241 | " emulation\n", TRANSPORT(dev)->name); | 4279 | " emulation\n", dev->transport->name); |
4242 | } else { | 4280 | } else { |
4243 | rest->res_type = SPC2_RESERVATIONS; | 4281 | rest->res_type = SPC2_RESERVATIONS; |
4244 | rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check; | 4282 | rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check; |
4245 | rest->pr_ops.t10_seq_non_holder = | 4283 | rest->pr_ops.t10_seq_non_holder = |
4246 | &core_scsi2_reservation_seq_non_holder; | 4284 | &core_scsi2_reservation_seq_non_holder; |
4247 | printk(KERN_INFO "%s: Using SPC2_RESERVATIONS emulation\n", | 4285 | pr_debug("%s: Using SPC2_RESERVATIONS emulation\n", |
4248 | TRANSPORT(dev)->name); | 4286 | dev->transport->name); |
4249 | } | 4287 | } |
4250 | 4288 | ||
4251 | return 0; | 4289 | return 0; |
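[Note: core_setup_reservations() is the dispatch point the rest of this file relies on: it picks one of three reservation personalities and wires the matching pr_ops callbacks. Stripped of the logging, the selection is just the following, where `passthrough` stands for the transport-type and emulate_reservations test above:

    if (passthrough)
            rest->res_type = SPC_PASSTHROUGH;
    else if (dev->transport->get_device_rev(dev) >= SCSI_3)
            rest->res_type = SPC3_PERSISTENT_RESERVATIONS;
    else
            rest->res_type = SPC2_RESERVATIONS;

Every PR CDB then routes through rest->pr_ops, so no other path has to re-test the device revision.]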
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h index 5603bcfd86d3..c8f47d064584 100644 --- a/drivers/target/target_core_pr.h +++ b/drivers/target/target_core_pr.h | |||
@@ -49,7 +49,7 @@ extern int core_pr_dump_initiator_port(struct t10_pr_registration *, | |||
49 | char *, u32); | 49 | char *, u32); |
50 | extern int core_scsi2_emulate_crh(struct se_cmd *); | 50 | extern int core_scsi2_emulate_crh(struct se_cmd *); |
51 | extern int core_scsi3_alloc_aptpl_registration( | 51 | extern int core_scsi3_alloc_aptpl_registration( |
52 | struct t10_reservation_template *, u64, | 52 | struct t10_reservation *, u64, |
53 | unsigned char *, unsigned char *, u32, | 53 | unsigned char *, unsigned char *, u32, |
54 | unsigned char *, u16, u32, int, int, u8); | 54 | unsigned char *, u16, u32, int, int, u8); |
55 | extern int core_scsi3_check_aptpl_registration(struct se_device *, | 55 | extern int core_scsi3_check_aptpl_registration(struct se_device *, |
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 331d423fd0e0..2b7b0da9146d 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c | |||
@@ -55,24 +55,6 @@ static struct se_subsystem_api pscsi_template; | |||
55 | 55 | ||
56 | static void pscsi_req_done(struct request *, int); | 56 | static void pscsi_req_done(struct request *, int); |
57 | 57 | ||
58 | /* pscsi_get_sh(): | ||
59 | * | ||
60 | * | ||
61 | */ | ||
62 | static struct Scsi_Host *pscsi_get_sh(u32 host_no) | ||
63 | { | ||
64 | struct Scsi_Host *sh = NULL; | ||
65 | |||
66 | sh = scsi_host_lookup(host_no); | ||
67 | if (IS_ERR(sh)) { | ||
68 | printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:" | ||
69 | " %u\n", host_no); | ||
70 | return NULL; | ||
71 | } | ||
72 | |||
73 | return sh; | ||
74 | } | ||
75 | |||
76 | /* pscsi_attach_hba(): | 58 | /* pscsi_attach_hba(): |
77 | * | 59 | * |
78 | * pscsi_get_sh() used scsi_host_lookup() to locate struct Scsi_Host. | 60 | * pscsi_get_sh() used scsi_host_lookup() to locate struct Scsi_Host. |
@@ -80,28 +62,23 @@ static struct Scsi_Host *pscsi_get_sh(u32 host_no) | |||
80 | */ | 62 | */ |
81 | static int pscsi_attach_hba(struct se_hba *hba, u32 host_id) | 63 | static int pscsi_attach_hba(struct se_hba *hba, u32 host_id) |
82 | { | 64 | { |
83 | int hba_depth; | ||
84 | struct pscsi_hba_virt *phv; | 65 | struct pscsi_hba_virt *phv; |
85 | 66 | ||
86 | phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL); | 67 | phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL); |
87 | if (!(phv)) { | 68 | if (!phv) { |
88 | printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n"); | 69 | pr_err("Unable to allocate struct pscsi_hba_virt\n"); |
89 | return -1; | 70 | return -ENOMEM; |
90 | } | 71 | } |
91 | phv->phv_host_id = host_id; | 72 | phv->phv_host_id = host_id; |
92 | phv->phv_mode = PHV_VIRUTAL_HOST_ID; | 73 | phv->phv_mode = PHV_VIRUTAL_HOST_ID; |
93 | hba_depth = PSCSI_VIRTUAL_HBA_DEPTH; | ||
94 | atomic_set(&hba->left_queue_depth, hba_depth); | ||
95 | atomic_set(&hba->max_queue_depth, hba_depth); | ||
96 | 74 | ||
97 | hba->hba_ptr = (void *)phv; | 75 | hba->hba_ptr = phv; |
98 | 76 | ||
99 | printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on" | 77 | pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on" |
100 | " Generic Target Core Stack %s\n", hba->hba_id, | 78 | " Generic Target Core Stack %s\n", hba->hba_id, |
101 | PSCSI_VERSION, TARGET_CORE_MOD_VERSION); | 79 | PSCSI_VERSION, TARGET_CORE_MOD_VERSION); |
102 | printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic" | 80 | pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n", |
103 | " Target Core with TCQ Depth: %d\n", hba->hba_id, | 81 | hba->hba_id); |
104 | atomic_read(&hba->max_queue_depth)); | ||
105 | 82 | ||
106 | return 0; | 83 | return 0; |
107 | } | 84 | } |
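[Note: alongside the pr_* conversion, this hunk switches the failure return from a bare -1 to a proper negative errno, the convention the rest of the kernel expects:

    phv = kzalloc(sizeof(*phv), GFP_KERNEL);
    if (!phv)
            return -ENOMEM;         /* not -1: callers can act on the errno */]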
@@ -114,12 +91,12 @@ static void pscsi_detach_hba(struct se_hba *hba) | |||
114 | if (scsi_host) { | 91 | if (scsi_host) { |
115 | scsi_host_put(scsi_host); | 92 | scsi_host_put(scsi_host); |
116 | 93 | ||
117 | printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from" | 94 | pr_debug("CORE_HBA[%d] - Detached SCSI HBA: %s from" |
118 | " Generic Target Core\n", hba->hba_id, | 95 | " Generic Target Core\n", hba->hba_id, |
119 | (scsi_host->hostt->name) ? (scsi_host->hostt->name) : | 96 | (scsi_host->hostt->name) ? (scsi_host->hostt->name) : |
120 | "Unknown"); | 97 | "Unknown"); |
121 | } else | 98 | } else |
122 | printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA" | 99 | pr_debug("CORE_HBA[%d] - Detached Virtual SCSI HBA" |
123 | " from Generic Target Core\n", hba->hba_id); | 100 | " from Generic Target Core\n", hba->hba_id); |
124 | 101 | ||
125 | kfree(phv); | 102 | kfree(phv); |
@@ -130,20 +107,17 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) | |||
130 | { | 107 | { |
131 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr; | 108 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr; |
132 | struct Scsi_Host *sh = phv->phv_lld_host; | 109 | struct Scsi_Host *sh = phv->phv_lld_host; |
133 | int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH; | ||
134 | /* | 110 | /* |
135 | * Release the struct Scsi_Host | 111 | * Release the struct Scsi_Host |
136 | */ | 112 | */ |
137 | if (!(mode_flag)) { | 113 | if (!mode_flag) { |
138 | if (!(sh)) | 114 | if (!sh) |
139 | return 0; | 115 | return 0; |
140 | 116 | ||
141 | phv->phv_lld_host = NULL; | 117 | phv->phv_lld_host = NULL; |
142 | phv->phv_mode = PHV_VIRUTAL_HOST_ID; | 118 | phv->phv_mode = PHV_VIRUTAL_HOST_ID; |
143 | atomic_set(&hba->left_queue_depth, hba_depth); | ||
144 | atomic_set(&hba->max_queue_depth, hba_depth); | ||
145 | 119 | ||
146 | printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough" | 120 | pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough" |
147 | " %s\n", hba->hba_id, (sh->hostt->name) ? | 121 | " %s\n", hba->hba_id, (sh->hostt->name) ? |
148 | (sh->hostt->name) : "Unknown"); | 122 | (sh->hostt->name) : "Unknown"); |
149 | 123 | ||
@@ -154,27 +128,17 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) | |||
154 | * Otherwise, locate struct Scsi_Host from the original passed | 128 | * Otherwise, locate struct Scsi_Host from the original passed |
155 | * pSCSI Host ID and enable for phba mode | 129 | * pSCSI Host ID and enable for phba mode |
156 | */ | 130 | */ |
157 | sh = pscsi_get_sh(phv->phv_host_id); | 131 | sh = scsi_host_lookup(phv->phv_host_id); |
158 | if (!(sh)) { | 132 | if (IS_ERR(sh)) { |
159 | printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for" | 133 | pr_err("pSCSI: Unable to locate SCSI Host for" |
160 | " phv_host_id: %d\n", phv->phv_host_id); | 134 | " phv_host_id: %d\n", phv->phv_host_id); |
161 | return -1; | 135 | return PTR_ERR(sh); |
162 | } | 136 | } |
163 | /* | ||
164 | * Usually the SCSI LLD will use the hostt->can_queue value to define | ||
165 | * its HBA TCQ depth. Some other drivers (like 2.6 megaraid) don't set | ||
166 | * this at all and set sh->can_queue at runtime. | ||
167 | */ | ||
168 | hba_depth = (sh->hostt->can_queue > sh->can_queue) ? | ||
169 | sh->hostt->can_queue : sh->can_queue; | ||
170 | |||
171 | atomic_set(&hba->left_queue_depth, hba_depth); | ||
172 | atomic_set(&hba->max_queue_depth, hba_depth); | ||
173 | 137 | ||
174 | phv->phv_lld_host = sh; | 138 | phv->phv_lld_host = sh; |
175 | phv->phv_mode = PHV_LLD_SCSI_HOST_NO; | 139 | phv->phv_mode = PHV_LLD_SCSI_HOST_NO; |
176 | 140 | ||
177 | printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n", | 141 | pr_debug("CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n", |
178 | hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown"); | 142 | hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown"); |
179 | 143 | ||
180 | return 1; | 144 | return 1; |
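[Note: with pscsi_get_sh() gone, callers use scsi_host_lookup() directly; since it reports failure as an ERR_PTR-encoded pointer rather than NULL, the checks become IS_ERR()/PTR_ERR():

    #include <linux/err.h>

    sh = scsi_host_lookup(phv->phv_host_id);
    if (IS_ERR(sh))
            return PTR_ERR(sh);     /* propagate the encoded errno */

This is also why the wrapper was worth deleting: it flattened the ERR_PTR into NULL and threw the error code away.]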
@@ -236,7 +200,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn) | |||
236 | 200 | ||
237 | buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL); | 201 | buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL); |
238 | if (!buf) | 202 | if (!buf) |
239 | return -1; | 203 | return -ENOMEM; |
240 | 204 | ||
241 | memset(cdb, 0, MAX_COMMAND_SIZE); | 205 | memset(cdb, 0, MAX_COMMAND_SIZE); |
242 | cdb[0] = INQUIRY; | 206 | cdb[0] = INQUIRY; |
@@ -259,7 +223,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn) | |||
259 | 223 | ||
260 | out_free: | 224 | out_free: |
261 | kfree(buf); | 225 | kfree(buf); |
262 | return -1; | 226 | return -EPERM; |
263 | } | 227 | } |
264 | 228 | ||
265 | static void | 229 | static void |
@@ -293,15 +257,15 @@ pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev, | |||
293 | page_83 = &buf[off]; | 257 | page_83 = &buf[off]; |
294 | ident_len = page_83[3]; | 258 | ident_len = page_83[3]; |
295 | if (!ident_len) { | 259 | if (!ident_len) { |
296 | printk(KERN_ERR "page_83[3]: identifier" | 260 | pr_err("page_83[3]: identifier" |
297 | " length zero!\n"); | 261 | " length zero!\n"); |
298 | break; | 262 | break; |
299 | } | 263 | } |
300 | printk(KERN_INFO "T10 VPD Identifer Length: %d\n", ident_len); | 264 | pr_debug("T10 VPD Identifer Length: %d\n", ident_len); |
301 | 265 | ||
302 | vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL); | 266 | vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL); |
303 | if (!vpd) { | 267 | if (!vpd) { |
304 | printk(KERN_ERR "Unable to allocate memory for" | 268 | pr_err("Unable to allocate memory for" |
305 | " struct t10_vpd\n"); | 269 | " struct t10_vpd\n"); |
306 | goto out; | 270 | goto out; |
307 | } | 271 | } |
@@ -353,7 +317,7 @@ static struct se_device *pscsi_add_device_to_list( | |||
353 | if (!sd->queue_depth) { | 317 | if (!sd->queue_depth) { |
354 | sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH; | 318 | sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH; |
355 | 319 | ||
356 | printk(KERN_ERR "Set broken SCSI Device %d:%d:%d" | 320 | pr_err("Set broken SCSI Device %d:%d:%d" |
357 | " queue_depth to %d\n", sd->channel, sd->id, | 321 | " queue_depth to %d\n", sd->channel, sd->id, |
358 | sd->lun, sd->queue_depth); | 322 | sd->lun, sd->queue_depth); |
359 | } | 323 | } |
@@ -364,10 +328,8 @@ static struct se_device *pscsi_add_device_to_list( | |||
364 | q = sd->request_queue; | 328 | q = sd->request_queue; |
365 | limits = &dev_limits.limits; | 329 | limits = &dev_limits.limits; |
366 | limits->logical_block_size = sd->sector_size; | 330 | limits->logical_block_size = sd->sector_size; |
367 | limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ? | 331 | limits->max_hw_sectors = min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q)); |
368 | queue_max_hw_sectors(q) : sd->host->max_sectors; | 332 | limits->max_sectors = min_t(int, sd->host->max_sectors, queue_max_sectors(q)); |
369 | limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ? | ||
370 | queue_max_sectors(q) : sd->host->max_sectors; | ||
371 | dev_limits.hw_queue_depth = sd->queue_depth; | 333 | dev_limits.hw_queue_depth = sd->queue_depth; |
372 | dev_limits.queue_depth = sd->queue_depth; | 334 | dev_limits.queue_depth = sd->queue_depth; |
373 | /* | 335 | /* |
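[Note: the two open-coded ternaries clamping the queue limits against the HBA maximum collapse into min_t(), which casts both operands to the named type before comparing and so avoids signed/unsigned surprises:

    limits->max_hw_sectors = min_t(int, sd->host->max_sectors,
                                   queue_max_hw_sectors(q));
    limits->max_sectors    = min_t(int, sd->host->max_sectors,
                                   queue_max_sectors(q));]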
@@ -391,9 +353,9 @@ static struct se_device *pscsi_add_device_to_list( | |||
391 | pdv->pdv_sd = sd; | 353 | pdv->pdv_sd = sd; |
392 | 354 | ||
393 | dev = transport_add_device_to_core_hba(hba, &pscsi_template, | 355 | dev = transport_add_device_to_core_hba(hba, &pscsi_template, |
394 | se_dev, dev_flags, (void *)pdv, | 356 | se_dev, dev_flags, pdv, |
395 | &dev_limits, NULL, NULL); | 357 | &dev_limits, NULL, NULL); |
396 | if (!(dev)) { | 358 | if (!dev) { |
397 | pdv->pdv_sd = NULL; | 359 | pdv->pdv_sd = NULL; |
398 | return NULL; | 360 | return NULL; |
399 | } | 361 | } |
@@ -423,14 +385,14 @@ static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name) | |||
423 | struct pscsi_dev_virt *pdv; | 385 | struct pscsi_dev_virt *pdv; |
424 | 386 | ||
425 | pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL); | 387 | pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL); |
426 | if (!(pdv)) { | 388 | if (!pdv) { |
427 | printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n"); | 389 | pr_err("Unable to allocate memory for struct pscsi_dev_virt\n"); |
428 | return NULL; | 390 | return NULL; |
429 | } | 391 | } |
430 | pdv->pdv_se_hba = hba; | 392 | pdv->pdv_se_hba = hba; |
431 | 393 | ||
432 | printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name); | 394 | pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name); |
433 | return (void *)pdv; | 395 | return pdv; |
434 | } | 396 | } |
435 | 397 | ||
436 | /* | 398 | /* |
@@ -450,7 +412,7 @@ static struct se_device *pscsi_create_type_disk( | |||
450 | u32 dev_flags = 0; | 412 | u32 dev_flags = 0; |
451 | 413 | ||
452 | if (scsi_device_get(sd)) { | 414 | if (scsi_device_get(sd)) { |
453 | printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n", | 415 | pr_err("scsi_device_get() failed for %d:%d:%d:%d\n", |
454 | sh->host_no, sd->channel, sd->id, sd->lun); | 416 | sh->host_no, sd->channel, sd->id, sd->lun); |
455 | spin_unlock_irq(sh->host_lock); | 417 | spin_unlock_irq(sh->host_lock); |
456 | return NULL; | 418 | return NULL; |
@@ -463,19 +425,19 @@ static struct se_device *pscsi_create_type_disk( | |||
463 | bd = blkdev_get_by_path(se_dev->se_dev_udev_path, | 425 | bd = blkdev_get_by_path(se_dev->se_dev_udev_path, |
464 | FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); | 426 | FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); |
465 | if (IS_ERR(bd)) { | 427 | if (IS_ERR(bd)) { |
466 | printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n"); | 428 | pr_err("pSCSI: blkdev_get_by_path() failed\n"); |
467 | scsi_device_put(sd); | 429 | scsi_device_put(sd); |
468 | return NULL; | 430 | return NULL; |
469 | } | 431 | } |
470 | pdv->pdv_bd = bd; | 432 | pdv->pdv_bd = bd; |
471 | 433 | ||
472 | dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); | 434 | dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); |
473 | if (!(dev)) { | 435 | if (!dev) { |
474 | blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); | 436 | blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); |
475 | scsi_device_put(sd); | 437 | scsi_device_put(sd); |
476 | return NULL; | 438 | return NULL; |
477 | } | 439 | } |
478 | printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n", | 440 | pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n", |
479 | phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun); | 441 | phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun); |
480 | 442 | ||
481 | return dev; | 443 | return dev; |
@@ -497,7 +459,7 @@ static struct se_device *pscsi_create_type_rom( | |||
497 | u32 dev_flags = 0; | 459 | u32 dev_flags = 0; |
498 | 460 | ||
499 | if (scsi_device_get(sd)) { | 461 | if (scsi_device_get(sd)) { |
500 | printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n", | 462 | pr_err("scsi_device_get() failed for %d:%d:%d:%d\n", |
501 | sh->host_no, sd->channel, sd->id, sd->lun); | 463 | sh->host_no, sd->channel, sd->id, sd->lun); |
502 | spin_unlock_irq(sh->host_lock); | 464 | spin_unlock_irq(sh->host_lock); |
503 | return NULL; | 465 | return NULL; |
@@ -505,11 +467,11 @@ static struct se_device *pscsi_create_type_rom( | |||
505 | spin_unlock_irq(sh->host_lock); | 467 | spin_unlock_irq(sh->host_lock); |
506 | 468 | ||
507 | dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); | 469 | dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); |
508 | if (!(dev)) { | 470 | if (!dev) { |
509 | scsi_device_put(sd); | 471 | scsi_device_put(sd); |
510 | return NULL; | 472 | return NULL; |
511 | } | 473 | } |
512 | printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", | 474 | pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", |
513 | phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, | 475 | phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, |
514 | sd->channel, sd->id, sd->lun); | 476 | sd->channel, sd->id, sd->lun); |
515 | 477 | ||
@@ -533,10 +495,10 @@ static struct se_device *pscsi_create_type_other( | |||
533 | 495 | ||
534 | spin_unlock_irq(sh->host_lock); | 496 | spin_unlock_irq(sh->host_lock); |
535 | dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); | 497 | dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); |
536 | if (!(dev)) | 498 | if (!dev) |
537 | return NULL; | 499 | return NULL; |
538 | 500 | ||
539 | printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", | 501 | pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", |
540 | phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, | 502 | phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, |
541 | sd->channel, sd->id, sd->lun); | 503 | sd->channel, sd->id, sd->lun); |
542 | 504 | ||
@@ -555,8 +517,8 @@ static struct se_device *pscsi_create_virtdevice( | |||
555 | struct Scsi_Host *sh = phv->phv_lld_host; | 517 | struct Scsi_Host *sh = phv->phv_lld_host; |
556 | int legacy_mode_enable = 0; | 518 | int legacy_mode_enable = 0; |
557 | 519 | ||
558 | if (!(pdv)) { | 520 | if (!pdv) { |
559 | printk(KERN_ERR "Unable to locate struct pscsi_dev_virt" | 521 | pr_err("Unable to locate struct pscsi_dev_virt" |
560 | " parameter\n"); | 522 | " parameter\n"); |
561 | return ERR_PTR(-EINVAL); | 523 | return ERR_PTR(-EINVAL); |
562 | } | 524 | } |
@@ -564,9 +526,9 @@ static struct se_device *pscsi_create_virtdevice( | |||
564 | * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the | 526 | * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the |
565 | * struct Scsi_Host we will need to bring the TCM/pSCSI object online | 527 | * struct Scsi_Host we will need to bring the TCM/pSCSI object online |
566 | */ | 528 | */ |
567 | if (!(sh)) { | 529 | if (!sh) { |
568 | if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { | 530 | if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { |
569 | printk(KERN_ERR "pSCSI: Unable to locate struct" | 531 | pr_err("pSCSI: Unable to locate struct" |
570 | " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n"); | 532 | " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n"); |
571 | return ERR_PTR(-ENODEV); | 533 | return ERR_PTR(-ENODEV); |
572 | } | 534 | } |
@@ -575,7 +537,7 @@ static struct se_device *pscsi_create_virtdevice( | |||
575 | * reference, we enforce that udev_path has been set | 537 | * reference, we enforce that udev_path has been set |
576 | */ | 538 | */ |
577 | if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) { | 539 | if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) { |
578 | printk(KERN_ERR "pSCSI: udev_path attribute has not" | 540 | pr_err("pSCSI: udev_path attribute has not" |
579 | " been set before ENABLE=1\n"); | 541 | " been set before ENABLE=1\n"); |
580 | return ERR_PTR(-EINVAL); | 542 | return ERR_PTR(-EINVAL); |
581 | } | 543 | } |
@@ -586,8 +548,8 @@ static struct se_device *pscsi_create_virtdevice( | |||
586 | */ | 548 | */ |
587 | if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) { | 549 | if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) { |
588 | spin_lock(&hba->device_lock); | 550 | spin_lock(&hba->device_lock); |
589 | if (!(list_empty(&hba->hba_dev_list))) { | 551 | if (!list_empty(&hba->hba_dev_list)) { |
590 | printk(KERN_ERR "pSCSI: Unable to set hba_mode" | 552 | pr_err("pSCSI: Unable to set hba_mode" |
591 | " with active devices\n"); | 553 | " with active devices\n"); |
592 | spin_unlock(&hba->device_lock); | 554 | spin_unlock(&hba->device_lock); |
593 | return ERR_PTR(-EEXIST); | 555 | return ERR_PTR(-EEXIST); |
@@ -601,16 +563,16 @@ static struct se_device *pscsi_create_virtdevice( | |||
601 | hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; | 563 | hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; |
602 | sh = phv->phv_lld_host; | 564 | sh = phv->phv_lld_host; |
603 | } else { | 565 | } else { |
604 | sh = pscsi_get_sh(pdv->pdv_host_id); | 566 | sh = scsi_host_lookup(pdv->pdv_host_id); |
605 | if (!(sh)) { | 567 | if (IS_ERR(sh)) { |
606 | printk(KERN_ERR "pSCSI: Unable to locate" | 568 | pr_err("pSCSI: Unable to locate" |
607 | " pdv_host_id: %d\n", pdv->pdv_host_id); | 569 | " pdv_host_id: %d\n", pdv->pdv_host_id); |
608 | return ERR_PTR(-ENODEV); | 570 | return (struct se_device *) sh; |
609 | } | 571 | } |
610 | } | 572 | } |
611 | } else { | 573 | } else { |
612 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) { | 574 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) { |
613 | printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while" | 575 | pr_err("pSCSI: PHV_VIRUTAL_HOST_ID set while" |
614 | " struct Scsi_Host exists\n"); | 576 | " struct Scsi_Host exists\n"); |
615 | return ERR_PTR(-EEXIST); | 577 | return ERR_PTR(-EEXIST); |
616 | } | 578 | } |
@@ -639,7 +601,7 @@ static struct se_device *pscsi_create_virtdevice( | |||
639 | break; | 601 | break; |
640 | } | 602 | } |
641 | 603 | ||
642 | if (!(dev)) { | 604 | if (!dev) { |
643 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) | 605 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) |
644 | scsi_host_put(sh); | 606 | scsi_host_put(sh); |
645 | else if (legacy_mode_enable) { | 607 | else if (legacy_mode_enable) { |
@@ -653,7 +615,7 @@ static struct se_device *pscsi_create_virtdevice( | |||
653 | } | 615 | } |
654 | spin_unlock_irq(sh->host_lock); | 616 | spin_unlock_irq(sh->host_lock); |
655 | 617 | ||
656 | printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no, | 618 | pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no, |
657 | pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id); | 619 | pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id); |
658 | 620 | ||
659 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) | 621 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) |
@@ -728,13 +690,12 @@ static int pscsi_transport_complete(struct se_task *task) | |||
728 | */ | 690 | */ |
729 | if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && | 691 | if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && |
730 | (status_byte(result) << 1) == SAM_STAT_GOOD) { | 692 | (status_byte(result) << 1) == SAM_STAT_GOOD) { |
731 | if (!TASK_CMD(task)->se_deve) | 693 | if (!task->task_se_cmd->se_deve) |
732 | goto after_mode_sense; | 694 | goto after_mode_sense; |
733 | 695 | ||
734 | if (TASK_CMD(task)->se_deve->lun_flags & | 696 | if (task->task_se_cmd->se_deve->lun_flags & |
735 | TRANSPORT_LUNFLAGS_READ_ONLY) { | 697 | TRANSPORT_LUNFLAGS_READ_ONLY) { |
736 | unsigned char *buf = (unsigned char *) | 698 | unsigned char *buf = transport_kmap_first_data_page(task->task_se_cmd); |
737 | T_TASK(task->task_se_cmd)->t_task_buf; | ||
738 | 699 | ||
739 | if (cdb[0] == MODE_SENSE_10) { | 700 | if (cdb[0] == MODE_SENSE_10) { |
740 | if (!(buf[3] & 0x80)) | 701 | if (!(buf[3] & 0x80)) |
@@ -743,6 +704,8 @@ static int pscsi_transport_complete(struct se_task *task) | |||
743 | if (!(buf[2] & 0x80)) | 704 | if (!(buf[2] & 0x80)) |
744 | buf[2] |= 0x80; | 705 | buf[2] |= 0x80; |
745 | } | 706 | } |
707 | |||
708 | transport_kunmap_first_data_page(task->task_se_cmd); | ||
746 | } | 709 | } |
747 | } | 710 | } |
748 | after_mode_sense: | 711 | after_mode_sense: |
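[Note: the MODE_SENSE fixup above is the same kmap conversion seen in target_core_pr.c: with t_task_buf gone, the response header is patched through a mapping of the first data page. The essence, condensed (byte 3 carries the device-specific parameter for MODE SENSE(10), byte 2 for the 6-byte form; 0x80 is the WP bit):

    unsigned char *buf = transport_kmap_first_data_page(task->task_se_cmd);

    buf[(cdb[0] == MODE_SENSE_10) ? 3 : 2] |= 0x80;   /* force write-protect */

    transport_kunmap_first_data_page(task->task_se_cmd);]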
@@ -766,8 +729,8 @@ after_mode_sense: | |||
766 | u32 blocksize; | 729 | u32 blocksize; |
767 | 730 | ||
768 | buf = sg_virt(&sg[0]); | 731 | buf = sg_virt(&sg[0]); |
769 | if (!(buf)) { | 732 | if (!buf) { |
770 | printk(KERN_ERR "Unable to get buf for scatterlist\n"); | 733 | pr_err("Unable to get buf for scatterlist\n"); |
771 | goto after_mode_select; | 734 | goto after_mode_select; |
772 | } | 735 | } |
773 | 736 | ||
@@ -797,34 +760,20 @@ after_mode_select: | |||
797 | } | 760 | } |
798 | 761 | ||
799 | static struct se_task * | 762 | static struct se_task * |
800 | pscsi_alloc_task(struct se_cmd *cmd) | 763 | pscsi_alloc_task(unsigned char *cdb) |
801 | { | 764 | { |
802 | struct pscsi_plugin_task *pt; | 765 | struct pscsi_plugin_task *pt; |
803 | unsigned char *cdb = T_TASK(cmd)->t_task_cdb; | ||
804 | 766 | ||
805 | pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL); | 767 | /* |
768 | * Dynamically alloc cdb space, since it may be larger than | ||
769 | * TCM_MAX_COMMAND_SIZE | ||
770 | */ | ||
771 | pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL); | ||
806 | if (!pt) { | 772 | if (!pt) { |
807 | printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n"); | 773 | pr_err("Unable to allocate struct pscsi_plugin_task\n"); |
808 | return NULL; | 774 | return NULL; |
809 | } | 775 | } |
810 | 776 | ||
811 | /* | ||
812 | * If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation, | ||
813 | * allocate the extended CDB buffer for per struct se_task context | ||
814 | * pt->pscsi_cdb now. | ||
815 | */ | ||
816 | if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) { | ||
817 | |||
818 | pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL); | ||
819 | if (!(pt->pscsi_cdb)) { | ||
820 | printk(KERN_ERR "pSCSI: Unable to allocate extended" | ||
821 | " pt->pscsi_cdb\n"); | ||
822 | kfree(pt); | ||
823 | return NULL; | ||
824 | } | ||
825 | } else | ||
826 | pt->pscsi_cdb = &pt->__pscsi_cdb[0]; | ||
827 | |||
828 | return &pt->pscsi_task; | 777 | return &pt->pscsi_task; |
829 | } | 778 | } |
830 | 779 | ||
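[Note: the rewritten allocator folds the old two-step extended-CDB dance into a single kzalloc sized by scsi_command_size(), which presumes struct pscsi_plugin_task now ends in a flexible CDB array. A sketch of the assumed layout:

    struct pscsi_plugin_task {
            /* ... fixed members ... */
            unsigned char pscsi_cdb[0];     /* sized at allocation time */
    };

    pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL);

One allocation means one kfree(), which is what lets pscsi_free_task() below drop its special-case cleanup.]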
@@ -849,7 +798,7 @@ static inline void pscsi_blk_init_request( | |||
849 | * also set the end_io_data pointer to struct se_task. | 798 | * also set the end_io_data pointer to struct se_task. |
850 | */ | 799 | */ |
851 | req->end_io = pscsi_req_done; | 800 | req->end_io = pscsi_req_done; |
852 | req->end_io_data = (void *)task; | 801 | req->end_io_data = task; |
853 | /* | 802 | /* |
854 | * Load the referenced struct se_task's SCSI CDB into | 803 | * Load the referenced struct se_task's SCSI CDB into |
855 | * include/linux/blkdev.h:struct request->cmd | 804 | * include/linux/blkdev.h:struct request->cmd |
@@ -859,7 +808,7 @@ static inline void pscsi_blk_init_request( | |||
859 | /* | 808 | /* |
860 | * Setup pointer for outgoing sense data. | 809 | * Setup pointer for outgoing sense data. |
861 | */ | 810 | */ |
862 | req->sense = (void *)&pt->pscsi_sense[0]; | 811 | req->sense = &pt->pscsi_sense[0]; |
863 | req->sense_len = 0; | 812 | req->sense_len = 0; |
864 | } | 813 | } |
865 | 814 | ||
@@ -874,8 +823,8 @@ static int pscsi_blk_get_request(struct se_task *task) | |||
874 | pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue, | 823 | pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue, |
875 | (task->task_data_direction == DMA_TO_DEVICE), | 824 | (task->task_data_direction == DMA_TO_DEVICE), |
876 | GFP_KERNEL); | 825 | GFP_KERNEL); |
877 | if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) { | 826 | if (!pt->pscsi_req || IS_ERR(pt->pscsi_req)) { |
878 | printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n", | 827 | pr_err("PSCSI: blk_get_request() failed: %ld\n", |
879 | IS_ERR(pt->pscsi_req)); | 828 | IS_ERR(pt->pscsi_req)); |
880 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 829 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
881 | } | 830 | } |
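[Note: two things stand out in the request setup: blk_get_request() in this era reports failure as NULL (hence the belt-and-braces !ptr || IS_ERR(ptr) test), and the error printout feeds IS_ERR(), a 0/1 value, to %ld, so it never shows the real reason. Something like the following would log the actual code; a sketch, not what the commit does:

    req = blk_get_request(q, rw, GFP_KERNEL);
    if (!req || IS_ERR(req)) {
            pr_err("blk_get_request() failed: %ld\n",
                   IS_ERR(req) ? PTR_ERR(req) : -ENOMEM);
            return PYX_TRANSPORT_LU_COMM_FAILURE;
    }]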
@@ -920,15 +869,8 @@ static int pscsi_do_task(struct se_task *task) | |||
920 | static void pscsi_free_task(struct se_task *task) | 869 | static void pscsi_free_task(struct se_task *task) |
921 | { | 870 | { |
922 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | 871 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); |
923 | struct se_cmd *cmd = task->task_se_cmd; | ||
924 | 872 | ||
925 | /* | 873 | /* |
926 | * Release the extended CDB allocation from pscsi_alloc_task() | ||
927 | * if one exists. | ||
928 | */ | ||
929 | if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) | ||
930 | kfree(pt->pscsi_cdb); | ||
931 | /* | ||
932 | * We do not release the bio(s) here associated with this task, as | 874 | * We do not release the bio(s) here associated with this task, as |
933 | * this is handled by bio_put() and pscsi_bi_endio(). | 875 | * this is handled by bio_put() and pscsi_bi_endio(). |
934 | */ | 876 | */ |
@@ -973,7 +915,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba, | |||
973 | switch (token) { | 915 | switch (token) { |
974 | case Opt_scsi_host_id: | 916 | case Opt_scsi_host_id: |
975 | if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { | 917 | if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { |
976 | printk(KERN_ERR "PSCSI[%d]: Unable to accept" | 918 | pr_err("PSCSI[%d]: Unable to accept" |
977 | " scsi_host_id while phv_mode ==" | 919 | " scsi_host_id while phv_mode ==" |
978 | " PHV_LLD_SCSI_HOST_NO\n", | 920 | " PHV_LLD_SCSI_HOST_NO\n", |
979 | phv->phv_host_id); | 921 | phv->phv_host_id); |
@@ -982,14 +924,14 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba, | |||
982 | } | 924 | } |
983 | match_int(args, &arg); | 925 | match_int(args, &arg); |
984 | pdv->pdv_host_id = arg; | 926 | pdv->pdv_host_id = arg; |
985 | printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:" | 927 | pr_debug("PSCSI[%d]: Referencing SCSI Host ID:" |
986 | " %d\n", phv->phv_host_id, pdv->pdv_host_id); | 928 | " %d\n", phv->phv_host_id, pdv->pdv_host_id); |
987 | pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID; | 929 | pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID; |
988 | break; | 930 | break; |
989 | case Opt_scsi_channel_id: | 931 | case Opt_scsi_channel_id: |
990 | match_int(args, &arg); | 932 | match_int(args, &arg); |
991 | pdv->pdv_channel_id = arg; | 933 | pdv->pdv_channel_id = arg; |
992 | printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel" | 934 | pr_debug("PSCSI[%d]: Referencing SCSI Channel" |
993 | " ID: %d\n", phv->phv_host_id, | 935 | " ID: %d\n", phv->phv_host_id, |
994 | pdv->pdv_channel_id); | 936 | pdv->pdv_channel_id); |
995 | pdv->pdv_flags |= PDF_HAS_CHANNEL_ID; | 937 | pdv->pdv_flags |= PDF_HAS_CHANNEL_ID; |
@@ -997,7 +939,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba, | |||
997 | case Opt_scsi_target_id: | 939 | case Opt_scsi_target_id: |
998 | match_int(args, &arg); | 940 | match_int(args, &arg); |
999 | pdv->pdv_target_id = arg; | 941 | pdv->pdv_target_id = arg; |
1000 | printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target" | 942 | pr_debug("PSCSI[%d]: Referencing SCSI Target" |
1001 | " ID: %d\n", phv->phv_host_id, | 943 | " ID: %d\n", phv->phv_host_id, |
1002 | pdv->pdv_target_id); | 944 | pdv->pdv_target_id); |
1003 | pdv->pdv_flags |= PDF_HAS_TARGET_ID; | 945 | pdv->pdv_flags |= PDF_HAS_TARGET_ID; |
@@ -1005,7 +947,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba, | |||
1005 | case Opt_scsi_lun_id: | 947 | case Opt_scsi_lun_id: |
1006 | match_int(args, &arg); | 948 | match_int(args, &arg); |
1007 | pdv->pdv_lun_id = arg; | 949 | pdv->pdv_lun_id = arg; |
1008 | printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:" | 950 | pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:" |
1009 | " %d\n", phv->phv_host_id, pdv->pdv_lun_id); | 951 | " %d\n", phv->phv_host_id, pdv->pdv_lun_id); |
1010 | pdv->pdv_flags |= PDF_HAS_LUN_ID; | 952 | pdv->pdv_flags |= PDF_HAS_LUN_ID; |
1011 | break; | 953 | break; |
@@ -1028,9 +970,9 @@ static ssize_t pscsi_check_configfs_dev_params( | |||
1028 | if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) || | 970 | if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) || |
1029 | !(pdv->pdv_flags & PDF_HAS_TARGET_ID) || | 971 | !(pdv->pdv_flags & PDF_HAS_TARGET_ID) || |
1030 | !(pdv->pdv_flags & PDF_HAS_LUN_ID)) { | 972 | !(pdv->pdv_flags & PDF_HAS_LUN_ID)) { |
1031 | printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and" | 973 | pr_err("Missing scsi_channel_id=, scsi_target_id= and" |
1032 | " scsi_lun_id= parameters\n"); | 974 | " scsi_lun_id= parameters\n"); |
1033 | return -1; | 975 | return -EINVAL; |
1034 | } | 976 | } |
1035 | 977 | ||
1036 | return 0; | 978 | return 0; |
@@ -1090,7 +1032,7 @@ static void pscsi_bi_endio(struct bio *bio, int error) | |||
1090 | bio_put(bio); | 1032 | bio_put(bio); |
1091 | } | 1033 | } |
1092 | 1034 | ||
1093 | static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num) | 1035 | static inline struct bio *pscsi_get_bio(int sg_num) |
1094 | { | 1036 | { |
1095 | struct bio *bio; | 1037 | struct bio *bio; |
1096 | /* | 1038 | /* |
@@ -1098,8 +1040,8 @@ static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num) | |||
1098 | * in block/blk-core.c:blk_make_request() | 1040 | * in block/blk-core.c:blk_make_request() |
1099 | */ | 1041 | */ |
1100 | bio = bio_kmalloc(GFP_KERNEL, sg_num); | 1042 | bio = bio_kmalloc(GFP_KERNEL, sg_num); |
1101 | if (!(bio)) { | 1043 | if (!bio) { |
1102 | printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n"); | 1044 | pr_err("PSCSI: bio_kmalloc() failed\n"); |
1103 | return NULL; | 1045 | return NULL; |
1104 | } | 1046 | } |
1105 | bio->bi_end_io = pscsi_bi_endio; | 1047 | bio->bi_end_io = pscsi_bi_endio; |
@@ -1107,13 +1049,7 @@ static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num) | |||
1107 | return bio; | 1049 | return bio; |
1108 | } | 1050 | } |
1109 | 1051 | ||
1110 | #if 0 | 1052 | static int __pscsi_map_SG( |
1111 | #define DEBUG_PSCSI(x...) printk(x) | ||
1112 | #else | ||
1113 | #define DEBUG_PSCSI(x...) | ||
1114 | #endif | ||
1115 | |||
1116 | static int __pscsi_map_task_SG( | ||
1117 | struct se_task *task, | 1053 | struct se_task *task, |
1118 | struct scatterlist *task_sg, | 1054 | struct scatterlist *task_sg, |
1119 | u32 task_sg_num, | 1055 | u32 task_sg_num, |
@@ -1134,7 +1070,7 @@ static int __pscsi_map_task_SG( | |||
1134 | return 0; | 1070 | return 0; |
1135 | /* | 1071 | /* |
1136 | * For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup | 1072 | * For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup |
1137 | * the bio_vec maplist from TCM struct se_mem -> task->task_sg -> | 1073 | * the bio_vec maplist from task->task_sg -> |
1138 | * struct scatterlist memory. The struct se_task->task_sg[] currently needs | 1074 | * struct scatterlist memory. The struct se_task->task_sg[] currently needs |
1139 | * to be attached to struct bios for submission to Linux/SCSI using | 1075 | * to be attached to struct bios for submission to Linux/SCSI using |
1140 | * struct request to struct scsi_device->request_queue. | 1076 | * struct request to struct scsi_device->request_queue. |
@@ -1143,34 +1079,34 @@ static int __pscsi_map_task_SG( | |||
1143 | * is ported to upstream SCSI passthrough functionality that accepts | 1079 | * is ported to upstream SCSI passthrough functionality that accepts |
1144 | * struct scatterlist->page_link or struct page as a parameter. | 1080 | * struct scatterlist->page_link or struct page as a parameter. |
1145 | */ | 1081 | */ |
1146 | DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages); | 1082 | pr_debug("PSCSI: nr_pages: %d\n", nr_pages); |
1147 | 1083 | ||
1148 | for_each_sg(task_sg, sg, task_sg_num, i) { | 1084 | for_each_sg(task_sg, sg, task_sg_num, i) { |
1149 | page = sg_page(sg); | 1085 | page = sg_page(sg); |
1150 | off = sg->offset; | 1086 | off = sg->offset; |
1151 | len = sg->length; | 1087 | len = sg->length; |
1152 | 1088 | ||
1153 | DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i, | 1089 | pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i, |
1154 | page, len, off); | 1090 | page, len, off); |
1155 | 1091 | ||
1156 | while (len > 0 && data_len > 0) { | 1092 | while (len > 0 && data_len > 0) { |
1157 | bytes = min_t(unsigned int, len, PAGE_SIZE - off); | 1093 | bytes = min_t(unsigned int, len, PAGE_SIZE - off); |
1158 | bytes = min(bytes, data_len); | 1094 | bytes = min(bytes, data_len); |
1159 | 1095 | ||
1160 | if (!(bio)) { | 1096 | if (!bio) { |
1161 | nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages); | 1097 | nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages); |
1162 | nr_pages -= nr_vecs; | 1098 | nr_pages -= nr_vecs; |
1163 | /* | 1099 | /* |
1164 | * Calls bio_kmalloc() and sets bio->bi_end_io() | 1100 | * Calls bio_kmalloc() and sets bio->bi_end_io() |
1165 | */ | 1101 | */ |
1166 | bio = pscsi_get_bio(pdv, nr_vecs); | 1102 | bio = pscsi_get_bio(nr_vecs); |
1167 | if (!(bio)) | 1103 | if (!bio) |
1168 | goto fail; | 1104 | goto fail; |
1169 | 1105 | ||
1170 | if (rw) | 1106 | if (rw) |
1171 | bio->bi_rw |= REQ_WRITE; | 1107 | bio->bi_rw |= REQ_WRITE; |
1172 | 1108 | ||
1173 | DEBUG_PSCSI("PSCSI: Allocated bio: %p," | 1109 | pr_debug("PSCSI: Allocated bio: %p," |
1174 | " dir: %s nr_vecs: %d\n", bio, | 1110 | " dir: %s nr_vecs: %d\n", bio, |
1175 | (rw) ? "rw" : "r", nr_vecs); | 1111 | (rw) ? "rw" : "r", nr_vecs); |
1176 | /* | 1112 | /* |
@@ -1185,7 +1121,7 @@ static int __pscsi_map_task_SG( | |||
1185 | tbio = tbio->bi_next = bio; | 1121 | tbio = tbio->bi_next = bio; |
1186 | } | 1122 | } |
1187 | 1123 | ||
1188 | DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d" | 1124 | pr_debug("PSCSI: Calling bio_add_pc_page() i: %d" |
1189 | " bio: %p page: %p len: %d off: %d\n", i, bio, | 1125 | " bio: %p page: %p len: %d off: %d\n", i, bio, |
1190 | page, len, off); | 1126 | page, len, off); |
1191 | 1127 | ||
@@ -1194,11 +1130,11 @@ static int __pscsi_map_task_SG( | |||
1194 | if (rc != bytes) | 1130 | if (rc != bytes) |
1195 | goto fail; | 1131 | goto fail; |
1196 | 1132 | ||
1197 | DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n", | 1133 | pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n", |
1198 | bio->bi_vcnt, nr_vecs); | 1134 | bio->bi_vcnt, nr_vecs); |
1199 | 1135 | ||
1200 | if (bio->bi_vcnt > nr_vecs) { | 1136 | if (bio->bi_vcnt > nr_vecs) { |
1201 | DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:" | 1137 | pr_debug("PSCSI: Reached bio->bi_vcnt max:" |
1202 | " %d i: %d bio: %p, allocating another" | 1138 | " %d i: %d bio: %p, allocating another" |
1203 | " bio\n", bio->bi_vcnt, i, bio); | 1139 | " bio\n", bio->bi_vcnt, i, bio); |
1204 | /* | 1140 | /* |
@@ -1220,15 +1156,15 @@ static int __pscsi_map_task_SG( | |||
1220 | * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND | 1156 | * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND |
1221 | * primary SCSI WRITE payload mapped for struct se_task->task_sg[] | 1157 | * primary SCSI WRITE payload mapped for struct se_task->task_sg[] |
1222 | */ | 1158 | */ |
1223 | if (!(bidi_read)) { | 1159 | if (!bidi_read) { |
1224 | /* | 1160 | /* |
1225 | * Starting with v2.6.31, call blk_make_request() passing in *hbio to | 1161 | * Starting with v2.6.31, call blk_make_request() passing in *hbio to |
1226 | * allocate the pSCSI task a struct request. | 1162 | * allocate the pSCSI task a struct request. |
1227 | */ | 1163 | */ |
1228 | pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue, | 1164 | pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue, |
1229 | hbio, GFP_KERNEL); | 1165 | hbio, GFP_KERNEL); |
1230 | if (!(pt->pscsi_req)) { | 1166 | if (!pt->pscsi_req) { |
1231 | printk(KERN_ERR "pSCSI: blk_make_request() failed\n"); | 1167 | pr_err("pSCSI: blk_make_request() failed\n"); |
1232 | goto fail; | 1168 | goto fail; |
1233 | } | 1169 | } |
1234 | /* | 1170 | /* |
@@ -1237,7 +1173,7 @@ static int __pscsi_map_task_SG( | |||
1237 | */ | 1173 | */ |
1238 | pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); | 1174 | pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); |
1239 | 1175 | ||
1240 | return task->task_sg_num; | 1176 | return task->task_sg_nents; |
1241 | } | 1177 | } |
1242 | /* | 1178 | /* |
1243 | * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND | 1179 | * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND |
@@ -1245,13 +1181,13 @@ static int __pscsi_map_task_SG( | |||
1245 | */ | 1181 | */ |
1246 | pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue, | 1182 | pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue, |
1247 | hbio, GFP_KERNEL); | 1183 | hbio, GFP_KERNEL); |
1248 | if (!(pt->pscsi_req->next_rq)) { | 1184 | if (!pt->pscsi_req->next_rq) { |
1249 | printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n"); | 1185 | pr_err("pSCSI: blk_make_request() failed for BIDI\n"); |
1250 | goto fail; | 1186 | goto fail; |
1251 | } | 1187 | } |
1252 | pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1); | 1188 | pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1); |
1253 | 1189 | ||
1254 | return task->task_sg_num; | 1190 | return task->task_sg_nents; |
1255 | fail: | 1191 | fail: |
1256 | while (hbio) { | 1192 | while (hbio) { |
1257 | bio = hbio; | 1193 | bio = hbio; |
@@ -1262,7 +1198,10 @@ fail: | |||
1262 | return ret; | 1198 | return ret; |
1263 | } | 1199 | } |
1264 | 1200 | ||
1265 | static int pscsi_map_task_SG(struct se_task *task) | 1201 | /* |
1202 | * pSCSI maps both ->map_control_SG() and ->map_data_SG() to a single call. | ||
1203 | */ | ||
1204 | static int pscsi_map_SG(struct se_task *task) | ||
1266 | { | 1205 | { |
1267 | int ret; | 1206 | int ret; |
1268 | 1207 | ||
@@ -1270,14 +1209,14 @@ static int pscsi_map_task_SG(struct se_task *task) | |||
1270 | * Setup the main struct request for the task->task_sg[] payload | 1209 | * Setup the main struct request for the task->task_sg[] payload |
1271 | */ | 1210 | */ |
1272 | 1211 | ||
1273 | ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0); | 1212 | ret = __pscsi_map_SG(task, task->task_sg, task->task_sg_nents, 0); |
1274 | if (ret >= 0 && task->task_sg_bidi) { | 1213 | if (ret >= 0 && task->task_sg_bidi) { |
1275 | /* | 1214 | /* |
1276 | * If present, set up the extra BIDI-COMMAND SCSI READ | 1215 | * If present, set up the extra BIDI-COMMAND SCSI READ |
1277 | * struct request and payload. | 1216 | * struct request and payload. |
1278 | */ | 1217 | */ |
1279 | ret = __pscsi_map_task_SG(task, task->task_sg_bidi, | 1218 | ret = __pscsi_map_SG(task, task->task_sg_bidi, |
1280 | task->task_sg_num, 1); | 1219 | task->task_sg_nents, 1); |
1281 | } | 1220 | } |
1282 | 1221 | ||
1283 | if (ret < 0) | 1222 | if (ret < 0) |
@@ -1285,33 +1224,6 @@ static int pscsi_map_task_SG(struct se_task *task) | |||
1285 | return 0; | 1224 | return 0; |
1286 | } | 1225 | } |
1287 | 1226 | ||
1288 | /* pscsi_map_task_non_SG(): | ||
1289 | * | ||
1290 | * | ||
1291 | */ | ||
1292 | static int pscsi_map_task_non_SG(struct se_task *task) | ||
1293 | { | ||
1294 | struct se_cmd *cmd = TASK_CMD(task); | ||
1295 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | ||
1296 | struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; | ||
1297 | int ret = 0; | ||
1298 | |||
1299 | if (pscsi_blk_get_request(task) < 0) | ||
1300 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
1301 | |||
1302 | if (!task->task_size) | ||
1303 | return 0; | ||
1304 | |||
1305 | ret = blk_rq_map_kern(pdv->pdv_sd->request_queue, | ||
1306 | pt->pscsi_req, T_TASK(cmd)->t_task_buf, | ||
1307 | task->task_size, GFP_KERNEL); | ||
1308 | if (ret < 0) { | ||
1309 | printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret); | ||
1310 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
1311 | } | ||
1312 | return 0; | ||
1313 | } | ||
1314 | |||
1315 | static int pscsi_CDB_none(struct se_task *task) | 1227 | static int pscsi_CDB_none(struct se_task *task) |
1316 | { | 1228 | { |
1317 | return pscsi_blk_get_request(task); | 1229 | return pscsi_blk_get_request(task); |
@@ -1383,9 +1295,9 @@ static inline void pscsi_process_SAM_status( | |||
1383 | struct pscsi_plugin_task *pt) | 1295 | struct pscsi_plugin_task *pt) |
1384 | { | 1296 | { |
1385 | task->task_scsi_status = status_byte(pt->pscsi_result); | 1297 | task->task_scsi_status = status_byte(pt->pscsi_result); |
1386 | if ((task->task_scsi_status)) { | 1298 | if (task->task_scsi_status) { |
1387 | task->task_scsi_status <<= 1; | 1299 | task->task_scsi_status <<= 1; |
1388 | printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:" | 1300 | pr_debug("PSCSI Status Byte exception at task: %p CDB:" |
1389 | " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], | 1301 | " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], |
1390 | pt->pscsi_result); | 1302 | pt->pscsi_result); |
1391 | } | 1303 | } |
@@ -1395,18 +1307,16 @@ static inline void pscsi_process_SAM_status( | |||
1395 | transport_complete_task(task, (!task->task_scsi_status)); | 1307 | transport_complete_task(task, (!task->task_scsi_status)); |
1396 | break; | 1308 | break; |
1397 | default: | 1309 | default: |
1398 | printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:" | 1310 | pr_debug("PSCSI Host Byte exception at task: %p CDB:" |
1399 | " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], | 1311 | " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], |
1400 | pt->pscsi_result); | 1312 | pt->pscsi_result); |
1401 | task->task_scsi_status = SAM_STAT_CHECK_CONDITION; | 1313 | task->task_scsi_status = SAM_STAT_CHECK_CONDITION; |
1402 | task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | 1314 | task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; |
1403 | TASK_CMD(task)->transport_error_status = | 1315 | task->task_se_cmd->transport_error_status = |
1404 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | 1316 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; |
1405 | transport_complete_task(task, 0); | 1317 | transport_complete_task(task, 0); |
1406 | break; | 1318 | break; |
1407 | } | 1319 | } |
1408 | |||
1409 | return; | ||
1410 | } | 1320 | } |
1411 | 1321 | ||
1412 | static void pscsi_req_done(struct request *req, int uptodate) | 1322 | static void pscsi_req_done(struct request *req, int uptodate) |
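For the status handling above: status_byte() strips the low bit of the SCSI result word, and the <<= 1 restores the raw SAM encoding (0x02 = CHECK CONDITION). A standalone sketch of that round trip, assuming the classic (result >> 1) & 0x7f definition of the helper:

    #include <stdio.h>

    /* assumed to match the scsi.h helper this code relies on */
    #define status_byte(result) (((result) >> 1) & 0x7f)

    int main(void)
    {
            unsigned int pscsi_result = 0x02;   /* hypothetical: CHECK CONDITION */
            unsigned int task_scsi_status = status_byte(pscsi_result);

            if (task_scsi_status) {
                    task_scsi_status <<= 1;     /* back to SAM encoding */
                    printf("SAM status: 0x%02x\n", task_scsi_status); /* 0x02 */
            }
            return 0;
    }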
@@ -1433,8 +1343,8 @@ static struct se_subsystem_api pscsi_template = { | |||
1433 | .owner = THIS_MODULE, | 1343 | .owner = THIS_MODULE, |
1434 | .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, | 1344 | .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, |
1435 | .cdb_none = pscsi_CDB_none, | 1345 | .cdb_none = pscsi_CDB_none, |
1436 | .map_task_non_SG = pscsi_map_task_non_SG, | 1346 | .map_control_SG = pscsi_map_SG, |
1437 | .map_task_SG = pscsi_map_task_SG, | 1347 | .map_data_SG = pscsi_map_SG, |
1438 | .attach_hba = pscsi_attach_hba, | 1348 | .attach_hba = pscsi_attach_hba, |
1439 | .detach_hba = pscsi_detach_hba, | 1349 | .detach_hba = pscsi_detach_hba, |
1440 | .pmode_enable_hba = pscsi_pmode_enable_hba, | 1350 | .pmode_enable_hba = pscsi_pmode_enable_hba, |
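Stepping back from the pscsi.c changes: the mapping loop in __pscsi_map_SG() splits every scatterlist entry into page-bounded chunks before handing them to bio_add_pc_page(). A runnable userspace sketch of just that chunking arithmetic, assuming 4 KiB pages and ignoring the bio plumbing entirely:

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            /* hypothetical sg entry: 10000 bytes starting 512 bytes into a page */
            unsigned int off = 512, len = 10000, data_len = 10000;

            while (len > 0 && data_len > 0) {
                    /* never cross a page boundary or the remaining I/O size */
                    unsigned int bytes = min_u(len, PAGE_SIZE - off);

                    bytes = min_u(bytes, data_len);
                    printf("chunk: %u bytes at offset %u\n", bytes, off);

                    data_len -= bytes;
                    len -= bytes;
                    off = 0;    /* later chunks start on a page boundary */
            }
            return 0;
    }

Running this prints chunks of 3584, 4096, and 2320 bytes — the same page-aligned slicing the kernel loop feeds into each bio.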
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h index a4cd5d352c3a..ebf4f1ae2c83 100644 --- a/drivers/target/target_core_pscsi.h +++ b/drivers/target/target_core_pscsi.h | |||
@@ -2,7 +2,6 @@ | |||
2 | #define TARGET_CORE_PSCSI_H | 2 | #define TARGET_CORE_PSCSI_H |
3 | 3 | ||
4 | #define PSCSI_VERSION "v4.0" | 4 | #define PSCSI_VERSION "v4.0" |
5 | #define PSCSI_VIRTUAL_HBA_DEPTH 2048 | ||
6 | 5 | ||
7 | /* used in pscsi_find_alloc_len() */ | 6 | /* used in pscsi_find_alloc_len() */ |
8 | #ifndef INQUIRY_DATA_SIZE | 7 | #ifndef INQUIRY_DATA_SIZE |
@@ -24,13 +23,12 @@ | |||
24 | 23 | ||
25 | struct pscsi_plugin_task { | 24 | struct pscsi_plugin_task { |
26 | struct se_task pscsi_task; | 25 | struct se_task pscsi_task; |
27 | unsigned char *pscsi_cdb; | ||
28 | unsigned char __pscsi_cdb[TCM_MAX_COMMAND_SIZE]; | ||
29 | unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE]; | 26 | unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE]; |
30 | int pscsi_direction; | 27 | int pscsi_direction; |
31 | int pscsi_result; | 28 | int pscsi_result; |
32 | u32 pscsi_resid; | 29 | u32 pscsi_resid; |
33 | struct request *pscsi_req; | 30 | struct request *pscsi_req; |
31 | unsigned char pscsi_cdb[0]; | ||
34 | } ____cacheline_aligned; | 32 | } ____cacheline_aligned; |
35 | 33 | ||
36 | #define PDF_HAS_CHANNEL_ID 0x01 | 34 | #define PDF_HAS_CHANNEL_ID 0x01 |
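The header change above drops the fixed __pscsi_cdb[TCM_MAX_COMMAND_SIZE] buffer plus pointer in favor of a zero-length trailing array, so the CDB bytes are allocated inline with the task in a single allocation. A minimal userspace sketch of the pattern, using the C99 flexible-array spelling and hypothetical sizes:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct pscsi_task_sketch {
            int pscsi_result;
            unsigned char pscsi_cdb[];  /* diff uses GNU-style pscsi_cdb[0] */
    };

    int main(void)
    {
            unsigned char cdb[6] = { 0x12, 0, 0, 0, 36, 0 }; /* e.g. INQUIRY */
            size_t cdb_len = sizeof(cdb);
            /* one allocation covers the struct and its trailing CDB bytes */
            struct pscsi_task_sketch *t = malloc(sizeof(*t) + cdb_len);

            if (!t)
                    return 1;
            memcpy(t->pscsi_cdb, cdb, cdb_len);
            printf("opcode: 0x%02x\n", t->pscsi_cdb[0]);
            free(t);
            return 0;
    }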
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 7837dd365a9d..3dd81d24d9a9 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c | |||
@@ -44,12 +44,8 @@ | |||
44 | 44 | ||
45 | #include "target_core_rd.h" | 45 | #include "target_core_rd.h" |
46 | 46 | ||
47 | static struct se_subsystem_api rd_dr_template; | ||
48 | static struct se_subsystem_api rd_mcp_template; | 47 | static struct se_subsystem_api rd_mcp_template; |
49 | 48 | ||
50 | /* #define DEBUG_RAMDISK_MCP */ | ||
51 | /* #define DEBUG_RAMDISK_DR */ | ||
52 | |||
53 | /* rd_attach_hba(): (Part of se_subsystem_api_t template) | 49 | /* rd_attach_hba(): (Part of se_subsystem_api_t template) |
54 | * | 50 | * |
55 | * | 51 | * |
@@ -59,24 +55,21 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id) | |||
59 | struct rd_host *rd_host; | 55 | struct rd_host *rd_host; |
60 | 56 | ||
61 | rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL); | 57 | rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL); |
62 | if (!(rd_host)) { | 58 | if (!rd_host) { |
63 | printk(KERN_ERR "Unable to allocate memory for struct rd_host\n"); | 59 | pr_err("Unable to allocate memory for struct rd_host\n"); |
64 | return -ENOMEM; | 60 | return -ENOMEM; |
65 | } | 61 | } |
66 | 62 | ||
67 | rd_host->rd_host_id = host_id; | 63 | rd_host->rd_host_id = host_id; |
68 | 64 | ||
69 | atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH); | 65 | hba->hba_ptr = rd_host; |
70 | atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH); | ||
71 | hba->hba_ptr = (void *) rd_host; | ||
72 | 66 | ||
73 | printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" | 67 | pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" |
74 | " Generic Target Core Stack %s\n", hba->hba_id, | 68 | " Generic Target Core Stack %s\n", hba->hba_id, |
75 | RD_HBA_VERSION, TARGET_CORE_MOD_VERSION); | 69 | RD_HBA_VERSION, TARGET_CORE_MOD_VERSION); |
76 | printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic" | 70 | pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic" |
77 | " Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id, | 71 | " MaxSectors: %u\n", hba->hba_id, |
78 | rd_host->rd_host_id, atomic_read(&hba->max_queue_depth), | 72 | rd_host->rd_host_id, RD_MAX_SECTORS); |
79 | RD_MAX_SECTORS); | ||
80 | 73 | ||
81 | return 0; | 74 | return 0; |
82 | } | 75 | } |
@@ -85,7 +78,7 @@ static void rd_detach_hba(struct se_hba *hba) | |||
85 | { | 78 | { |
86 | struct rd_host *rd_host = hba->hba_ptr; | 79 | struct rd_host *rd_host = hba->hba_ptr; |
87 | 80 | ||
88 | printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from" | 81 | pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from" |
89 | " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id); | 82 | " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id); |
90 | 83 | ||
91 | kfree(rd_host); | 84 | kfree(rd_host); |
@@ -114,7 +107,7 @@ static void rd_release_device_space(struct rd_dev *rd_dev) | |||
114 | 107 | ||
115 | for (j = 0; j < sg_per_table; j++) { | 108 | for (j = 0; j < sg_per_table; j++) { |
116 | pg = sg_page(&sg[j]); | 109 | pg = sg_page(&sg[j]); |
117 | if ((pg)) { | 110 | if (pg) { |
118 | __free_page(pg); | 111 | __free_page(pg); |
119 | page_count++; | 112 | page_count++; |
120 | } | 113 | } |
@@ -123,7 +116,7 @@ static void rd_release_device_space(struct rd_dev *rd_dev) | |||
123 | kfree(sg); | 116 | kfree(sg); |
124 | } | 117 | } |
125 | 118 | ||
126 | printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk" | 119 | pr_debug("CORE_RD[%u] - Released device space for Ramdisk" |
127 | " Device ID: %u, pages %u in %u tables total bytes %lu\n", | 120 | " Device ID: %u, pages %u in %u tables total bytes %lu\n", |
128 | rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, | 121 | rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, |
129 | rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); | 122 | rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); |
@@ -148,7 +141,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev) | |||
148 | struct scatterlist *sg; | 141 | struct scatterlist *sg; |
149 | 142 | ||
150 | if (rd_dev->rd_page_count <= 0) { | 143 | if (rd_dev->rd_page_count <= 0) { |
151 | printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n", | 144 | pr_err("Illegal page count: %u for Ramdisk device\n", |
152 | rd_dev->rd_page_count); | 145 | rd_dev->rd_page_count); |
153 | return -EINVAL; | 146 | return -EINVAL; |
154 | } | 147 | } |
@@ -157,8 +150,8 @@ static int rd_build_device_space(struct rd_dev *rd_dev) | |||
157 | sg_tables = (total_sg_needed / max_sg_per_table) + 1; | 150 | sg_tables = (total_sg_needed / max_sg_per_table) + 1; |
158 | 151 | ||
159 | sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); | 152 | sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); |
160 | if (!(sg_table)) { | 153 | if (!sg_table) { |
161 | printk(KERN_ERR "Unable to allocate memory for Ramdisk" | 154 | pr_err("Unable to allocate memory for Ramdisk" |
162 | " scatterlist tables\n"); | 155 | " scatterlist tables\n"); |
163 | return -ENOMEM; | 156 | return -ENOMEM; |
164 | } | 157 | } |
@@ -172,13 +165,13 @@ static int rd_build_device_space(struct rd_dev *rd_dev) | |||
172 | 165 | ||
173 | sg = kzalloc(sg_per_table * sizeof(struct scatterlist), | 166 | sg = kzalloc(sg_per_table * sizeof(struct scatterlist), |
174 | GFP_KERNEL); | 167 | GFP_KERNEL); |
175 | if (!(sg)) { | 168 | if (!sg) { |
176 | printk(KERN_ERR "Unable to allocate scatterlist array" | 169 | pr_err("Unable to allocate scatterlist array" |
177 | " for struct rd_dev\n"); | 170 | " for struct rd_dev\n"); |
178 | return -ENOMEM; | 171 | return -ENOMEM; |
179 | } | 172 | } |
180 | 173 | ||
181 | sg_init_table((struct scatterlist *)&sg[0], sg_per_table); | 174 | sg_init_table(sg, sg_per_table); |
182 | 175 | ||
183 | sg_table[i].sg_table = sg; | 176 | sg_table[i].sg_table = sg; |
184 | sg_table[i].rd_sg_count = sg_per_table; | 177 | sg_table[i].rd_sg_count = sg_per_table; |
@@ -188,8 +181,8 @@ static int rd_build_device_space(struct rd_dev *rd_dev) | |||
188 | 181 | ||
189 | for (j = 0; j < sg_per_table; j++) { | 182 | for (j = 0; j < sg_per_table; j++) { |
190 | pg = alloc_pages(GFP_KERNEL, 0); | 183 | pg = alloc_pages(GFP_KERNEL, 0); |
191 | if (!(pg)) { | 184 | if (!pg) { |
192 | printk(KERN_ERR "Unable to allocate scatterlist" | 185 | pr_err("Unable to allocate scatterlist" |
193 | " pages for struct rd_dev_sg_table\n"); | 186 | " pages for struct rd_dev_sg_table\n"); |
194 | return -ENOMEM; | 187 | return -ENOMEM; |
195 | } | 188 | } |
@@ -201,7 +194,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev) | |||
201 | total_sg_needed -= sg_per_table; | 194 | total_sg_needed -= sg_per_table; |
202 | } | 195 | } |
203 | 196 | ||
204 | printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of" | 197 | pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of" |
205 | " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, | 198 | " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, |
206 | rd_dev->rd_dev_id, rd_dev->rd_page_count, | 199 | rd_dev->rd_dev_id, rd_dev->rd_page_count, |
207 | rd_dev->sg_table_count); | 200 | rd_dev->sg_table_count); |
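rd_build_device_space() above carves the requested page count into fixed-size scatterlist tables. A standalone sketch of the partitioning, assuming a hypothetical 32-entry table limit:

    #include <stdio.h>

    int main(void)
    {
            unsigned int total_sg_needed = 100; /* rd_page_count, hypothetical */
            unsigned int max_sg_per_table = 32; /* assumed limit */
            unsigned int sg_tables = (total_sg_needed / max_sg_per_table) + 1;
            unsigned int i, page_offset = 0;

            for (i = 0; i < sg_tables; i++) {
                    unsigned int sg_per_table =
                            (total_sg_needed > max_sg_per_table) ?
                            max_sg_per_table : total_sg_needed;

                    if (!sg_per_table)
                            break;
                    /* table covers [page_offset, page_offset+sg_per_table-1] */
                    printf("table %u: pages %u..%u\n", i, page_offset,
                           page_offset + sg_per_table - 1);
                    page_offset += sg_per_table;
                    total_sg_needed -= sg_per_table;
            }
            return 0;
    }

For 100 pages this yields tables covering 0..31, 32..63, 64..95, and 96..99 — the page windows that rd_get_sg_table() later searches.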
@@ -218,8 +211,8 @@ static void *rd_allocate_virtdevice( | |||
218 | struct rd_host *rd_host = hba->hba_ptr; | 211 | struct rd_host *rd_host = hba->hba_ptr; |
219 | 212 | ||
220 | rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL); | 213 | rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL); |
221 | if (!(rd_dev)) { | 214 | if (!rd_dev) { |
222 | printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n"); | 215 | pr_err("Unable to allocate memory for struct rd_dev\n"); |
223 | return NULL; | 216 | return NULL; |
224 | } | 217 | } |
225 | 218 | ||
@@ -229,11 +222,6 @@ static void *rd_allocate_virtdevice( | |||
229 | return rd_dev; | 222 | return rd_dev; |
230 | } | 223 | } |
231 | 224 | ||
232 | static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name) | ||
233 | { | ||
234 | return rd_allocate_virtdevice(hba, name, 1); | ||
235 | } | ||
236 | |||
237 | static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name) | 225 | static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name) |
238 | { | 226 | { |
239 | return rd_allocate_virtdevice(hba, name, 0); | 227 | return rd_allocate_virtdevice(hba, name, 0); |
@@ -273,16 +261,15 @@ static struct se_device *rd_create_virtdevice( | |||
273 | dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH; | 261 | dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH; |
274 | 262 | ||
275 | dev = transport_add_device_to_core_hba(hba, | 263 | dev = transport_add_device_to_core_hba(hba, |
276 | (rd_dev->rd_direct) ? &rd_dr_template : | 264 | &rd_mcp_template, se_dev, dev_flags, rd_dev, |
277 | &rd_mcp_template, se_dev, dev_flags, (void *)rd_dev, | ||
278 | &dev_limits, prod, rev); | 265 | &dev_limits, prod, rev); |
279 | if (!(dev)) | 266 | if (!dev) |
280 | goto fail; | 267 | goto fail; |
281 | 268 | ||
282 | rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++; | 269 | rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++; |
283 | rd_dev->rd_queue_depth = dev->queue_depth; | 270 | rd_dev->rd_queue_depth = dev->queue_depth; |
284 | 271 | ||
285 | printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of" | 272 | pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of" |
286 | " %u pages in %u tables, %lu total bytes\n", | 273 | " %u pages in %u tables, %lu total bytes\n", |
287 | rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" : | 274 | rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" : |
288 | "DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count, | 275 | "DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count, |
@@ -296,14 +283,6 @@ fail: | |||
296 | return ERR_PTR(ret); | 283 | return ERR_PTR(ret); |
297 | } | 284 | } |
298 | 285 | ||
299 | static struct se_device *rd_DIRECT_create_virtdevice( | ||
300 | struct se_hba *hba, | ||
301 | struct se_subsystem_dev *se_dev, | ||
302 | void *p) | ||
303 | { | ||
304 | return rd_create_virtdevice(hba, se_dev, p, 1); | ||
305 | } | ||
306 | |||
307 | static struct se_device *rd_MEMCPY_create_virtdevice( | 286 | static struct se_device *rd_MEMCPY_create_virtdevice( |
308 | struct se_hba *hba, | 287 | struct se_hba *hba, |
309 | struct se_subsystem_dev *se_dev, | 288 | struct se_subsystem_dev *se_dev, |
@@ -330,16 +309,15 @@ static inline struct rd_request *RD_REQ(struct se_task *task) | |||
330 | } | 309 | } |
331 | 310 | ||
332 | static struct se_task * | 311 | static struct se_task * |
333 | rd_alloc_task(struct se_cmd *cmd) | 312 | rd_alloc_task(unsigned char *cdb) |
334 | { | 313 | { |
335 | struct rd_request *rd_req; | 314 | struct rd_request *rd_req; |
336 | 315 | ||
337 | rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL); | 316 | rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL); |
338 | if (!rd_req) { | 317 | if (!rd_req) { |
339 | printk(KERN_ERR "Unable to allocate struct rd_request\n"); | 318 | pr_err("Unable to allocate struct rd_request\n"); |
340 | return NULL; | 319 | return NULL; |
341 | } | 320 | } |
342 | rd_req->rd_dev = SE_DEV(cmd)->dev_ptr; | ||
343 | 321 | ||
344 | return &rd_req->rd_task; | 322 | return &rd_req->rd_task; |
345 | } | 323 | } |
@@ -360,7 +338,7 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) | |||
360 | return sg_table; | 338 | return sg_table; |
361 | } | 339 | } |
362 | 340 | ||
363 | printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n", | 341 | pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n", |
364 | page); | 342 | page); |
365 | 343 | ||
366 | return NULL; | 344 | return NULL; |
@@ -373,7 +351,7 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) | |||
373 | static int rd_MEMCPY_read(struct rd_request *req) | 351 | static int rd_MEMCPY_read(struct rd_request *req) |
374 | { | 352 | { |
375 | struct se_task *task = &req->rd_task; | 353 | struct se_task *task = &req->rd_task; |
376 | struct rd_dev *dev = req->rd_dev; | 354 | struct rd_dev *dev = req->rd_task.se_dev->dev_ptr; |
377 | struct rd_dev_sg_table *table; | 355 | struct rd_dev_sg_table *table; |
378 | struct scatterlist *sg_d, *sg_s; | 356 | struct scatterlist *sg_d, *sg_s; |
379 | void *dst, *src; | 357 | void *dst, *src; |
@@ -382,32 +360,32 @@ static int rd_MEMCPY_read(struct rd_request *req) | |||
382 | u32 rd_offset = req->rd_offset; | 360 | u32 rd_offset = req->rd_offset; |
383 | 361 | ||
384 | table = rd_get_sg_table(dev, req->rd_page); | 362 | table = rd_get_sg_table(dev, req->rd_page); |
385 | if (!(table)) | 363 | if (!table) |
386 | return -1; | 364 | return -EINVAL; |
387 | 365 | ||
388 | table_sg_end = (table->page_end_offset - req->rd_page); | 366 | table_sg_end = (table->page_end_offset - req->rd_page); |
389 | sg_d = task->task_sg; | 367 | sg_d = task->task_sg; |
390 | sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; | 368 | sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; |
391 | #ifdef DEBUG_RAMDISK_MCP | 369 | |
392 | printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:" | 370 | pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:" |
393 | " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size, | 371 | " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size, |
394 | req->rd_page, req->rd_offset); | 372 | req->rd_page, req->rd_offset); |
395 | #endif | 373 | |
396 | src_offset = rd_offset; | 374 | src_offset = rd_offset; |
397 | 375 | ||
398 | while (req->rd_size) { | 376 | while (req->rd_size) { |
399 | if ((sg_d[i].length - dst_offset) < | 377 | if ((sg_d[i].length - dst_offset) < |
400 | (sg_s[j].length - src_offset)) { | 378 | (sg_s[j].length - src_offset)) { |
401 | length = (sg_d[i].length - dst_offset); | 379 | length = (sg_d[i].length - dst_offset); |
402 | #ifdef DEBUG_RAMDISK_MCP | 380 | |
403 | printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d" | 381 | pr_debug("Step 1 - sg_d[%d]: %p length: %d" |
404 | " offset: %u sg_s[%d].length: %u\n", i, | 382 | " offset: %u sg_s[%d].length: %u\n", i, |
405 | &sg_d[i], sg_d[i].length, sg_d[i].offset, j, | 383 | &sg_d[i], sg_d[i].length, sg_d[i].offset, j, |
406 | sg_s[j].length); | 384 | sg_s[j].length); |
407 | printk(KERN_INFO "Step 1 - length: %u dst_offset: %u" | 385 | pr_debug("Step 1 - length: %u dst_offset: %u" |
408 | " src_offset: %u\n", length, dst_offset, | 386 | " src_offset: %u\n", length, dst_offset, |
409 | src_offset); | 387 | src_offset); |
410 | #endif | 388 | |
411 | if (length > req->rd_size) | 389 | if (length > req->rd_size) |
412 | length = req->rd_size; | 390 | length = req->rd_size; |
413 | 391 | ||
@@ -424,15 +402,15 @@ static int rd_MEMCPY_read(struct rd_request *req) | |||
424 | page_end = 0; | 402 | page_end = 0; |
425 | } else { | 403 | } else { |
426 | length = (sg_s[j].length - src_offset); | 404 | length = (sg_s[j].length - src_offset); |
427 | #ifdef DEBUG_RAMDISK_MCP | 405 | |
428 | printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d" | 406 | pr_debug("Step 2 - sg_d[%d]: %p length: %d" |
429 | " offset: %u sg_s[%d].length: %u\n", i, | 407 | " offset: %u sg_s[%d].length: %u\n", i, |
430 | &sg_d[i], sg_d[i].length, sg_d[i].offset, | 408 | &sg_d[i], sg_d[i].length, sg_d[i].offset, |
431 | j, sg_s[j].length); | 409 | j, sg_s[j].length); |
432 | printk(KERN_INFO "Step 2 - length: %u dst_offset: %u" | 410 | pr_debug("Step 2 - length: %u dst_offset: %u" |
433 | " src_offset: %u\n", length, dst_offset, | 411 | " src_offset: %u\n", length, dst_offset, |
434 | src_offset); | 412 | src_offset); |
435 | #endif | 413 | |
436 | if (length > req->rd_size) | 414 | if (length > req->rd_size) |
437 | length = req->rd_size; | 415 | length = req->rd_size; |
438 | 416 | ||
@@ -456,32 +434,29 @@ static int rd_MEMCPY_read(struct rd_request *req) | |||
456 | 434 | ||
457 | memcpy(dst, src, length); | 435 | memcpy(dst, src, length); |
458 | 436 | ||
459 | #ifdef DEBUG_RAMDISK_MCP | 437 | pr_debug("page: %u, remaining size: %u, length: %u," |
460 | printk(KERN_INFO "page: %u, remaining size: %u, length: %u," | ||
461 | " i: %u, j: %u\n", req->rd_page, | 438 | " i: %u, j: %u\n", req->rd_page, |
462 | (req->rd_size - length), length, i, j); | 439 | (req->rd_size - length), length, i, j); |
463 | #endif | 440 | |
464 | req->rd_size -= length; | 441 | req->rd_size -= length; |
465 | if (!(req->rd_size)) | 442 | if (!req->rd_size) |
466 | return 0; | 443 | return 0; |
467 | 444 | ||
468 | if (!page_end) | 445 | if (!page_end) |
469 | continue; | 446 | continue; |
470 | 447 | ||
471 | if (++req->rd_page <= table->page_end_offset) { | 448 | if (++req->rd_page <= table->page_end_offset) { |
472 | #ifdef DEBUG_RAMDISK_MCP | 449 | pr_debug("page: %u in same page table\n", |
473 | printk(KERN_INFO "page: %u in same page table\n", | ||
474 | req->rd_page); | 450 | req->rd_page); |
475 | #endif | ||
476 | continue; | 451 | continue; |
477 | } | 452 | } |
478 | #ifdef DEBUG_RAMDISK_MCP | 453 | |
479 | printk(KERN_INFO "getting new page table for page: %u\n", | 454 | pr_debug("getting new page table for page: %u\n", |
480 | req->rd_page); | 455 | req->rd_page); |
481 | #endif | 456 | |
482 | table = rd_get_sg_table(dev, req->rd_page); | 457 | table = rd_get_sg_table(dev, req->rd_page); |
483 | if (!(table)) | 458 | if (!table) |
484 | return -1; | 459 | return -EINVAL; |
485 | 460 | ||
486 | sg_s = &table->sg_table[j = 0]; | 461 | sg_s = &table->sg_table[j = 0]; |
487 | } | 462 | } |
@@ -496,7 +471,7 @@ static int rd_MEMCPY_read(struct rd_request *req) | |||
496 | static int rd_MEMCPY_write(struct rd_request *req) | 471 | static int rd_MEMCPY_write(struct rd_request *req) |
497 | { | 472 | { |
498 | struct se_task *task = &req->rd_task; | 473 | struct se_task *task = &req->rd_task; |
499 | struct rd_dev *dev = req->rd_dev; | 474 | struct rd_dev *dev = req->rd_task.se_dev->dev_ptr; |
500 | struct rd_dev_sg_table *table; | 475 | struct rd_dev_sg_table *table; |
501 | struct scatterlist *sg_d, *sg_s; | 476 | struct scatterlist *sg_d, *sg_s; |
502 | void *dst, *src; | 477 | void *dst, *src; |
@@ -505,32 +480,32 @@ static int rd_MEMCPY_write(struct rd_request *req) | |||
505 | u32 rd_offset = req->rd_offset; | 480 | u32 rd_offset = req->rd_offset; |
506 | 481 | ||
507 | table = rd_get_sg_table(dev, req->rd_page); | 482 | table = rd_get_sg_table(dev, req->rd_page); |
508 | if (!(table)) | 483 | if (!table) |
509 | return -1; | 484 | return -EINVAL; |
510 | 485 | ||
511 | table_sg_end = (table->page_end_offset - req->rd_page); | 486 | table_sg_end = (table->page_end_offset - req->rd_page); |
512 | sg_d = &table->sg_table[req->rd_page - table->page_start_offset]; | 487 | sg_d = &table->sg_table[req->rd_page - table->page_start_offset]; |
513 | sg_s = task->task_sg; | 488 | sg_s = task->task_sg; |
514 | #ifdef DEBUG_RAMDISK_MCP | 489 | |
515 | printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u," | 490 | pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u," |
516 | " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size, | 491 | " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size, |
517 | req->rd_page, req->rd_offset); | 492 | req->rd_page, req->rd_offset); |
518 | #endif | 493 | |
519 | dst_offset = rd_offset; | 494 | dst_offset = rd_offset; |
520 | 495 | ||
521 | while (req->rd_size) { | 496 | while (req->rd_size) { |
522 | if ((sg_s[i].length - src_offset) < | 497 | if ((sg_s[i].length - src_offset) < |
523 | (sg_d[j].length - dst_offset)) { | 498 | (sg_d[j].length - dst_offset)) { |
524 | length = (sg_s[i].length - src_offset); | 499 | length = (sg_s[i].length - src_offset); |
525 | #ifdef DEBUG_RAMDISK_MCP | 500 | |
526 | printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d" | 501 | pr_debug("Step 1 - sg_s[%d]: %p length: %d" |
527 | " offset: %d sg_d[%d].length: %u\n", i, | 502 | " offset: %d sg_d[%d].length: %u\n", i, |
528 | &sg_s[i], sg_s[i].length, sg_s[i].offset, | 503 | &sg_s[i], sg_s[i].length, sg_s[i].offset, |
529 | j, sg_d[j].length); | 504 | j, sg_d[j].length); |
530 | printk(KERN_INFO "Step 1 - length: %u src_offset: %u" | 505 | pr_debug("Step 1 - length: %u src_offset: %u" |
531 | " dst_offset: %u\n", length, src_offset, | 506 | " dst_offset: %u\n", length, src_offset, |
532 | dst_offset); | 507 | dst_offset); |
533 | #endif | 508 | |
534 | if (length > req->rd_size) | 509 | if (length > req->rd_size) |
535 | length = req->rd_size; | 510 | length = req->rd_size; |
536 | 511 | ||
@@ -547,15 +522,15 @@ static int rd_MEMCPY_write(struct rd_request *req) | |||
547 | page_end = 0; | 522 | page_end = 0; |
548 | } else { | 523 | } else { |
549 | length = (sg_d[j].length - dst_offset); | 524 | length = (sg_d[j].length - dst_offset); |
550 | #ifdef DEBUG_RAMDISK_MCP | 525 | |
551 | printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d" | 526 | pr_debug("Step 2 - sg_s[%d]: %p length: %d" |
552 | " offset: %d sg_d[%d].length: %u\n", i, | 527 | " offset: %d sg_d[%d].length: %u\n", i, |
553 | &sg_s[i], sg_s[i].length, sg_s[i].offset, | 528 | &sg_s[i], sg_s[i].length, sg_s[i].offset, |
554 | j, sg_d[j].length); | 529 | j, sg_d[j].length); |
555 | printk(KERN_INFO "Step 2 - length: %u src_offset: %u" | 530 | pr_debug("Step 2 - length: %u src_offset: %u" |
556 | " dst_offset: %u\n", length, src_offset, | 531 | " dst_offset: %u\n", length, src_offset, |
557 | dst_offset); | 532 | dst_offset); |
558 | #endif | 533 | |
559 | if (length > req->rd_size) | 534 | if (length > req->rd_size) |
560 | length = req->rd_size; | 535 | length = req->rd_size; |
561 | 536 | ||
@@ -579,32 +554,29 @@ static int rd_MEMCPY_write(struct rd_request *req) | |||
579 | 554 | ||
580 | memcpy(dst, src, length); | 555 | memcpy(dst, src, length); |
581 | 556 | ||
582 | #ifdef DEBUG_RAMDISK_MCP | 557 | pr_debug("page: %u, remaining size: %u, length: %u," |
583 | printk(KERN_INFO "page: %u, remaining size: %u, length: %u," | ||
584 | " i: %u, j: %u\n", req->rd_page, | 558 | " i: %u, j: %u\n", req->rd_page, |
585 | (req->rd_size - length), length, i, j); | 559 | (req->rd_size - length), length, i, j); |
586 | #endif | 560 | |
587 | req->rd_size -= length; | 561 | req->rd_size -= length; |
588 | if (!(req->rd_size)) | 562 | if (!req->rd_size) |
589 | return 0; | 563 | return 0; |
590 | 564 | ||
591 | if (!page_end) | 565 | if (!page_end) |
592 | continue; | 566 | continue; |
593 | 567 | ||
594 | if (++req->rd_page <= table->page_end_offset) { | 568 | if (++req->rd_page <= table->page_end_offset) { |
595 | #ifdef DEBUG_RAMDISK_MCP | 569 | pr_debug("page: %u in same page table\n", |
596 | printk(KERN_INFO "page: %u in same page table\n", | ||
597 | req->rd_page); | 570 | req->rd_page); |
598 | #endif | ||
599 | continue; | 571 | continue; |
600 | } | 572 | } |
601 | #ifdef DEBUG_RAMDISK_MCP | 573 | |
602 | printk(KERN_INFO "getting new page table for page: %u\n", | 574 | pr_debug("getting new page table for page: %u\n", |
603 | req->rd_page); | 575 | req->rd_page); |
604 | #endif | 576 | |
605 | table = rd_get_sg_table(dev, req->rd_page); | 577 | table = rd_get_sg_table(dev, req->rd_page); |
606 | if (!(table)) | 578 | if (!table) |
607 | return -1; | 579 | return -EINVAL; |
608 | 580 | ||
609 | sg_d = &table->sg_table[j = 0]; | 581 | sg_d = &table->sg_table[j = 0]; |
610 | } | 582 | } |
@@ -623,11 +595,11 @@ static int rd_MEMCPY_do_task(struct se_task *task) | |||
623 | unsigned long long lba; | 595 | unsigned long long lba; |
624 | int ret; | 596 | int ret; |
625 | 597 | ||
626 | req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE; | 598 | req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE; |
627 | lba = task->task_lba; | 599 | lba = task->task_lba; |
628 | req->rd_offset = (do_div(lba, | 600 | req->rd_offset = (do_div(lba, |
629 | (PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) * | 601 | (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) * |
630 | DEV_ATTRIB(dev)->block_size; | 602 | dev->se_sub_dev->se_dev_attrib.block_size; |
631 | req->rd_size = task->task_size; | 603 | req->rd_size = task->task_size; |
632 | 604 | ||
633 | if (task->task_data_direction == DMA_FROM_DEVICE) | 605 | if (task->task_data_direction == DMA_FROM_DEVICE) |
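The LBA arithmetic in the hunk above locates a ramdisk page and intra-page offset from the task LBA. A worked userspace version, assuming 512-byte blocks and 4 KiB pages (do_div() in the kernel returns the remainder; a plain modulo stands in for it here):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long lba = 19;        /* hypothetical task_lba */
            unsigned int block_size = 512;      /* assumed dev attribute */
            unsigned int page_size = 4096;
            unsigned int blocks_per_page = page_size / block_size;  /* 8 */

            unsigned int rd_page =
                    (unsigned int)((lba * block_size) / page_size);
            unsigned int rd_offset =
                    (unsigned int)(lba % blocks_per_page) * block_size;

            /* LBA 19 -> page 2, offset 1536 */
            printf("page %u, offset %u\n", rd_page, rd_offset);
            return 0;
    }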
@@ -644,274 +616,6 @@ static int rd_MEMCPY_do_task(struct se_task *task) | |||
644 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | 616 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; |
645 | } | 617 | } |
646 | 618 | ||
647 | /* rd_DIRECT_with_offset(): | ||
648 | * | ||
649 | * | ||
650 | */ | ||
651 | static int rd_DIRECT_with_offset( | ||
652 | struct se_task *task, | ||
653 | struct list_head *se_mem_list, | ||
654 | u32 *se_mem_cnt, | ||
655 | u32 *task_offset) | ||
656 | { | ||
657 | struct rd_request *req = RD_REQ(task); | ||
658 | struct rd_dev *dev = req->rd_dev; | ||
659 | struct rd_dev_sg_table *table; | ||
660 | struct se_mem *se_mem; | ||
661 | struct scatterlist *sg_s; | ||
662 | u32 j = 0, set_offset = 1; | ||
663 | u32 get_next_table = 0, offset_length, table_sg_end; | ||
664 | |||
665 | table = rd_get_sg_table(dev, req->rd_page); | ||
666 | if (!(table)) | ||
667 | return -1; | ||
668 | |||
669 | table_sg_end = (table->page_end_offset - req->rd_page); | ||
670 | sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; | ||
671 | #ifdef DEBUG_RAMDISK_DR | ||
672 | printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n", | ||
673 | (task->task_data_direction == DMA_TO_DEVICE) ? | ||
674 | "Write" : "Read", | ||
675 | task->task_lba, req->rd_size, req->rd_page, req->rd_offset); | ||
676 | #endif | ||
677 | while (req->rd_size) { | ||
678 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); | ||
679 | if (!(se_mem)) { | ||
680 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); | ||
681 | return -1; | ||
682 | } | ||
683 | INIT_LIST_HEAD(&se_mem->se_list); | ||
684 | |||
685 | if (set_offset) { | ||
686 | offset_length = sg_s[j].length - req->rd_offset; | ||
687 | if (offset_length > req->rd_size) | ||
688 | offset_length = req->rd_size; | ||
689 | |||
690 | se_mem->se_page = sg_page(&sg_s[j++]); | ||
691 | se_mem->se_off = req->rd_offset; | ||
692 | se_mem->se_len = offset_length; | ||
693 | |||
694 | set_offset = 0; | ||
695 | get_next_table = (j > table_sg_end); | ||
696 | goto check_eot; | ||
697 | } | ||
698 | |||
699 | offset_length = (req->rd_size < req->rd_offset) ? | ||
700 | req->rd_size : req->rd_offset; | ||
701 | |||
702 | se_mem->se_page = sg_page(&sg_s[j]); | ||
703 | se_mem->se_len = offset_length; | ||
704 | |||
705 | set_offset = 1; | ||
706 | |||
707 | check_eot: | ||
708 | #ifdef DEBUG_RAMDISK_DR | ||
709 | printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u" | ||
710 | " se_mem: %p, se_page: %p se_off: %u se_len: %u\n", | ||
711 | req->rd_page, req->rd_size, offset_length, j, se_mem, | ||
712 | se_mem->se_page, se_mem->se_off, se_mem->se_len); | ||
713 | #endif | ||
714 | list_add_tail(&se_mem->se_list, se_mem_list); | ||
715 | (*se_mem_cnt)++; | ||
716 | |||
717 | req->rd_size -= offset_length; | ||
718 | if (!(req->rd_size)) | ||
719 | goto out; | ||
720 | |||
721 | if (!set_offset && !get_next_table) | ||
722 | continue; | ||
723 | |||
724 | if (++req->rd_page <= table->page_end_offset) { | ||
725 | #ifdef DEBUG_RAMDISK_DR | ||
726 | printk(KERN_INFO "page: %u in same page table\n", | ||
727 | req->rd_page); | ||
728 | #endif | ||
729 | continue; | ||
730 | } | ||
731 | #ifdef DEBUG_RAMDISK_DR | ||
732 | printk(KERN_INFO "getting new page table for page: %u\n", | ||
733 | req->rd_page); | ||
734 | #endif | ||
735 | table = rd_get_sg_table(dev, req->rd_page); | ||
736 | if (!(table)) | ||
737 | return -1; | ||
738 | |||
739 | sg_s = &table->sg_table[j = 0]; | ||
740 | } | ||
741 | |||
742 | out: | ||
743 | T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt; | ||
744 | #ifdef DEBUG_RAMDISK_DR | ||
745 | printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", | ||
746 | *se_mem_cnt); | ||
747 | #endif | ||
748 | return 0; | ||
749 | } | ||
750 | |||
751 | /* rd_DIRECT_without_offset(): | ||
752 | * | ||
753 | * | ||
754 | */ | ||
755 | static int rd_DIRECT_without_offset( | ||
756 | struct se_task *task, | ||
757 | struct list_head *se_mem_list, | ||
758 | u32 *se_mem_cnt, | ||
759 | u32 *task_offset) | ||
760 | { | ||
761 | struct rd_request *req = RD_REQ(task); | ||
762 | struct rd_dev *dev = req->rd_dev; | ||
763 | struct rd_dev_sg_table *table; | ||
764 | struct se_mem *se_mem; | ||
765 | struct scatterlist *sg_s; | ||
766 | u32 length, j = 0; | ||
767 | |||
768 | table = rd_get_sg_table(dev, req->rd_page); | ||
769 | if (!(table)) | ||
770 | return -1; | ||
771 | |||
772 | sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; | ||
773 | #ifdef DEBUG_RAMDISK_DR | ||
774 | printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n", | ||
775 | (task->task_data_direction == DMA_TO_DEVICE) ? | ||
776 | "Write" : "Read", | ||
777 | task->task_lba, req->rd_size, req->rd_page); | ||
778 | #endif | ||
779 | while (req->rd_size) { | ||
780 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); | ||
781 | if (!(se_mem)) { | ||
782 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); | ||
783 | return -1; | ||
784 | } | ||
785 | INIT_LIST_HEAD(&se_mem->se_list); | ||
786 | |||
787 | length = (req->rd_size < sg_s[j].length) ? | ||
788 | req->rd_size : sg_s[j].length; | ||
789 | |||
790 | se_mem->se_page = sg_page(&sg_s[j++]); | ||
791 | se_mem->se_len = length; | ||
792 | |||
793 | #ifdef DEBUG_RAMDISK_DR | ||
794 | printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p," | ||
795 | " se_page: %p se_off: %u se_len: %u\n", req->rd_page, | ||
796 | req->rd_size, j, se_mem, se_mem->se_page, | ||
797 | se_mem->se_off, se_mem->se_len); | ||
798 | #endif | ||
799 | list_add_tail(&se_mem->se_list, se_mem_list); | ||
800 | (*se_mem_cnt)++; | ||
801 | |||
802 | req->rd_size -= length; | ||
803 | if (!(req->rd_size)) | ||
804 | goto out; | ||
805 | |||
806 | if (++req->rd_page <= table->page_end_offset) { | ||
807 | #ifdef DEBUG_RAMDISK_DR | ||
808 | printk("page: %u in same page table\n", | ||
809 | req->rd_page); | ||
810 | #endif | ||
811 | continue; | ||
812 | } | ||
813 | #ifdef DEBUG_RAMDISK_DR | ||
814 | printk(KERN_INFO "getting new page table for page: %u\n", | ||
815 | req->rd_page); | ||
816 | #endif | ||
817 | table = rd_get_sg_table(dev, req->rd_page); | ||
818 | if (!(table)) | ||
819 | return -1; | ||
820 | |||
821 | sg_s = &table->sg_table[j = 0]; | ||
822 | } | ||
823 | |||
824 | out: | ||
825 | T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt; | ||
826 | #ifdef DEBUG_RAMDISK_DR | ||
827 | printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", | ||
828 | *se_mem_cnt); | ||
829 | #endif | ||
830 | return 0; | ||
831 | } | ||
832 | |||
833 | /* rd_DIRECT_do_se_mem_map(): | ||
834 | * | ||
835 | * | ||
836 | */ | ||
837 | static int rd_DIRECT_do_se_mem_map( | ||
838 | struct se_task *task, | ||
839 | struct list_head *se_mem_list, | ||
840 | void *in_mem, | ||
841 | struct se_mem *in_se_mem, | ||
842 | struct se_mem **out_se_mem, | ||
843 | u32 *se_mem_cnt, | ||
844 | u32 *task_offset_in) | ||
845 | { | ||
846 | struct se_cmd *cmd = task->task_se_cmd; | ||
847 | struct rd_request *req = RD_REQ(task); | ||
848 | u32 task_offset = *task_offset_in; | ||
849 | unsigned long long lba; | ||
850 | int ret; | ||
851 | |||
852 | req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) / | ||
853 | PAGE_SIZE); | ||
854 | lba = task->task_lba; | ||
855 | req->rd_offset = (do_div(lba, | ||
856 | (PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) * | ||
857 | DEV_ATTRIB(task->se_dev)->block_size; | ||
858 | req->rd_size = task->task_size; | ||
859 | |||
860 | if (req->rd_offset) | ||
861 | ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt, | ||
862 | task_offset_in); | ||
863 | else | ||
864 | ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt, | ||
865 | task_offset_in); | ||
866 | |||
867 | if (ret < 0) | ||
868 | return ret; | ||
869 | |||
870 | if (CMD_TFO(cmd)->task_sg_chaining == 0) | ||
871 | return 0; | ||
872 | /* | ||
873 | * Currently prevent writers from multiple HW fabrics doing | ||
874 | * pci_map_sg() to RD_DR's internal scatterlist memory. | ||
875 | */ | ||
876 | if (cmd->data_direction == DMA_TO_DEVICE) { | ||
877 | printk(KERN_ERR "DMA_TO_DEVICE not supported for" | ||
878 | " RAMDISK_DR with task_sg_chaining=1\n"); | ||
879 | return -1; | ||
880 | } | ||
881 | /* | ||
882 | * Special case for if task_sg_chaining is enabled, then | ||
883 | * we setup struct se_task->task_sg[], as it will be used by | ||
885 | * transport_do_task_sg_chain() for creating chained SGLs ||
885 | * across multiple struct se_task->task_sg[]. | ||
886 | */ | ||
887 | if (!(transport_calc_sg_num(task, | ||
888 | list_entry(T_TASK(cmd)->t_mem_list->next, | ||
889 | struct se_mem, se_list), | ||
890 | task_offset))) | ||
891 | return -1; | ||
892 | |||
893 | return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, | ||
894 | list_entry(T_TASK(cmd)->t_mem_list->next, | ||
895 | struct se_mem, se_list), | ||
896 | out_se_mem, se_mem_cnt, task_offset_in); | ||
897 | } | ||
898 | |||
899 | /* rd_DIRECT_do_task(): (Part of se_subsystem_api_t template) | ||
900 | * | ||
901 | * | ||
902 | */ | ||
903 | static int rd_DIRECT_do_task(struct se_task *task) | ||
904 | { | ||
905 | /* | ||
906 | * At this point the locally allocated RD tables have been mapped | ||
907 | * to struct se_mem elements in rd_DIRECT_do_se_mem_map(). | ||
908 | */ | ||
909 | task->task_scsi_status = GOOD; | ||
910 | transport_complete_task(task, 1); | ||
911 | |||
912 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | ||
913 | } | ||
914 | |||
915 | /* rd_free_task(): (Part of se_subsystem_api_t template) | 619 | /* rd_free_task(): (Part of se_subsystem_api_t template) |
916 | * | 620 | * |
917 | * | 621 | * |
@@ -956,7 +660,7 @@ static ssize_t rd_set_configfs_dev_params( | |||
956 | case Opt_rd_pages: | 660 | case Opt_rd_pages: |
957 | match_int(args, &arg); | 661 | match_int(args, &arg); |
958 | rd_dev->rd_page_count = arg; | 662 | rd_dev->rd_page_count = arg; |
959 | printk(KERN_INFO "RAMDISK: Referencing Page" | 663 | pr_debug("RAMDISK: Referencing Page" |
960 | " Count: %u\n", rd_dev->rd_page_count); | 664 | " Count: %u\n", rd_dev->rd_page_count); |
961 | rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT; | 665 | rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT; |
962 | break; | 666 | break; |
@@ -974,8 +678,8 @@ static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys | |||
974 | struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; | 678 | struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; |
975 | 679 | ||
976 | if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) { | 680 | if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) { |
977 | printk(KERN_INFO "Missing rd_pages= parameter\n"); | 681 | pr_debug("Missing rd_pages= parameter\n"); |
978 | return -1; | 682 | return -EINVAL; |
979 | } | 683 | } |
980 | 684 | ||
981 | return 0; | 685 | return 0; |
@@ -1021,32 +725,11 @@ static sector_t rd_get_blocks(struct se_device *dev) | |||
1021 | { | 725 | { |
1022 | struct rd_dev *rd_dev = dev->dev_ptr; | 726 | struct rd_dev *rd_dev = dev->dev_ptr; |
1023 | unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) / | 727 | unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) / |
1024 | DEV_ATTRIB(dev)->block_size) - 1; | 728 | dev->se_sub_dev->se_dev_attrib.block_size) - 1; |
1025 | 729 | ||
1026 | return blocks_long; | 730 | return blocks_long; |
1027 | } | 731 | } |
1028 | 732 | ||
1029 | static struct se_subsystem_api rd_dr_template = { | ||
1030 | .name = "rd_dr", | ||
1031 | .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, | ||
1032 | .attach_hba = rd_attach_hba, | ||
1033 | .detach_hba = rd_detach_hba, | ||
1034 | .allocate_virtdevice = rd_DIRECT_allocate_virtdevice, | ||
1035 | .create_virtdevice = rd_DIRECT_create_virtdevice, | ||
1036 | .free_device = rd_free_device, | ||
1037 | .alloc_task = rd_alloc_task, | ||
1038 | .do_task = rd_DIRECT_do_task, | ||
1039 | .free_task = rd_free_task, | ||
1040 | .check_configfs_dev_params = rd_check_configfs_dev_params, | ||
1041 | .set_configfs_dev_params = rd_set_configfs_dev_params, | ||
1042 | .show_configfs_dev_params = rd_show_configfs_dev_params, | ||
1043 | .get_cdb = rd_get_cdb, | ||
1044 | .get_device_rev = rd_get_device_rev, | ||
1045 | .get_device_type = rd_get_device_type, | ||
1046 | .get_blocks = rd_get_blocks, | ||
1047 | .do_se_mem_map = rd_DIRECT_do_se_mem_map, | ||
1048 | }; | ||
1049 | |||
1050 | static struct se_subsystem_api rd_mcp_template = { | 733 | static struct se_subsystem_api rd_mcp_template = { |
1051 | .name = "rd_mcp", | 734 | .name = "rd_mcp", |
1052 | .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, | 735 | .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, |
@@ -1071,13 +754,8 @@ int __init rd_module_init(void) | |||
1071 | { | 754 | { |
1072 | int ret; | 755 | int ret; |
1073 | 756 | ||
1074 | ret = transport_subsystem_register(&rd_dr_template); | ||
1075 | if (ret < 0) | ||
1076 | return ret; | ||
1077 | |||
1078 | ret = transport_subsystem_register(&rd_mcp_template); | 757 | ret = transport_subsystem_register(&rd_mcp_template); |
1079 | if (ret < 0) { | 758 | if (ret < 0) { |
1080 | transport_subsystem_release(&rd_dr_template); | ||
1081 | return ret; | 759 | return ret; |
1082 | } | 760 | } |
1083 | 761 | ||
@@ -1086,6 +764,5 @@ int __init rd_module_init(void) | |||
1086 | 764 | ||
1087 | void rd_module_exit(void) | 765 | void rd_module_exit(void) |
1088 | { | 766 | { |
1089 | transport_subsystem_release(&rd_dr_template); | ||
1090 | transport_subsystem_release(&rd_mcp_template); | 767 | transport_subsystem_release(&rd_mcp_template); |
1091 | } | 768 | } |
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h index 3ea19e29d8ec..0d027732cd00 100644 --- a/drivers/target/target_core_rd.h +++ b/drivers/target/target_core_rd.h | |||
@@ -7,8 +7,6 @@ | |||
7 | 7 | ||
8 | /* Largest piece of memory kmalloc can allocate */ | 8 | /* Largest piece of memory kmalloc can allocate */ |
9 | #define RD_MAX_ALLOCATION_SIZE 65536 | 9 | #define RD_MAX_ALLOCATION_SIZE 65536 |
10 | /* Maximum queuedepth for the Ramdisk HBA */ | ||
11 | #define RD_HBA_QUEUE_DEPTH 256 | ||
12 | #define RD_DEVICE_QUEUE_DEPTH 32 | 10 | #define RD_DEVICE_QUEUE_DEPTH 32 |
13 | #define RD_MAX_DEVICE_QUEUE_DEPTH 128 | 11 | #define RD_MAX_DEVICE_QUEUE_DEPTH 128 |
14 | #define RD_BLOCKSIZE 512 | 12 | #define RD_BLOCKSIZE 512 |
@@ -34,8 +32,6 @@ struct rd_request { | |||
34 | u32 rd_page_count; | 32 | u32 rd_page_count; |
35 | /* Scatterlist count */ | 33 | /* Scatterlist count */ |
36 | u32 rd_size; | 34 | u32 rd_size; |
37 | /* Ramdisk device */ | ||
38 | struct rd_dev *rd_dev; | ||
39 | } ____cacheline_aligned; | 35 | } ____cacheline_aligned; |
40 | 36 | ||
41 | struct rd_dev_sg_table { | 37 | struct rd_dev_sg_table { |
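rd_get_sg_table(), referenced throughout the ramdisk read/write paths above, walks the table array until it finds the entry whose page window contains the requested page. A minimal sketch of that lookup with a hypothetical layout:

    #include <stdio.h>

    struct sg_table_sketch {
            unsigned int page_start_offset;
            unsigned int page_end_offset;
    };

    static struct sg_table_sketch *lookup(struct sg_table_sketch *tables,
                                          unsigned int count, unsigned int page)
    {
            unsigned int i;

            for (i = 0; i < count; i++) {
                    if (page >= tables[i].page_start_offset &&
                        page <= tables[i].page_end_offset)
                            return &tables[i];
            }
            return NULL;    /* the kernel logs pr_err() here */
    }

    int main(void)
    {
            struct sg_table_sketch tables[] = {
                    { 0, 31 }, { 32, 63 }, { 64, 99 }, /* hypothetical windows */
            };
            struct sg_table_sketch *t = lookup(tables, 3, 40);

            if (t)
                    printf("page 40 lives in window %u..%u\n",
                           t->page_start_offset, t->page_end_offset);
            return 0;
    }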
diff --git a/drivers/target/target_core_scdb.c b/drivers/target/target_core_scdb.c index dc6fed037ab3..72843441d4fa 100644 --- a/drivers/target/target_core_scdb.c +++ b/drivers/target/target_core_scdb.c | |||
@@ -42,13 +42,13 @@ | |||
42 | */ | 42 | */ |
43 | void split_cdb_XX_6( | 43 | void split_cdb_XX_6( |
44 | unsigned long long lba, | 44 | unsigned long long lba, |
45 | u32 *sectors, | 45 | u32 sectors, |
46 | unsigned char *cdb) | 46 | unsigned char *cdb) |
47 | { | 47 | { |
48 | cdb[1] = (lba >> 16) & 0x1f; | 48 | cdb[1] = (lba >> 16) & 0x1f; |
49 | cdb[2] = (lba >> 8) & 0xff; | 49 | cdb[2] = (lba >> 8) & 0xff; |
50 | cdb[3] = lba & 0xff; | 50 | cdb[3] = lba & 0xff; |
51 | cdb[4] = *sectors & 0xff; | 51 | cdb[4] = sectors & 0xff; |
52 | } | 52 | } |
53 | 53 | ||
54 | /* split_cdb_XX_10(): | 54 | /* split_cdb_XX_10(): |
@@ -57,11 +57,11 @@ void split_cdb_XX_6( | |||
57 | */ | 57 | */ |
58 | void split_cdb_XX_10( | 58 | void split_cdb_XX_10( |
59 | unsigned long long lba, | 59 | unsigned long long lba, |
60 | u32 *sectors, | 60 | u32 sectors, |
61 | unsigned char *cdb) | 61 | unsigned char *cdb) |
62 | { | 62 | { |
63 | put_unaligned_be32(lba, &cdb[2]); | 63 | put_unaligned_be32(lba, &cdb[2]); |
64 | put_unaligned_be16(*sectors, &cdb[7]); | 64 | put_unaligned_be16(sectors, &cdb[7]); |
65 | } | 65 | } |
66 | 66 | ||
67 | /* split_cdb_XX_12(): | 67 | /* split_cdb_XX_12(): |
@@ -70,11 +70,11 @@ void split_cdb_XX_10( | |||
70 | */ | 70 | */ |
71 | void split_cdb_XX_12( | 71 | void split_cdb_XX_12( |
72 | unsigned long long lba, | 72 | unsigned long long lba, |
73 | u32 *sectors, | 73 | u32 sectors, |
74 | unsigned char *cdb) | 74 | unsigned char *cdb) |
75 | { | 75 | { |
76 | put_unaligned_be32(lba, &cdb[2]); | 76 | put_unaligned_be32(lba, &cdb[2]); |
77 | put_unaligned_be32(*sectors, &cdb[6]); | 77 | put_unaligned_be32(sectors, &cdb[6]); |
78 | } | 78 | } |
79 | 79 | ||
80 | /* split_cdb_XX_16(): | 80 | /* split_cdb_XX_16(): |
@@ -83,11 +83,11 @@ void split_cdb_XX_12( | |||
83 | */ | 83 | */ |
84 | void split_cdb_XX_16( | 84 | void split_cdb_XX_16( |
85 | unsigned long long lba, | 85 | unsigned long long lba, |
86 | u32 *sectors, | 86 | u32 sectors, |
87 | unsigned char *cdb) | 87 | unsigned char *cdb) |
88 | { | 88 | { |
89 | put_unaligned_be64(lba, &cdb[2]); | 89 | put_unaligned_be64(lba, &cdb[2]); |
90 | put_unaligned_be32(*sectors, &cdb[10]); | 90 | put_unaligned_be32(sectors, &cdb[10]); |
91 | } | 91 | } |
92 | 92 | ||
93 | /* | 93 | /* |
@@ -97,9 +97,9 @@ void split_cdb_XX_16( | |||
97 | */ | 97 | */ |
98 | void split_cdb_XX_32( | 98 | void split_cdb_XX_32( |
99 | unsigned long long lba, | 99 | unsigned long long lba, |
100 | u32 *sectors, | 100 | u32 sectors, |
101 | unsigned char *cdb) | 101 | unsigned char *cdb) |
102 | { | 102 | { |
103 | put_unaligned_be64(lba, &cdb[12]); | 103 | put_unaligned_be64(lba, &cdb[12]); |
104 | put_unaligned_be32(*sectors, &cdb[28]); | 104 | put_unaligned_be32(sectors, &cdb[28]); |
105 | } | 105 | } |
diff --git a/drivers/target/target_core_scdb.h b/drivers/target/target_core_scdb.h index 98cd1c01ed83..48e9ccc9585e 100644 --- a/drivers/target/target_core_scdb.h +++ b/drivers/target/target_core_scdb.h | |||
@@ -1,10 +1,10 @@ | |||
1 | #ifndef TARGET_CORE_SCDB_H | 1 | #ifndef TARGET_CORE_SCDB_H |
2 | #define TARGET_CORE_SCDB_H | 2 | #define TARGET_CORE_SCDB_H |
3 | 3 | ||
4 | extern void split_cdb_XX_6(unsigned long long, u32 *, unsigned char *); | 4 | extern void split_cdb_XX_6(unsigned long long, u32, unsigned char *); |
5 | extern void split_cdb_XX_10(unsigned long long, u32 *, unsigned char *); | 5 | extern void split_cdb_XX_10(unsigned long long, u32, unsigned char *); |
6 | extern void split_cdb_XX_12(unsigned long long, u32 *, unsigned char *); | 6 | extern void split_cdb_XX_12(unsigned long long, u32, unsigned char *); |
7 | extern void split_cdb_XX_16(unsigned long long, u32 *, unsigned char *); | 7 | extern void split_cdb_XX_16(unsigned long long, u32, unsigned char *); |
8 | extern void split_cdb_XX_32(unsigned long long, u32 *, unsigned char *); | 8 | extern void split_cdb_XX_32(unsigned long long, u32, unsigned char *); |
9 | 9 | ||
10 | #endif /* TARGET_CORE_SCDB_H */ | 10 | #endif /* TARGET_CORE_SCDB_H */ |
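The conversion above changes the second parameter of the split_cdb_XX_* helpers from u32 * to plain u32 — the helpers only read the sector count, so passing it by value removes a needless indirection. A minimal userspace sketch of the same big-endian packing (the put_be* helpers and the 0x28 READ(10) opcode below are illustrative stand-ins, not taken from this diff):

    #include <stdio.h>
    #include <stdint.h>

    /* Big-endian stores at possibly unaligned offsets, as put_unaligned_be*() do. */
    static void put_be32(uint32_t v, unsigned char *p)
    {
            p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
    }

    static void put_be16(uint16_t v, unsigned char *p)
    {
            p[0] = v >> 8; p[1] = v;
    }

    /* Mirrors split_cdb_XX_10(): LBA in bytes 2-5, sector count in bytes 7-8. */
    static void split_cdb_10(unsigned long long lba, uint32_t sectors, unsigned char *cdb)
    {
            put_be32(lba, &cdb[2]);
            put_be16(sectors, &cdb[7]);
    }

    int main(void)
    {
            unsigned char cdb[10] = { 0x28 };       /* READ(10) opcode */

            split_cdb_10(0x12345678ULL, 16, cdb);
            for (int i = 0; i < 10; i++)
                    printf("%02x ", cdb[i]);
            printf("\n");   /* prints: 28 00 12 34 56 78 00 00 10 00 */
            return 0;
    }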
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index 5e3a067a7475..a8d6e1dee938 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c | |||
@@ -402,8 +402,8 @@ static ssize_t target_stat_scsi_lu_show_attr_lu_name( | |||
402 | return -ENODEV; | 402 | return -ENODEV; |
403 | /* scsiLuWwnName */ | 403 | /* scsiLuWwnName */ |
404 | return snprintf(page, PAGE_SIZE, "%s\n", | 404 | return snprintf(page, PAGE_SIZE, "%s\n", |
405 | (strlen(DEV_T10_WWN(dev)->unit_serial)) ? | 405 | (strlen(dev->se_sub_dev->t10_wwn.unit_serial)) ? |
406 | (char *)&DEV_T10_WWN(dev)->unit_serial[0] : "None"); | 406 | dev->se_sub_dev->t10_wwn.unit_serial : "None"); |
407 | } | 407 | } |
408 | DEV_STAT_SCSI_LU_ATTR_RO(lu_name); | 408 | DEV_STAT_SCSI_LU_ATTR_RO(lu_name); |
409 | 409 | ||
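This file, like the rest of the series, replaces accessor macros such as DEV_T10_WWN() with explicit member dereferences. A simplified sketch of the pattern (struct shapes trimmed down from the real headers):

    struct t10_wwn { char unit_serial[64]; };
    struct se_subsystem_dev { struct t10_wwn t10_wwn; };
    struct se_device { struct se_subsystem_dev *se_sub_dev; };

    /* Old style: a macro hides the indirection behind a pseudo-accessor... */
    #define DEV_T10_WWN(d)  (&(d)->se_sub_dev->t10_wwn)

    /* ...new style: spell the path out where it is used. */
    static const char *unit_serial(struct se_device *dev)
    {
            return dev->se_sub_dev->t10_wwn.unit_serial;
    }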
@@ -413,17 +413,17 @@ static ssize_t target_stat_scsi_lu_show_attr_vend( | |||
413 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | 413 | struct se_subsystem_dev *se_subdev = container_of(sgrps, |
414 | struct se_subsystem_dev, dev_stat_grps); | 414 | struct se_subsystem_dev, dev_stat_grps); |
415 | struct se_device *dev = se_subdev->se_dev_ptr; | 415 | struct se_device *dev = se_subdev->se_dev_ptr; |
416 | int j; | 416 | int i; |
417 | char str[28]; | 417 | char str[sizeof(dev->se_sub_dev->t10_wwn.vendor)+1]; |
418 | 418 | ||
419 | if (!dev) | 419 | if (!dev) |
420 | return -ENODEV; | 420 | return -ENODEV; |
421 | |||
421 | /* scsiLuVendorId */ | 422 | /* scsiLuVendorId */ |
422 | memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); | 423 | for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++) |
423 | for (j = 0; j < 8; j++) | 424 | str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.vendor[i]) ? |
424 | str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ? | 425 | dev->se_sub_dev->t10_wwn.vendor[i] : ' '; |
425 | DEV_T10_WWN(dev)->vendor[j] : 0x20; | 426 | str[i] = '\0'; |
426 | str[8] = 0; | ||
427 | return snprintf(page, PAGE_SIZE, "%s\n", str); | 427 | return snprintf(page, PAGE_SIZE, "%s\n", str); |
428 | } | 428 | } |
429 | DEV_STAT_SCSI_LU_ATTR_RO(vend); | 429 | DEV_STAT_SCSI_LU_ATTR_RO(vend); |
@@ -434,18 +434,17 @@ static ssize_t target_stat_scsi_lu_show_attr_prod( | |||
434 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | 434 | struct se_subsystem_dev *se_subdev = container_of(sgrps, |
435 | struct se_subsystem_dev, dev_stat_grps); | 435 | struct se_subsystem_dev, dev_stat_grps); |
436 | struct se_device *dev = se_subdev->se_dev_ptr; | 436 | struct se_device *dev = se_subdev->se_dev_ptr; |
437 | int j; | 437 | int i; |
438 | char str[28]; | 438 | char str[sizeof(dev->se_sub_dev->t10_wwn.model)+1]; |
439 | 439 | ||
440 | if (!dev) | 440 | if (!dev) |
441 | return -ENODEV; | 441 | return -ENODEV; |
442 | 442 | ||
443 | /* scsiLuProductId */ | 443 | /* scsiLuProductId */ |
444 | memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); | 444 | for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.model); i++) |
445 | for (j = 0; j < 16; j++) | 445 | str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.model[i]) ? |
446 | str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ? | 446 | dev->se_sub_dev->t10_wwn.model[i] : ' '; |
447 | DEV_T10_WWN(dev)->model[j] : 0x20; | 447 | str[i] = '\0'; |
448 | str[16] = 0; | ||
449 | return snprintf(page, PAGE_SIZE, "%s\n", str); | 448 | return snprintf(page, PAGE_SIZE, "%s\n", str); |
450 | } | 449 | } |
451 | DEV_STAT_SCSI_LU_ATTR_RO(prod); | 450 | DEV_STAT_SCSI_LU_ATTR_RO(prod); |
@@ -456,18 +455,17 @@ static ssize_t target_stat_scsi_lu_show_attr_rev( | |||
456 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | 455 | struct se_subsystem_dev *se_subdev = container_of(sgrps, |
457 | struct se_subsystem_dev, dev_stat_grps); | 456 | struct se_subsystem_dev, dev_stat_grps); |
458 | struct se_device *dev = se_subdev->se_dev_ptr; | 457 | struct se_device *dev = se_subdev->se_dev_ptr; |
459 | int j; | 458 | int i; |
460 | char str[28]; | 459 | char str[sizeof(dev->se_sub_dev->t10_wwn.revision)+1]; |
461 | 460 | ||
462 | if (!dev) | 461 | if (!dev) |
463 | return -ENODEV; | 462 | return -ENODEV; |
464 | 463 | ||
465 | /* scsiLuRevisionId */ | 464 | /* scsiLuRevisionId */ |
466 | memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); | 465 | for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.revision); i++) |
467 | for (j = 0; j < 4; j++) | 466 | str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.revision[i]) ? |
468 | str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ? | 467 | dev->se_sub_dev->t10_wwn.revision[i] : ' '; |
469 | DEV_T10_WWN(dev)->revision[j] : 0x20; | 468 | str[i] = '\0'; |
470 | str[4] = 0; | ||
471 | return snprintf(page, PAGE_SIZE, "%s\n", str); | 469 | return snprintf(page, PAGE_SIZE, "%s\n", str); |
472 | } | 470 | } |
473 | DEV_STAT_SCSI_LU_ATTR_RO(rev); | 471 | DEV_STAT_SCSI_LU_ATTR_RO(rev); |
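The vend, prod and rev attributes now share one pattern: iterate the fixed-width T10 field, substitute spaces for non-printable bytes, and NUL-terminate, instead of memcpy'ing 28 raw bytes and patching a terminator in afterwards. A standalone C sketch of that sanitization (the field contents are made up):

    #include <stdio.h>
    #include <ctype.h>

    /* Sanitize a fixed-width, space-padded field into a printable C string. */
    static void sanitize_field(const char *field, size_t width, char *out)
    {
            size_t i;

            for (i = 0; i < width; i++)
                    out[i] = isprint((unsigned char)field[i]) ? field[i] : ' ';
            out[i] = '\0';
    }

    int main(void)
    {
            char vendor[8] = { 'L', 'I', 'O', '-', 'O', 'R', 'G', 0x01 };
            char buf[sizeof(vendor) + 1];

            sanitize_field(vendor, sizeof(vendor), buf);
            printf("'%s'\n", buf);  /* prints: 'LIO-ORG ' */
            return 0;
    }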
@@ -484,7 +482,7 @@ static ssize_t target_stat_scsi_lu_show_attr_dev_type( | |||
484 | 482 | ||
485 | /* scsiLuPeripheralType */ | 483 | /* scsiLuPeripheralType */ |
486 | return snprintf(page, PAGE_SIZE, "%u\n", | 484 | return snprintf(page, PAGE_SIZE, "%u\n", |
487 | TRANSPORT(dev)->get_device_type(dev)); | 485 | dev->transport->get_device_type(dev)); |
488 | } | 486 | } |
489 | DEV_STAT_SCSI_LU_ATTR_RO(dev_type); | 487 | DEV_STAT_SCSI_LU_ATTR_RO(dev_type); |
490 | 488 | ||
@@ -668,18 +666,18 @@ static struct config_item_type target_stat_scsi_lu_cit = { | |||
668 | */ | 666 | */ |
669 | void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev) | 667 | void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev) |
670 | { | 668 | { |
671 | struct config_group *dev_stat_grp = &DEV_STAT_GRP(se_subdev)->stat_group; | 669 | struct config_group *dev_stat_grp = &se_subdev->dev_stat_grps.stat_group; |
672 | 670 | ||
673 | config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_dev_group, | 671 | config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_dev_group, |
674 | "scsi_dev", &target_stat_scsi_dev_cit); | 672 | "scsi_dev", &target_stat_scsi_dev_cit); |
675 | config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group, | 673 | config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_tgt_dev_group, |
676 | "scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit); | 674 | "scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit); |
677 | config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_lu_group, | 675 | config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_lu_group, |
678 | "scsi_lu", &target_stat_scsi_lu_cit); | 676 | "scsi_lu", &target_stat_scsi_lu_cit); |
679 | 677 | ||
680 | dev_stat_grp->default_groups[0] = &DEV_STAT_GRP(se_subdev)->scsi_dev_group; | 678 | dev_stat_grp->default_groups[0] = &se_subdev->dev_stat_grps.scsi_dev_group; |
681 | dev_stat_grp->default_groups[1] = &DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group; | 679 | dev_stat_grp->default_groups[1] = &se_subdev->dev_stat_grps.scsi_tgt_dev_group; |
682 | dev_stat_grp->default_groups[2] = &DEV_STAT_GRP(se_subdev)->scsi_lu_group; | 680 | dev_stat_grp->default_groups[2] = &se_subdev->dev_stat_grps.scsi_lu_group; |
683 | dev_stat_grp->default_groups[3] = NULL; | 681 | dev_stat_grp->default_groups[3] = NULL; |
684 | } | 682 | } |
685 | 683 | ||
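default_groups is a NULL-terminated pointer array, which is why the code above always stores a trailing NULL sentinel; configfs walks the array until it hits it. A hedged, self-contained model of that contract:

    #include <stdio.h>

    struct config_group { const char *name; };

    /* Walk a NULL-terminated array of group pointers, as configfs does. */
    static void walk_groups(struct config_group **groups)
    {
            for (int i = 0; groups[i]; i++)
                    printf("group: %s\n", groups[i]->name);
    }

    int main(void)
    {
            struct config_group dev = { "scsi_dev" }, tgt = { "scsi_tgt_dev" }, lu = { "scsi_lu" };
            struct config_group *defaults[4] = { &dev, &tgt, &lu, NULL };

            walk_groups(defaults);
            return 0;
    }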
@@ -922,7 +920,7 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_name( | |||
922 | tpg = sep->sep_tpg; | 920 | tpg = sep->sep_tpg; |
923 | 921 | ||
924 | ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n", | 922 | ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n", |
925 | TPG_TFO(tpg)->get_fabric_name(), sep->sep_index); | 923 | tpg->se_tpg_tfo->get_fabric_name(), sep->sep_index); |
926 | spin_unlock(&lun->lun_sep_lock); | 924 | spin_unlock(&lun->lun_sep_lock); |
927 | return ret; | 925 | return ret; |
928 | } | 926 | } |
@@ -945,8 +943,8 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_port_index( | |||
945 | tpg = sep->sep_tpg; | 943 | tpg = sep->sep_tpg; |
946 | 944 | ||
947 | ret = snprintf(page, PAGE_SIZE, "%s%s%d\n", | 945 | ret = snprintf(page, PAGE_SIZE, "%s%s%d\n", |
948 | TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+", | 946 | tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+", |
949 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 947 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
950 | spin_unlock(&lun->lun_sep_lock); | 948 | spin_unlock(&lun->lun_sep_lock); |
951 | return ret; | 949 | return ret; |
952 | } | 950 | } |
@@ -1128,7 +1126,7 @@ static ssize_t target_stat_scsi_transport_show_attr_device( | |||
1128 | tpg = sep->sep_tpg; | 1126 | tpg = sep->sep_tpg; |
1129 | /* scsiTransportType */ | 1127 | /* scsiTransportType */ |
1130 | ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n", | 1128 | ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n", |
1131 | TPG_TFO(tpg)->get_fabric_name()); | 1129 | tpg->se_tpg_tfo->get_fabric_name()); |
1132 | spin_unlock(&lun->lun_sep_lock); | 1130 | spin_unlock(&lun->lun_sep_lock); |
1133 | return ret; | 1131 | return ret; |
1134 | } | 1132 | } |
@@ -1150,7 +1148,7 @@ static ssize_t target_stat_scsi_transport_show_attr_indx( | |||
1150 | } | 1148 | } |
1151 | tpg = sep->sep_tpg; | 1149 | tpg = sep->sep_tpg; |
1152 | ret = snprintf(page, PAGE_SIZE, "%u\n", | 1150 | ret = snprintf(page, PAGE_SIZE, "%u\n", |
1153 | TPG_TFO(tpg)->tpg_get_inst_index(tpg)); | 1151 | tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); |
1154 | spin_unlock(&lun->lun_sep_lock); | 1152 | spin_unlock(&lun->lun_sep_lock); |
1155 | return ret; | 1153 | return ret; |
1156 | } | 1154 | } |
@@ -1173,10 +1171,10 @@ static ssize_t target_stat_scsi_transport_show_attr_dev_name( | |||
1173 | return -ENODEV; | 1171 | return -ENODEV; |
1174 | } | 1172 | } |
1175 | tpg = sep->sep_tpg; | 1173 | tpg = sep->sep_tpg; |
1176 | wwn = DEV_T10_WWN(dev); | 1174 | wwn = &dev->se_sub_dev->t10_wwn; |
1177 | /* scsiTransportDevName */ | 1175 | /* scsiTransportDevName */ |
1178 | ret = snprintf(page, PAGE_SIZE, "%s+%s\n", | 1176 | ret = snprintf(page, PAGE_SIZE, "%s+%s\n", |
1179 | TPG_TFO(tpg)->tpg_get_wwn(tpg), | 1177 | tpg->se_tpg_tfo->tpg_get_wwn(tpg), |
1180 | (strlen(wwn->unit_serial)) ? wwn->unit_serial : | 1178 | (strlen(wwn->unit_serial)) ? wwn->unit_serial : |
1181 | wwn->vendor); | 1179 | wwn->vendor); |
1182 | spin_unlock(&lun->lun_sep_lock); | 1180 | spin_unlock(&lun->lun_sep_lock); |
@@ -1212,18 +1210,18 @@ static struct config_item_type target_stat_scsi_transport_cit = { | |||
1212 | */ | 1210 | */ |
1213 | void target_stat_setup_port_default_groups(struct se_lun *lun) | 1211 | void target_stat_setup_port_default_groups(struct se_lun *lun) |
1214 | { | 1212 | { |
1215 | struct config_group *port_stat_grp = &PORT_STAT_GRP(lun)->stat_group; | 1213 | struct config_group *port_stat_grp = &lun->port_stat_grps.stat_group; |
1216 | 1214 | ||
1217 | config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_port_group, | 1215 | config_group_init_type_name(&lun->port_stat_grps.scsi_port_group, |
1218 | "scsi_port", &target_stat_scsi_port_cit); | 1216 | "scsi_port", &target_stat_scsi_port_cit); |
1219 | config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_tgt_port_group, | 1217 | config_group_init_type_name(&lun->port_stat_grps.scsi_tgt_port_group, |
1220 | "scsi_tgt_port", &target_stat_scsi_tgt_port_cit); | 1218 | "scsi_tgt_port", &target_stat_scsi_tgt_port_cit); |
1221 | config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_transport_group, | 1219 | config_group_init_type_name(&lun->port_stat_grps.scsi_transport_group, |
1222 | "scsi_transport", &target_stat_scsi_transport_cit); | 1220 | "scsi_transport", &target_stat_scsi_transport_cit); |
1223 | 1221 | ||
1224 | port_stat_grp->default_groups[0] = &PORT_STAT_GRP(lun)->scsi_port_group; | 1222 | port_stat_grp->default_groups[0] = &lun->port_stat_grps.scsi_port_group; |
1225 | port_stat_grp->default_groups[1] = &PORT_STAT_GRP(lun)->scsi_tgt_port_group; | 1223 | port_stat_grp->default_groups[1] = &lun->port_stat_grps.scsi_tgt_port_group; |
1226 | port_stat_grp->default_groups[2] = &PORT_STAT_GRP(lun)->scsi_transport_group; | 1224 | port_stat_grp->default_groups[2] = &lun->port_stat_grps.scsi_transport_group; |
1227 | port_stat_grp->default_groups[3] = NULL; | 1225 | port_stat_grp->default_groups[3] = NULL; |
1228 | } | 1226 | } |
1229 | 1227 | ||
@@ -1264,7 +1262,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_inst( | |||
1264 | tpg = nacl->se_tpg; | 1262 | tpg = nacl->se_tpg; |
1265 | /* scsiInstIndex */ | 1263 | /* scsiInstIndex */ |
1266 | ret = snprintf(page, PAGE_SIZE, "%u\n", | 1264 | ret = snprintf(page, PAGE_SIZE, "%u\n", |
1267 | TPG_TFO(tpg)->tpg_get_inst_index(tpg)); | 1265 | tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); |
1268 | spin_unlock_irq(&nacl->device_list_lock); | 1266 | spin_unlock_irq(&nacl->device_list_lock); |
1269 | return ret; | 1267 | return ret; |
1270 | } | 1268 | } |
@@ -1314,7 +1312,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_port( | |||
1314 | } | 1312 | } |
1315 | tpg = nacl->se_tpg; | 1313 | tpg = nacl->se_tpg; |
1316 | /* scsiAuthIntrTgtPortIndex */ | 1314 | /* scsiAuthIntrTgtPortIndex */ |
1317 | ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1315 | ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
1318 | spin_unlock_irq(&nacl->device_list_lock); | 1316 | spin_unlock_irq(&nacl->device_list_lock); |
1319 | return ret; | 1317 | return ret; |
1320 | } | 1318 | } |
@@ -1632,7 +1630,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_inst( | |||
1632 | tpg = nacl->se_tpg; | 1630 | tpg = nacl->se_tpg; |
1633 | /* scsiInstIndex */ | 1631 | /* scsiInstIndex */ |
1634 | ret = snprintf(page, PAGE_SIZE, "%u\n", | 1632 | ret = snprintf(page, PAGE_SIZE, "%u\n", |
1635 | TPG_TFO(tpg)->tpg_get_inst_index(tpg)); | 1633 | tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); |
1636 | spin_unlock_irq(&nacl->device_list_lock); | 1634 | spin_unlock_irq(&nacl->device_list_lock); |
1637 | return ret; | 1635 | return ret; |
1638 | } | 1636 | } |
@@ -1682,7 +1680,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port( | |||
1682 | } | 1680 | } |
1683 | tpg = nacl->se_tpg; | 1681 | tpg = nacl->se_tpg; |
1684 | /* scsiPortIndex */ | 1682 | /* scsiPortIndex */ |
1685 | ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1683 | ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
1686 | spin_unlock_irq(&nacl->device_list_lock); | 1684 | spin_unlock_irq(&nacl->device_list_lock); |
1687 | return ret; | 1685 | return ret; |
1688 | } | 1686 | } |
@@ -1708,7 +1706,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_indx( | |||
1708 | tpg = nacl->se_tpg; | 1706 | tpg = nacl->se_tpg; |
1709 | /* scsiAttIntrPortIndex */ | 1707 | /* scsiAttIntrPortIndex */ |
1710 | ret = snprintf(page, PAGE_SIZE, "%u\n", | 1708 | ret = snprintf(page, PAGE_SIZE, "%u\n", |
1711 | TPG_TFO(tpg)->sess_get_index(se_sess)); | 1709 | tpg->se_tpg_tfo->sess_get_index(se_sess)); |
1712 | spin_unlock_irq(&nacl->nacl_sess_lock); | 1710 | spin_unlock_irq(&nacl->nacl_sess_lock); |
1713 | return ret; | 1711 | return ret; |
1714 | } | 1712 | } |
@@ -1757,8 +1755,8 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident( | |||
1757 | tpg = nacl->se_tpg; | 1755 | tpg = nacl->se_tpg; |
1758 | /* scsiAttIntrPortName+scsiAttIntrPortIdentifier */ | 1756 | /* scsiAttIntrPortName+scsiAttIntrPortIdentifier */ |
1759 | memset(buf, 0, 64); | 1757 | memset(buf, 0, 64); |
1760 | if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) | 1758 | if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) |
1761 | TPG_TFO(tpg)->sess_get_initiator_sid(se_sess, | 1759 | tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, |
1762 | (unsigned char *)&buf[0], 64); | 1760 | (unsigned char *)&buf[0], 64); |
1763 | 1761 | ||
1764 | ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf); | 1762 | ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf); |
@@ -1797,14 +1795,14 @@ static struct config_item_type target_stat_scsi_att_intr_port_cit = { | |||
1797 | */ | 1795 | */ |
1798 | void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl) | 1796 | void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl) |
1799 | { | 1797 | { |
1800 | struct config_group *ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group; | 1798 | struct config_group *ml_stat_grp = &lacl->ml_stat_grps.stat_group; |
1801 | 1799 | ||
1802 | config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_auth_intr_group, | 1800 | config_group_init_type_name(&lacl->ml_stat_grps.scsi_auth_intr_group, |
1803 | "scsi_auth_intr", &target_stat_scsi_auth_intr_cit); | 1801 | "scsi_auth_intr", &target_stat_scsi_auth_intr_cit); |
1804 | config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_att_intr_port_group, | 1802 | config_group_init_type_name(&lacl->ml_stat_grps.scsi_att_intr_port_group, |
1805 | "scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit); | 1803 | "scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit); |
1806 | 1804 | ||
1807 | ml_stat_grp->default_groups[0] = &ML_STAT_GRPS(lacl)->scsi_auth_intr_group; | 1805 | ml_stat_grp->default_groups[0] = &lacl->ml_stat_grps.scsi_auth_intr_group; |
1808 | ml_stat_grp->default_groups[1] = &ML_STAT_GRPS(lacl)->scsi_att_intr_port_group; | 1806 | ml_stat_grp->default_groups[1] = &lacl->ml_stat_grps.scsi_att_intr_port_group; |
1809 | ml_stat_grp->default_groups[2] = NULL; | 1807 | ml_stat_grp->default_groups[2] = NULL; |
1810 | } | 1808 | } |
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 179063d81cdd..27d4925e51c3 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c | |||
@@ -41,13 +41,6 @@ | |||
41 | #include "target_core_alua.h" | 41 | #include "target_core_alua.h" |
42 | #include "target_core_pr.h" | 42 | #include "target_core_pr.h" |
43 | 43 | ||
44 | #define DEBUG_LUN_RESET | ||
45 | #ifdef DEBUG_LUN_RESET | ||
46 | #define DEBUG_LR(x...) printk(KERN_INFO x) | ||
47 | #else | ||
48 | #define DEBUG_LR(x...) | ||
49 | #endif | ||
50 | |||
51 | struct se_tmr_req *core_tmr_alloc_req( | 44 | struct se_tmr_req *core_tmr_alloc_req( |
52 | struct se_cmd *se_cmd, | 45 | struct se_cmd *se_cmd, |
53 | void *fabric_tmr_ptr, | 46 | void *fabric_tmr_ptr, |
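Dropping DEBUG_LUN_RESET/DEBUG_LR() in favor of pr_debug() trades a hand-rolled per-file switch for the kernel-wide convention: pr_debug() compiles to nothing unless DEBUG is defined for the file or CONFIG_DYNAMIC_DEBUG selects it at runtime. A userspace model of the two shapes (pr_debug_model is an illustrative stand-in, not the kernel macro):

    #include <stdio.h>

    /* Old pattern: per-file on/off define, printed at KERN_INFO when on. */
    #ifdef DEBUG_LUN_RESET
    #define DEBUG_LR(fmt, ...) printf(fmt, ##__VA_ARGS__)
    #else
    #define DEBUG_LR(fmt, ...) do { } while (0)
    #endif

    /* New pattern: one standard macro that is a no-op unless debugging
     * is enabled, so call sites never need a local wrapper. */
    #ifdef DEBUG
    #define pr_debug_model(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)
    #else
    #define pr_debug_model(fmt, ...) do { } while (0)
    #endif

    int main(void)
    {
            DEBUG_LR("old-style debug\n");
            pr_debug_model("new-style debug\n");
            return 0;
    }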
@@ -57,8 +50,8 @@ struct se_tmr_req *core_tmr_alloc_req( | |||
57 | 50 | ||
58 | tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ? | 51 | tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ? |
59 | GFP_ATOMIC : GFP_KERNEL); | 52 | GFP_ATOMIC : GFP_KERNEL); |
60 | if (!(tmr)) { | 53 | if (!tmr) { |
61 | printk(KERN_ERR "Unable to allocate struct se_tmr_req\n"); | 54 | pr_err("Unable to allocate struct se_tmr_req\n"); |
62 | return ERR_PTR(-ENOMEM); | 55 | return ERR_PTR(-ENOMEM); |
63 | } | 56 | } |
64 | tmr->task_cmd = se_cmd; | 57 | tmr->task_cmd = se_cmd; |
@@ -80,9 +73,9 @@ void core_tmr_release_req( | |||
80 | return; | 73 | return; |
81 | } | 74 | } |
82 | 75 | ||
83 | spin_lock(&dev->se_tmr_lock); | 76 | spin_lock_irq(&dev->se_tmr_lock); |
84 | list_del(&tmr->tmr_list); | 77 | list_del(&tmr->tmr_list); |
85 | spin_unlock(&dev->se_tmr_lock); | 78 | spin_unlock_irq(&dev->se_tmr_lock); |
86 | 79 | ||
87 | kmem_cache_free(se_tmr_req_cache, tmr); | 80 | kmem_cache_free(se_tmr_req_cache, tmr); |
88 | } | 81 | } |
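The se_tmr_lock acquisitions above also switch to the _irq variants. As a general rule — sketched below without claiming this driver's exact call graph — a lock that can be contended from interrupt context must be taken with interrupts disabled in process context, or an interrupt arriving on the same CPU can deadlock against its own holder:

    /* Process context: block local interrupts while holding the lock. */
    spin_lock_irq(&dev->se_tmr_lock);
    list_del(&tmr->tmr_list);
    spin_unlock_irq(&dev->se_tmr_lock);

    /* If the lock may also be taken with interrupts already disabled at
     * an unknown nesting depth, the irqsave form preserves prior state: */
    unsigned long flags;
    spin_lock_irqsave(&dev->se_tmr_lock, flags);
    list_del(&tmr->tmr_list);
    spin_unlock_irqrestore(&dev->se_tmr_lock, flags);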
@@ -93,14 +86,14 @@ static void core_tmr_handle_tas_abort( | |||
93 | int tas, | 86 | int tas, |
94 | int fe_count) | 87 | int fe_count) |
95 | { | 88 | { |
96 | if (!(fe_count)) { | 89 | if (!fe_count) { |
97 | transport_cmd_finish_abort(cmd, 1); | 90 | transport_cmd_finish_abort(cmd, 1); |
98 | return; | 91 | return; |
99 | } | 92 | } |
100 | /* | 93 | /* |
101 | * TASK ABORTED status (TAS) bit support | 94 | * TASK ABORTED status (TAS) bit support |
102 | */ | 95 | */ |
103 | if (((tmr_nacl != NULL) && | 96 | if ((tmr_nacl && |
104 | (tmr_nacl == cmd->se_sess->se_node_acl)) || tas) | 97 | (tmr_nacl == cmd->se_sess->se_node_acl)) || tas) |
105 | transport_send_task_abort(cmd); | 98 | transport_send_task_abort(cmd); |
106 | 99 | ||
@@ -113,15 +106,14 @@ int core_tmr_lun_reset( | |||
113 | struct list_head *preempt_and_abort_list, | 106 | struct list_head *preempt_and_abort_list, |
114 | struct se_cmd *prout_cmd) | 107 | struct se_cmd *prout_cmd) |
115 | { | 108 | { |
116 | struct se_cmd *cmd; | 109 | struct se_cmd *cmd, *tcmd; |
117 | struct se_queue_req *qr, *qr_tmp; | ||
118 | struct se_node_acl *tmr_nacl = NULL; | 110 | struct se_node_acl *tmr_nacl = NULL; |
119 | struct se_portal_group *tmr_tpg = NULL; | 111 | struct se_portal_group *tmr_tpg = NULL; |
120 | struct se_queue_obj *qobj = dev->dev_queue_obj; | 112 | struct se_queue_obj *qobj = &dev->dev_queue_obj; |
121 | struct se_tmr_req *tmr_p, *tmr_pp; | 113 | struct se_tmr_req *tmr_p, *tmr_pp; |
122 | struct se_task *task, *task_tmp; | 114 | struct se_task *task, *task_tmp; |
123 | unsigned long flags; | 115 | unsigned long flags; |
124 | int fe_count, state, tas; | 116 | int fe_count, tas; |
125 | /* | 117 | /* |
126 | * TASK_ABORTED status bit, this is configurable via ConfigFS | 118 | * TASK_ABORTED status bit, this is configurable via ConfigFS |
127 | * struct se_device attributes. spc4r17 section 7.4.6 Control mode page | 119 | * struct se_device attributes. spc4r17 section 7.4.6 Control mode page |
@@ -133,7 +125,7 @@ int core_tmr_lun_reset( | |||
133 | * which the command was received shall be completed with TASK ABORTED | 125 | * which the command was received shall be completed with TASK ABORTED |
134 | * status (see SAM-4). | 126 | * status (see SAM-4). |
135 | */ | 127 | */ |
136 | tas = DEV_ATTRIB(dev)->emulate_tas; | 128 | tas = dev->se_sub_dev->se_dev_attrib.emulate_tas; |
137 | /* | 129 | /* |
138 | * Determine if this se_tmr is coming from a $FABRIC_MOD | 130 | * Determine if this se_tmr is coming from a $FABRIC_MOD |
139 | * or struct se_device passthrough.. | 131 | * or struct se_device passthrough.. |
@@ -142,20 +134,20 @@ int core_tmr_lun_reset( | |||
142 | tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; | 134 | tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; |
143 | tmr_tpg = tmr->task_cmd->se_sess->se_tpg; | 135 | tmr_tpg = tmr->task_cmd->se_sess->se_tpg; |
144 | if (tmr_nacl && tmr_tpg) { | 136 | if (tmr_nacl && tmr_tpg) { |
145 | DEBUG_LR("LUN_RESET: TMR caller fabric: %s" | 137 | pr_debug("LUN_RESET: TMR caller fabric: %s" |
146 | " initiator port %s\n", | 138 | " initiator port %s\n", |
147 | TPG_TFO(tmr_tpg)->get_fabric_name(), | 139 | tmr_tpg->se_tpg_tfo->get_fabric_name(), |
148 | tmr_nacl->initiatorname); | 140 | tmr_nacl->initiatorname); |
149 | } | 141 | } |
150 | } | 142 | } |
151 | DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n", | 143 | pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n", |
152 | (preempt_and_abort_list) ? "Preempt" : "TMR", | 144 | (preempt_and_abort_list) ? "Preempt" : "TMR", |
153 | TRANSPORT(dev)->name, tas); | 145 | dev->transport->name, tas); |
154 | /* | 146 | /* |
155 | * Release all pending and outgoing TMRs aside from the received | 147 | * Release all pending and outgoing TMRs aside from the received |
156 | * LUN_RESET tmr.. | 148 | * LUN_RESET tmr.. |
157 | */ | 149 | */ |
158 | spin_lock(&dev->se_tmr_lock); | 150 | spin_lock_irq(&dev->se_tmr_lock); |
159 | list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) { | 151 | list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) { |
160 | /* | 152 | /* |
161 | * Allow the received TMR to return with FUNCTION_COMPLETE. | 153 | * Allow the received TMR to return with FUNCTION_COMPLETE. |
@@ -164,8 +156,8 @@ int core_tmr_lun_reset( | |||
164 | continue; | 156 | continue; |
165 | 157 | ||
166 | cmd = tmr_p->task_cmd; | 158 | cmd = tmr_p->task_cmd; |
167 | if (!(cmd)) { | 159 | if (!cmd) { |
168 | printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n"); | 160 | pr_err("Unable to locate struct se_cmd for TMR\n"); |
169 | continue; | 161 | continue; |
170 | } | 162 | } |
171 | /* | 163 | /* |
@@ -173,33 +165,33 @@ int core_tmr_lun_reset( | |||
173 | * parameter (eg: for PROUT PREEMPT_AND_ABORT service action | 165 | * parameter (eg: for PROUT PREEMPT_AND_ABORT service action |
174 | * skip non registration key matching TMRs. | 166 | * skip non registration key matching TMRs. |
175 | */ | 167 | */ |
176 | if ((preempt_and_abort_list != NULL) && | 168 | if (preempt_and_abort_list && |
177 | (core_scsi3_check_cdb_abort_and_preempt( | 169 | (core_scsi3_check_cdb_abort_and_preempt( |
178 | preempt_and_abort_list, cmd) != 0)) | 170 | preempt_and_abort_list, cmd) != 0)) |
179 | continue; | 171 | continue; |
180 | spin_unlock(&dev->se_tmr_lock); | 172 | spin_unlock_irq(&dev->se_tmr_lock); |
181 | 173 | ||
182 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 174 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
183 | if (!(atomic_read(&T_TASK(cmd)->t_transport_active))) { | 175 | if (!atomic_read(&cmd->t_transport_active)) { |
184 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 176 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
185 | spin_lock(&dev->se_tmr_lock); | 177 | spin_lock_irq(&dev->se_tmr_lock); |
186 | continue; | 178 | continue; |
187 | } | 179 | } |
188 | if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { | 180 | if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { |
189 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 181 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
190 | spin_lock(&dev->se_tmr_lock); | 182 | spin_lock_irq(&dev->se_tmr_lock); |
191 | continue; | 183 | continue; |
192 | } | 184 | } |
193 | DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x," | 185 | pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x," |
194 | " Response: 0x%02x, t_state: %d\n", | 186 | " Response: 0x%02x, t_state: %d\n", |
195 | (preempt_and_abort_list) ? "Preempt" : "", tmr_p, | 187 | (preempt_and_abort_list) ? "Preempt" : "", tmr_p, |
196 | tmr_p->function, tmr_p->response, cmd->t_state); | 188 | tmr_p->function, tmr_p->response, cmd->t_state); |
197 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 189 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
198 | 190 | ||
199 | transport_cmd_finish_abort_tmr(cmd); | 191 | transport_cmd_finish_abort_tmr(cmd); |
200 | spin_lock(&dev->se_tmr_lock); | 192 | spin_lock_irq(&dev->se_tmr_lock); |
201 | } | 193 | } |
202 | spin_unlock(&dev->se_tmr_lock); | 194 | spin_unlock_irq(&dev->se_tmr_lock); |
203 | /* | 195 | /* |
204 | * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status. | 196 | * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status. |
205 | * This is following sam4r17, section 5.6 Aborting commands, Table 38 | 197 | * This is following sam4r17, section 5.6 Aborting commands, Table 38 |
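Note the locking shape in the TMR walk above: the list lock is dropped before taking the nested per-command t_state_lock, then retaken before the next iteration. A condensed sketch of that pattern (the safe iterator only protects against removal of the current entry; dropping the lock mid-walk relies on the surrounding reset path keeping the list stable):

    spin_lock_irq(&dev->se_tmr_lock);
    list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
            cmd = tmr_p->task_cmd;
            spin_unlock_irq(&dev->se_tmr_lock);     /* drop before nested lock */

            spin_lock_irqsave(&cmd->t_state_lock, flags);
            /* ...inspect or update per-command state... */
            spin_unlock_irqrestore(&cmd->t_state_lock, flags);

            spin_lock_irq(&dev->se_tmr_lock);       /* retake before next entry */
    }
    spin_unlock_irq(&dev->se_tmr_lock);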
@@ -224,23 +216,17 @@ int core_tmr_lun_reset( | |||
224 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 216 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
225 | list_for_each_entry_safe(task, task_tmp, &dev->state_task_list, | 217 | list_for_each_entry_safe(task, task_tmp, &dev->state_task_list, |
226 | t_state_list) { | 218 | t_state_list) { |
227 | if (!(TASK_CMD(task))) { | 219 | if (!task->task_se_cmd) { |
228 | printk(KERN_ERR "TASK_CMD(task) is NULL!\n"); | 220 | pr_err("task->task_se_cmd is NULL!\n"); |
229 | continue; | 221 | continue; |
230 | } | 222 | } |
231 | cmd = TASK_CMD(task); | 223 | cmd = task->task_se_cmd; |
232 | 224 | ||
233 | if (!T_TASK(cmd)) { | ||
234 | printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:" | ||
235 | " %p ITT: 0x%08x\n", task, cmd, | ||
236 | CMD_TFO(cmd)->get_task_tag(cmd)); | ||
237 | continue; | ||
238 | } | ||
239 | /* | 225 | /* |
240 | * For PREEMPT_AND_ABORT usage, only process commands | 226 | * For PREEMPT_AND_ABORT usage, only process commands |
241 | * with a matching reservation key. | 227 | * with a matching reservation key. |
242 | */ | 228 | */ |
243 | if ((preempt_and_abort_list != NULL) && | 229 | if (preempt_and_abort_list && |
244 | (core_scsi3_check_cdb_abort_and_preempt( | 230 | (core_scsi3_check_cdb_abort_and_preempt( |
245 | preempt_and_abort_list, cmd) != 0)) | 231 | preempt_and_abort_list, cmd) != 0)) |
246 | continue; | 232 | continue; |
@@ -254,38 +240,38 @@ int core_tmr_lun_reset( | |||
254 | atomic_set(&task->task_state_active, 0); | 240 | atomic_set(&task->task_state_active, 0); |
255 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | 241 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
256 | 242 | ||
257 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 243 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
258 | DEBUG_LR("LUN_RESET: %s cmd: %p task: %p" | 244 | pr_debug("LUN_RESET: %s cmd: %p task: %p" |
259 | " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/" | 245 | " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/" |
260 | "def_t_state: %d/%d cdb: 0x%02x\n", | 246 | "def_t_state: %d/%d cdb: 0x%02x\n", |
261 | (preempt_and_abort_list) ? "Preempt" : "", cmd, task, | 247 | (preempt_and_abort_list) ? "Preempt" : "", cmd, task, |
262 | CMD_TFO(cmd)->get_task_tag(cmd), 0, | 248 | cmd->se_tfo->get_task_tag(cmd), 0, |
263 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state, | 249 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, |
264 | cmd->deferred_t_state, T_TASK(cmd)->t_task_cdb[0]); | 250 | cmd->deferred_t_state, cmd->t_task_cdb[0]); |
265 | DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" | 251 | pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" |
266 | " t_task_cdbs: %d t_task_cdbs_left: %d" | 252 | " t_task_cdbs: %d t_task_cdbs_left: %d" |
267 | " t_task_cdbs_sent: %d -- t_transport_active: %d" | 253 | " t_task_cdbs_sent: %d -- t_transport_active: %d" |
268 | " t_transport_stop: %d t_transport_sent: %d\n", | 254 | " t_transport_stop: %d t_transport_sent: %d\n", |
269 | CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key, | 255 | cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key, |
270 | T_TASK(cmd)->t_task_cdbs, | 256 | cmd->t_task_list_num, |
271 | atomic_read(&T_TASK(cmd)->t_task_cdbs_left), | 257 | atomic_read(&cmd->t_task_cdbs_left), |
272 | atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), | 258 | atomic_read(&cmd->t_task_cdbs_sent), |
273 | atomic_read(&T_TASK(cmd)->t_transport_active), | 259 | atomic_read(&cmd->t_transport_active), |
274 | atomic_read(&T_TASK(cmd)->t_transport_stop), | 260 | atomic_read(&cmd->t_transport_stop), |
275 | atomic_read(&T_TASK(cmd)->t_transport_sent)); | 261 | atomic_read(&cmd->t_transport_sent)); |
276 | 262 | ||
277 | if (atomic_read(&task->task_active)) { | 263 | if (atomic_read(&task->task_active)) { |
278 | atomic_set(&task->task_stop, 1); | 264 | atomic_set(&task->task_stop, 1); |
279 | spin_unlock_irqrestore( | 265 | spin_unlock_irqrestore( |
280 | &T_TASK(cmd)->t_state_lock, flags); | 266 | &cmd->t_state_lock, flags); |
281 | 267 | ||
282 | DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown" | 268 | pr_debug("LUN_RESET: Waiting for task: %p to shutdown" |
283 | " for dev: %p\n", task, dev); | 269 | " for dev: %p\n", task, dev); |
284 | wait_for_completion(&task->task_stop_comp); | 270 | wait_for_completion(&task->task_stop_comp); |
285 | DEBUG_LR("LUN_RESET Completed task: %p shutdown for" | 271 | pr_debug("LUN_RESET Completed task: %p shutdown for" |
286 | " dev: %p\n", task, dev); | 272 | " dev: %p\n", task, dev); |
287 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 273 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
288 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); | 274 | atomic_dec(&cmd->t_task_cdbs_left); |
289 | 275 | ||
290 | atomic_set(&task->task_active, 0); | 276 | atomic_set(&task->task_active, 0); |
291 | atomic_set(&task->task_stop, 0); | 277 | atomic_set(&task->task_stop, 0); |
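Active tasks are stopped with a flag-plus-completion handshake: the reset path sets task_stop, drops the command lock, and sleeps on task_stop_comp until the task's completion path acknowledges. The waiter side is quoted above; the signaling side below is an assumed shape for illustration, not quoted from this diff:

    /* Stop side (as above): request the stop, then wait for the ack. */
    atomic_set(&task->task_stop, 1);
    spin_unlock_irqrestore(&cmd->t_state_lock, flags);
    wait_for_completion(&task->task_stop_comp);

    /* Completion side (assumed shape): the task's normal completion path
     * checks the flag and wakes the waiter instead of finishing itself. */
    if (atomic_read(&task->task_stop)) {
            complete(&task->task_stop_comp);
            return;         /* the waiter finishes the teardown */
    }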
@@ -295,34 +281,34 @@ int core_tmr_lun_reset( | |||
295 | } | 281 | } |
296 | __transport_stop_task_timer(task, &flags); | 282 | __transport_stop_task_timer(task, &flags); |
297 | 283 | ||
298 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) { | 284 | if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { |
299 | spin_unlock_irqrestore( | 285 | spin_unlock_irqrestore( |
300 | &T_TASK(cmd)->t_state_lock, flags); | 286 | &cmd->t_state_lock, flags); |
301 | DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for" | 287 | pr_debug("LUN_RESET: Skipping task: %p, dev: %p for" |
302 | " t_task_cdbs_ex_left: %d\n", task, dev, | 288 | " t_task_cdbs_ex_left: %d\n", task, dev, |
303 | atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left)); | 289 | atomic_read(&cmd->t_task_cdbs_ex_left)); |
304 | 290 | ||
305 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 291 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
306 | continue; | 292 | continue; |
307 | } | 293 | } |
308 | fe_count = atomic_read(&T_TASK(cmd)->t_fe_count); | 294 | fe_count = atomic_read(&cmd->t_fe_count); |
309 | 295 | ||
310 | if (atomic_read(&T_TASK(cmd)->t_transport_active)) { | 296 | if (atomic_read(&cmd->t_transport_active)) { |
311 | DEBUG_LR("LUN_RESET: got t_transport_active = 1 for" | 297 | pr_debug("LUN_RESET: got t_transport_active = 1 for" |
312 | " task: %p, t_fe_count: %d dev: %p\n", task, | 298 | " task: %p, t_fe_count: %d dev: %p\n", task, |
313 | fe_count, dev); | 299 | fe_count, dev); |
314 | atomic_set(&T_TASK(cmd)->t_transport_aborted, 1); | 300 | atomic_set(&cmd->t_transport_aborted, 1); |
315 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | 301 | spin_unlock_irqrestore(&cmd->t_state_lock, |
316 | flags); | 302 | flags); |
317 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); | 303 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); |
318 | 304 | ||
319 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 305 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
320 | continue; | 306 | continue; |
321 | } | 307 | } |
322 | DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p," | 308 | pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p," |
323 | " t_fe_count: %d dev: %p\n", task, fe_count, dev); | 309 | " t_fe_count: %d dev: %p\n", task, fe_count, dev); |
324 | atomic_set(&T_TASK(cmd)->t_transport_aborted, 1); | 310 | atomic_set(&cmd->t_transport_aborted, 1); |
325 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 311 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
326 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); | 312 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); |
327 | 313 | ||
328 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 314 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
@@ -337,25 +323,12 @@ int core_tmr_lun_reset( | |||
337 | * reference, otherwise the struct se_cmd is released. | 323 | * reference, otherwise the struct se_cmd is released. |
338 | */ | 324 | */ |
339 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | 325 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); |
340 | list_for_each_entry_safe(qr, qr_tmp, &qobj->qobj_list, qr_list) { | 326 | list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) { |
341 | cmd = (struct se_cmd *)qr->cmd; | ||
342 | if (!(cmd)) { | ||
343 | /* | ||
344 | * Skip these for non PREEMPT_AND_ABORT usage.. | ||
345 | */ | ||
346 | if (preempt_and_abort_list != NULL) | ||
347 | continue; | ||
348 | |||
349 | atomic_dec(&qobj->queue_cnt); | ||
350 | list_del(&qr->qr_list); | ||
351 | kfree(qr); | ||
352 | continue; | ||
353 | } | ||
354 | /* | 327 | /* |
355 | * For PREEMPT_AND_ABORT usage, only process commands | 328 | * For PREEMPT_AND_ABORT usage, only process commands |
356 | * with a matching reservation key. | 329 | * with a matching reservation key. |
357 | */ | 330 | */ |
358 | if ((preempt_and_abort_list != NULL) && | 331 | if (preempt_and_abort_list && |
359 | (core_scsi3_check_cdb_abort_and_preempt( | 332 | (core_scsi3_check_cdb_abort_and_preempt( |
360 | preempt_and_abort_list, cmd) != 0)) | 333 | preempt_and_abort_list, cmd) != 0)) |
361 | continue; | 334 | continue; |
@@ -365,30 +338,22 @@ int core_tmr_lun_reset( | |||
365 | if (prout_cmd == cmd) | 338 | if (prout_cmd == cmd) |
366 | continue; | 339 | continue; |
367 | 340 | ||
368 | atomic_dec(&T_TASK(cmd)->t_transport_queue_active); | 341 | atomic_dec(&cmd->t_transport_queue_active); |
369 | atomic_dec(&qobj->queue_cnt); | 342 | atomic_dec(&qobj->queue_cnt); |
370 | list_del(&qr->qr_list); | 343 | list_del(&cmd->se_queue_node); |
371 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 344 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
372 | 345 | ||
373 | state = qr->state; | 346 | pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:" |
374 | kfree(qr); | ||
375 | |||
376 | DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:" | ||
377 | " %d t_fe_count: %d\n", (preempt_and_abort_list) ? | 347 | " %d t_fe_count: %d\n", (preempt_and_abort_list) ? |
378 | "Preempt" : "", cmd, state, | 348 | "Preempt" : "", cmd, cmd->t_state, |
379 | atomic_read(&T_TASK(cmd)->t_fe_count)); | 349 | atomic_read(&cmd->t_fe_count)); |
380 | /* | 350 | /* |
381 | * Signal that the command has failed via cmd->se_cmd_flags, | 351 | * Signal that the command has failed via cmd->se_cmd_flags, |
382 | * and call TFO->new_cmd_failure() to wakeup any fabric | ||
383 | * dependent code used to wait for unsolicited data out | ||
384 | * allocation to complete. The fabric module is expected | ||
385 | * to dump any remaining unsolicited data out for the aborted | ||
386 | * command at this point. | ||
387 | */ | 352 | */ |
388 | transport_new_cmd_failure(cmd); | 353 | transport_new_cmd_failure(cmd); |
389 | 354 | ||
390 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, | 355 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, |
391 | atomic_read(&T_TASK(cmd)->t_fe_count)); | 356 | atomic_read(&cmd->t_fe_count)); |
392 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | 357 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); |
393 | } | 358 | } |
394 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 359 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
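The deleted block above is the payoff of removing struct se_queue_req: commands now sit on the queue via a list_head embedded in struct se_cmd (se_queue_node), so there is no per-enqueue wrapper allocation and no NULL-cmd wrapper case to special-case. A standalone model of the embedded-node idiom (container_of is re-derived locally for illustration):

    #include <stdio.h>
    #include <stddef.h>

    struct list_node { struct list_node *next; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* The node lives inside the command, so queuing needs no extra allocation. */
    struct se_cmd_model {
            int t_state;
            struct list_node se_queue_node;
    };

    int main(void)
    {
            struct se_cmd_model cmd = { .t_state = 7 };
            struct list_node *n = &cmd.se_queue_node;

            /* Recover the owning command from the embedded node. */
            struct se_cmd_model *back = container_of(n, struct se_cmd_model, se_queue_node);
            printf("t_state=%d\n", back->t_state);  /* prints: t_state=7 */
            return 0;
    }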
@@ -396,21 +361,21 @@ int core_tmr_lun_reset( | |||
396 | * Clear any legacy SPC-2 reservation when called during | 361 | * Clear any legacy SPC-2 reservation when called during |
397 | * LOGICAL UNIT RESET | 362 | * LOGICAL UNIT RESET |
398 | */ | 363 | */ |
399 | if (!(preempt_and_abort_list) && | 364 | if (!preempt_and_abort_list && |
400 | (dev->dev_flags & DF_SPC2_RESERVATIONS)) { | 365 | (dev->dev_flags & DF_SPC2_RESERVATIONS)) { |
401 | spin_lock(&dev->dev_reservation_lock); | 366 | spin_lock(&dev->dev_reservation_lock); |
402 | dev->dev_reserved_node_acl = NULL; | 367 | dev->dev_reserved_node_acl = NULL; |
403 | dev->dev_flags &= ~DF_SPC2_RESERVATIONS; | 368 | dev->dev_flags &= ~DF_SPC2_RESERVATIONS; |
404 | spin_unlock(&dev->dev_reservation_lock); | 369 | spin_unlock(&dev->dev_reservation_lock); |
405 | printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n"); | 370 | pr_debug("LUN_RESET: SCSI-2 Released reservation\n"); |
406 | } | 371 | } |
407 | 372 | ||
408 | spin_lock_irq(&dev->stats_lock); | 373 | spin_lock_irq(&dev->stats_lock); |
409 | dev->num_resets++; | 374 | dev->num_resets++; |
410 | spin_unlock_irq(&dev->stats_lock); | 375 | spin_unlock_irq(&dev->stats_lock); |
411 | 376 | ||
412 | DEBUG_LR("LUN_RESET: %s for [%s] Complete\n", | 377 | pr_debug("LUN_RESET: %s for [%s] Complete\n", |
413 | (preempt_and_abort_list) ? "Preempt" : "TMR", | 378 | (preempt_and_abort_list) ? "Preempt" : "TMR", |
414 | TRANSPORT(dev)->name); | 379 | dev->transport->name); |
415 | return 0; | 380 | return 0; |
416 | } | 381 | } |
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 5ec745fed931..4f1ba4c5ef11 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c | |||
@@ -44,6 +44,12 @@ | |||
44 | #include <target/target_core_fabric_ops.h> | 44 | #include <target/target_core_fabric_ops.h> |
45 | 45 | ||
46 | #include "target_core_hba.h" | 46 | #include "target_core_hba.h" |
47 | #include "target_core_stat.h" | ||
48 | |||
49 | extern struct se_device *g_lun0_dev; | ||
50 | |||
51 | static DEFINE_SPINLOCK(tpg_lock); | ||
52 | static LIST_HEAD(tpg_list); | ||
47 | 53 | ||
48 | /* core_clear_initiator_node_from_tpg(): | 54 | /* core_clear_initiator_node_from_tpg(): |
49 | * | 55 | * |
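The added tpg_lock/tpg_list use the kernel's static initializer macros, so the objects are usable from the first call without an init routine. A short sketch of the idiom (the se_tpg_node member name is assumed from the surrounding code, and spin_lock_bh matches this file's other locking):

    /* Statically initialized, file-local state: ready before first use,
     * no explicit spin_lock_init()/INIT_LIST_HEAD() call required. */
    static DEFINE_SPINLOCK(tpg_lock);
    static LIST_HEAD(tpg_list);

    static void tpg_track(struct se_portal_group *se_tpg)
    {
            spin_lock_bh(&tpg_lock);
            list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
            spin_unlock_bh(&tpg_lock);
    }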
@@ -66,9 +72,9 @@ static void core_clear_initiator_node_from_tpg( | |||
66 | continue; | 72 | continue; |
67 | 73 | ||
68 | if (!deve->se_lun) { | 74 | if (!deve->se_lun) { |
69 | printk(KERN_ERR "%s device entry's device pointer is" | 75 | pr_err("%s device entry's device pointer is" |
70 | " NULL, but Initiator has access.\n", | 76 | " NULL, but Initiator has access.\n", |
71 | TPG_TFO(tpg)->get_fabric_name()); | 77 | tpg->se_tpg_tfo->get_fabric_name()); |
72 | continue; | 78 | continue; |
73 | } | 79 | } |
74 | 80 | ||
@@ -80,14 +86,13 @@ static void core_clear_initiator_node_from_tpg( | |||
80 | spin_lock(&lun->lun_acl_lock); | 86 | spin_lock(&lun->lun_acl_lock); |
81 | list_for_each_entry_safe(acl, acl_tmp, | 87 | list_for_each_entry_safe(acl, acl_tmp, |
82 | &lun->lun_acl_list, lacl_list) { | 88 | &lun->lun_acl_list, lacl_list) { |
83 | if (!(strcmp(acl->initiatorname, | 89 | if (!strcmp(acl->initiatorname, nacl->initiatorname) && |
84 | nacl->initiatorname)) && | 90 | (acl->mapped_lun == deve->mapped_lun)) |
85 | (acl->mapped_lun == deve->mapped_lun)) | ||
86 | break; | 91 | break; |
87 | } | 92 | } |
88 | 93 | ||
89 | if (!acl) { | 94 | if (!acl) { |
90 | printk(KERN_ERR "Unable to locate struct se_lun_acl for %s," | 95 | pr_err("Unable to locate struct se_lun_acl for %s," |
91 | " mapped_lun: %u\n", nacl->initiatorname, | 96 | " mapped_lun: %u\n", nacl->initiatorname, |
92 | deve->mapped_lun); | 97 | deve->mapped_lun); |
93 | spin_unlock(&lun->lun_acl_lock); | 98 | spin_unlock(&lun->lun_acl_lock); |
@@ -115,7 +120,7 @@ struct se_node_acl *__core_tpg_get_initiator_node_acl( | |||
115 | struct se_node_acl *acl; | 120 | struct se_node_acl *acl; |
116 | 121 | ||
117 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { | 122 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { |
118 | if (!(strcmp(acl->initiatorname, initiatorname))) | 123 | if (!strcmp(acl->initiatorname, initiatorname)) |
119 | return acl; | 124 | return acl; |
120 | } | 125 | } |
121 | 126 | ||
@@ -134,8 +139,8 @@ struct se_node_acl *core_tpg_get_initiator_node_acl( | |||
134 | 139 | ||
135 | spin_lock_bh(&tpg->acl_node_lock); | 140 | spin_lock_bh(&tpg->acl_node_lock); |
136 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { | 141 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { |
137 | if (!(strcmp(acl->initiatorname, initiatorname)) && | 142 | if (!strcmp(acl->initiatorname, initiatorname) && |
138 | (!(acl->dynamic_node_acl))) { | 143 | !acl->dynamic_node_acl) { |
139 | spin_unlock_bh(&tpg->acl_node_lock); | 144 | spin_unlock_bh(&tpg->acl_node_lock); |
140 | return acl; | 145 | return acl; |
141 | } | 146 | } |
@@ -171,7 +176,7 @@ void core_tpg_add_node_to_devs( | |||
171 | * By default in LIO-Target $FABRIC_MOD, | 176 | * By default in LIO-Target $FABRIC_MOD, |
172 | * demo_mode_write_protect is ON, or READ_ONLY; | 177 | * demo_mode_write_protect is ON, or READ_ONLY; |
173 | */ | 178 | */ |
174 | if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) { | 179 | if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) { |
175 | if (dev->dev_flags & DF_READ_ONLY) | 180 | if (dev->dev_flags & DF_READ_ONLY) |
176 | lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; | 181 | lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; |
177 | else | 182 | else |
@@ -181,16 +186,16 @@ void core_tpg_add_node_to_devs( | |||
181 | * Allow only optical drives to issue R/W in default RO | 186 | * Allow only optical drives to issue R/W in default RO |
182 | * demo mode. | 187 | * demo mode. |
183 | */ | 188 | */ |
184 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) | 189 | if (dev->transport->get_device_type(dev) == TYPE_DISK) |
185 | lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; | 190 | lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; |
186 | else | 191 | else |
187 | lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; | 192 | lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; |
188 | } | 193 | } |
189 | 194 | ||
190 | printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s" | 195 | pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s" |
191 | " access for LUN in Demo Mode\n", | 196 | " access for LUN in Demo Mode\n", |
192 | TPG_TFO(tpg)->get_fabric_name(), | 197 | tpg->se_tpg_tfo->get_fabric_name(), |
193 | TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun, | 198 | tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, |
194 | (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ? | 199 | (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ? |
195 | "READ-WRITE" : "READ-ONLY"); | 200 | "READ-WRITE" : "READ-ONLY"); |
196 | 201 | ||
@@ -210,8 +215,8 @@ static int core_set_queue_depth_for_node( | |||
210 | struct se_node_acl *acl) | 215 | struct se_node_acl *acl) |
211 | { | 216 | { |
212 | if (!acl->queue_depth) { | 217 | if (!acl->queue_depth) { |
213 | printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0," | 218 | pr_err("Queue depth for %s Initiator Node: %s is 0," |
214 | " defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(), | 219 | " defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(), |
215 | acl->initiatorname); | 220 | acl->initiatorname); |
216 | acl->queue_depth = 1; | 221 | acl->queue_depth = 1; |
217 | } | 222 | } |
@@ -230,10 +235,10 @@ static int core_create_device_list_for_node(struct se_node_acl *nacl) | |||
230 | 235 | ||
231 | nacl->device_list = kzalloc(sizeof(struct se_dev_entry) * | 236 | nacl->device_list = kzalloc(sizeof(struct se_dev_entry) * |
232 | TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL); | 237 | TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL); |
233 | if (!(nacl->device_list)) { | 238 | if (!nacl->device_list) { |
234 | printk(KERN_ERR "Unable to allocate memory for" | 239 | pr_err("Unable to allocate memory for" |
235 | " struct se_node_acl->device_list\n"); | 240 | " struct se_node_acl->device_list\n"); |
236 | return -1; | 241 | return -ENOMEM; |
237 | } | 242 | } |
238 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 243 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
239 | deve = &nacl->device_list[i]; | 244 | deve = &nacl->device_list[i]; |
@@ -259,14 +264,14 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( | |||
259 | struct se_node_acl *acl; | 264 | struct se_node_acl *acl; |
260 | 265 | ||
261 | acl = core_tpg_get_initiator_node_acl(tpg, initiatorname); | 266 | acl = core_tpg_get_initiator_node_acl(tpg, initiatorname); |
262 | if ((acl)) | 267 | if (acl) |
263 | return acl; | 268 | return acl; |
264 | 269 | ||
265 | if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg))) | 270 | if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) |
266 | return NULL; | 271 | return NULL; |
267 | 272 | ||
268 | acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg); | 273 | acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg); |
269 | if (!(acl)) | 274 | if (!acl) |
270 | return NULL; | 275 | return NULL; |
271 | 276 | ||
272 | INIT_LIST_HEAD(&acl->acl_list); | 277 | INIT_LIST_HEAD(&acl->acl_list); |
@@ -274,23 +279,23 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( | |||
274 | spin_lock_init(&acl->device_list_lock); | 279 | spin_lock_init(&acl->device_list_lock); |
275 | spin_lock_init(&acl->nacl_sess_lock); | 280 | spin_lock_init(&acl->nacl_sess_lock); |
276 | atomic_set(&acl->acl_pr_ref_count, 0); | 281 | atomic_set(&acl->acl_pr_ref_count, 0); |
277 | acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg); | 282 | acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg); |
278 | snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); | 283 | snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); |
279 | acl->se_tpg = tpg; | 284 | acl->se_tpg = tpg; |
280 | acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); | 285 | acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); |
281 | spin_lock_init(&acl->stats_lock); | 286 | spin_lock_init(&acl->stats_lock); |
282 | acl->dynamic_node_acl = 1; | 287 | acl->dynamic_node_acl = 1; |
283 | 288 | ||
284 | TPG_TFO(tpg)->set_default_node_attributes(acl); | 289 | tpg->se_tpg_tfo->set_default_node_attributes(acl); |
285 | 290 | ||
286 | if (core_create_device_list_for_node(acl) < 0) { | 291 | if (core_create_device_list_for_node(acl) < 0) { |
287 | TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); | 292 | tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); |
288 | return NULL; | 293 | return NULL; |
289 | } | 294 | } |
290 | 295 | ||
291 | if (core_set_queue_depth_for_node(tpg, acl) < 0) { | 296 | if (core_set_queue_depth_for_node(tpg, acl) < 0) { |
292 | core_free_device_list_for_node(acl, tpg); | 297 | core_free_device_list_for_node(acl, tpg); |
293 | TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); | 298 | tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); |
294 | return NULL; | 299 | return NULL; |
295 | } | 300 | } |
296 | 301 | ||
@@ -301,10 +306,10 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( | |||
301 | tpg->num_node_acls++; | 306 | tpg->num_node_acls++; |
302 | spin_unlock_bh(&tpg->acl_node_lock); | 307 | spin_unlock_bh(&tpg->acl_node_lock); |
303 | 308 | ||
304 | printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" | 309 | pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" |
305 | " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(), | 310 | " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
306 | TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth, | 311 | tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, |
307 | TPG_TFO(tpg)->get_fabric_name(), initiatorname); | 312 | tpg->se_tpg_tfo->get_fabric_name(), initiatorname); |
308 | 313 | ||
309 | return acl; | 314 | return acl; |
310 | } | 315 | } |
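On failure, the dynamic-ACL constructor above unwinds in strict reverse order of construction: a failed queue-depth setup first frees the device list, then releases the fabric ACL. The same shape in the kernel's more common goto-label form (a sketch, not a proposed change to this function):

    if (core_create_device_list_for_node(acl) < 0)
            goto out_release_acl;
    if (core_set_queue_depth_for_node(tpg, acl) < 0)
            goto out_free_device_list;
    return acl;

    out_free_device_list:
            core_free_device_list_for_node(acl, tpg);          /* undo step 2 */
    out_release_acl:
            tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); /* undo step 1 */
            return NULL;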
@@ -351,12 +356,12 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( | |||
351 | 356 | ||
352 | spin_lock_bh(&tpg->acl_node_lock); | 357 | spin_lock_bh(&tpg->acl_node_lock); |
353 | acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); | 358 | acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); |
354 | if ((acl)) { | 359 | if (acl) { |
355 | if (acl->dynamic_node_acl) { | 360 | if (acl->dynamic_node_acl) { |
356 | acl->dynamic_node_acl = 0; | 361 | acl->dynamic_node_acl = 0; |
357 | printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL" | 362 | pr_debug("%s_TPG[%u] - Replacing dynamic ACL" |
358 | " for %s\n", TPG_TFO(tpg)->get_fabric_name(), | 363 | " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
359 | TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname); | 364 | tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); |
360 | spin_unlock_bh(&tpg->acl_node_lock); | 365 | spin_unlock_bh(&tpg->acl_node_lock); |
361 | /* | 366 | /* |
362 | * Release the locally allocated struct se_node_acl | 367 | * Release the locally allocated struct se_node_acl |
@@ -364,22 +369,22 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( | |||
364 | * a pointer to an existing demo mode node ACL. | 369 | * a pointer to an existing demo mode node ACL. |
365 | */ | 370 | */ |
366 | if (se_nacl) | 371 | if (se_nacl) |
367 | TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, | 372 | tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, |
368 | se_nacl); | 373 | se_nacl); |
369 | goto done; | 374 | goto done; |
370 | } | 375 | } |
371 | 376 | ||
372 | printk(KERN_ERR "ACL entry for %s Initiator" | 377 | pr_err("ACL entry for %s Initiator" |
373 | " Node %s already exists for TPG %u, ignoring" | 378 | " Node %s already exists for TPG %u, ignoring" |
374 | " request.\n", TPG_TFO(tpg)->get_fabric_name(), | 379 | " request.\n", tpg->se_tpg_tfo->get_fabric_name(), |
375 | initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg)); | 380 | initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
376 | spin_unlock_bh(&tpg->acl_node_lock); | 381 | spin_unlock_bh(&tpg->acl_node_lock); |
377 | return ERR_PTR(-EEXIST); | 382 | return ERR_PTR(-EEXIST); |
378 | } | 383 | } |
379 | spin_unlock_bh(&tpg->acl_node_lock); | 384 | spin_unlock_bh(&tpg->acl_node_lock); |
380 | 385 | ||
381 | if (!(se_nacl)) { | 386 | if (!se_nacl) { |
382 | printk("struct se_node_acl pointer is NULL\n"); | 387 | pr_err("struct se_node_acl pointer is NULL\n"); |
383 | return ERR_PTR(-EINVAL); | 388 | return ERR_PTR(-EINVAL); |
384 | } | 389 | } |
385 | /* | 390 | /* |
@@ -400,16 +405,16 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( | |||
400 | acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); | 405 | acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); |
401 | spin_lock_init(&acl->stats_lock); | 406 | spin_lock_init(&acl->stats_lock); |
402 | 407 | ||
403 | TPG_TFO(tpg)->set_default_node_attributes(acl); | 408 | tpg->se_tpg_tfo->set_default_node_attributes(acl); |
404 | 409 | ||
405 | if (core_create_device_list_for_node(acl) < 0) { | 410 | if (core_create_device_list_for_node(acl) < 0) { |
406 | TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); | 411 | tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); |
407 | return ERR_PTR(-ENOMEM); | 412 | return ERR_PTR(-ENOMEM); |
408 | } | 413 | } |
409 | 414 | ||
410 | if (core_set_queue_depth_for_node(tpg, acl) < 0) { | 415 | if (core_set_queue_depth_for_node(tpg, acl) < 0) { |
411 | core_free_device_list_for_node(acl, tpg); | 416 | core_free_device_list_for_node(acl, tpg); |
412 | TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); | 417 | tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); |
413 | return ERR_PTR(-EINVAL); | 418 | return ERR_PTR(-EINVAL); |
414 | } | 419 | } |
415 | 420 | ||
@@ -419,10 +424,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( | |||
419 | spin_unlock_bh(&tpg->acl_node_lock); | 424 | spin_unlock_bh(&tpg->acl_node_lock); |
420 | 425 | ||
421 | done: | 426 | done: |
422 | printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" | 427 | pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" |
423 | " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(), | 428 | " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
424 | TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth, | 429 | tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, |
425 | TPG_TFO(tpg)->get_fabric_name(), initiatorname); | 430 | tpg->se_tpg_tfo->get_fabric_name(), initiatorname); |
426 | 431 | ||
427 | return acl; | 432 | return acl; |
428 | } | 433 | } |
@@ -457,7 +462,7 @@ int core_tpg_del_initiator_node_acl( | |||
457 | /* | 462 | /* |
458 | * Determine if the session needs to be closed by our context. | 463 | * Determine if the session needs to be closed by our context. |
459 | */ | 464 | */ |
460 | if (!(TPG_TFO(tpg)->shutdown_session(sess))) | 465 | if (!tpg->se_tpg_tfo->shutdown_session(sess)) |
461 | continue; | 466 | continue; |
462 | 467 | ||
463 | spin_unlock_bh(&tpg->session_lock); | 468 | spin_unlock_bh(&tpg->session_lock); |
@@ -465,7 +470,7 @@ int core_tpg_del_initiator_node_acl( | |||
465 | * If the $FABRIC_MOD session for the Initiator Node ACL exists, | 470 | * If the $FABRIC_MOD session for the Initiator Node ACL exists, |
466 | * forcefully shutdown the $FABRIC_MOD session/nexus. | 471 | * forcefully shutdown the $FABRIC_MOD session/nexus. |
467 | */ | 472 | */ |
468 | TPG_TFO(tpg)->close_session(sess); | 473 | tpg->se_tpg_tfo->close_session(sess); |
469 | 474 | ||
470 | spin_lock_bh(&tpg->session_lock); | 475 | spin_lock_bh(&tpg->session_lock); |
471 | } | 476 | } |
@@ -475,10 +480,10 @@ int core_tpg_del_initiator_node_acl( | |||
475 | core_clear_initiator_node_from_tpg(acl, tpg); | 480 | core_clear_initiator_node_from_tpg(acl, tpg); |
476 | core_free_device_list_for_node(acl, tpg); | 481 | core_free_device_list_for_node(acl, tpg); |
477 | 482 | ||
478 | printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s" | 483 | pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s" |
479 | " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(), | 484 | " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
480 | TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth, | 485 | tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, |
481 | TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname); | 486 | tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname); |
482 | 487 | ||
483 | return 0; | 488 | return 0; |
484 | } | 489 | } |
@@ -500,11 +505,11 @@ int core_tpg_set_initiator_node_queue_depth( | |||
500 | 505 | ||
501 | spin_lock_bh(&tpg->acl_node_lock); | 506 | spin_lock_bh(&tpg->acl_node_lock); |
502 | acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); | 507 | acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); |
503 | if (!(acl)) { | 508 | if (!acl) { |
504 | printk(KERN_ERR "Access Control List entry for %s Initiator" | 509 | pr_err("Access Control List entry for %s Initiator" |
505 | " Node %s does not exists for TPG %hu, ignoring" | 510 | " Node %s does not exists for TPG %hu, ignoring" |
506 | " request.\n", TPG_TFO(tpg)->get_fabric_name(), | 511 | " request.\n", tpg->se_tpg_tfo->get_fabric_name(), |
507 | initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg)); | 512 | initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
508 | spin_unlock_bh(&tpg->acl_node_lock); | 513 | spin_unlock_bh(&tpg->acl_node_lock); |
509 | return -ENODEV; | 514 | return -ENODEV; |
510 | } | 515 | } |
@@ -520,12 +525,12 @@ int core_tpg_set_initiator_node_queue_depth( | |||
520 | continue; | 525 | continue; |
521 | 526 | ||
522 | if (!force) { | 527 | if (!force) { |
523 | printk(KERN_ERR "Unable to change queue depth for %s" | 528 | pr_err("Unable to change queue depth for %s" |
524 | " Initiator Node: %s while session is" | 529 | " Initiator Node: %s while session is" |
525 | " operational. To forcefully change the queue" | 530 | " operational. To forcefully change the queue" |
526 | " depth and force session reinstatement" | 531 | " depth and force session reinstatement" |
527 | " use the \"force=1\" parameter.\n", | 532 | " use the \"force=1\" parameter.\n", |
528 | TPG_TFO(tpg)->get_fabric_name(), initiatorname); | 533 | tpg->se_tpg_tfo->get_fabric_name(), initiatorname); |
529 | spin_unlock_bh(&tpg->session_lock); | 534 | spin_unlock_bh(&tpg->session_lock); |
530 | 535 | ||
531 | spin_lock_bh(&tpg->acl_node_lock); | 536 | spin_lock_bh(&tpg->acl_node_lock); |
@@ -537,7 +542,7 @@ int core_tpg_set_initiator_node_queue_depth( | |||
537 | /* | 542 | /* |
538 | * Determine if the session needs to be closed by our context. | 543 | * Determine if the session needs to be closed by our context. |
539 | */ | 544 | */ |
540 | if (!(TPG_TFO(tpg)->shutdown_session(sess))) | 545 | if (!tpg->se_tpg_tfo->shutdown_session(sess)) |
541 | continue; | 546 | continue; |
542 | 547 | ||
543 | init_sess = sess; | 548 | init_sess = sess; |
@@ -549,7 +554,7 @@ int core_tpg_set_initiator_node_queue_depth( | |||
549 | * Change the value in the Node's struct se_node_acl, and call | 554 | * Change the value in the Node's struct se_node_acl, and call |
550 | * core_set_queue_depth_for_node() to add the requested queue depth. | 555 | * core_set_queue_depth_for_node() to add the requested queue depth. |
551 | * | 556 | * |
552 | * Finally call TPG_TFO(tpg)->close_session() to force session | 557 | * Finally call tpg->se_tpg_tfo->close_session() to force session |
553 | * reinstatement to occur if there is an active session for the | 558 | * reinstatement to occur if there is an active session for the |
554 | * $FABRIC_MOD Initiator Node in question. | 559 | * $FABRIC_MOD Initiator Node in question. |
555 | */ | 560 | */ |
@@ -561,10 +566,10 @@ int core_tpg_set_initiator_node_queue_depth( | |||
561 | * Force session reinstatement if | 566 | * Force session reinstatement if |
562 | * core_set_queue_depth_for_node() failed, because we assume | 567 | * core_set_queue_depth_for_node() failed, because we assume |
563 | * the $FABRIC_MOD has already set the session reinstatement | 568 | * the $FABRIC_MOD has already set the session reinstatement |
564 | * bit from TPG_TFO(tpg)->shutdown_session() called above. | 569 | * bit from tpg->se_tpg_tfo->shutdown_session() called above. |
565 | */ | 570 | */ |
566 | if (init_sess) | 571 | if (init_sess) |
567 | TPG_TFO(tpg)->close_session(init_sess); | 572 | tpg->se_tpg_tfo->close_session(init_sess); |
568 | 573 | ||
569 | spin_lock_bh(&tpg->acl_node_lock); | 574 | spin_lock_bh(&tpg->acl_node_lock); |
570 | if (dynamic_acl) | 575 | if (dynamic_acl) |
@@ -578,12 +583,12 @@ int core_tpg_set_initiator_node_queue_depth( | |||
578 | * forcefully shutdown the $FABRIC_MOD session/nexus. | 583 | * forcefully shutdown the $FABRIC_MOD session/nexus. |
579 | */ | 584 | */ |
580 | if (init_sess) | 585 | if (init_sess) |
581 | TPG_TFO(tpg)->close_session(init_sess); | 586 | tpg->se_tpg_tfo->close_session(init_sess); |
582 | 587 | ||
583 | printk(KERN_INFO "Successfuly changed queue depth to: %d for Initiator" | 588 | pr_debug("Successfuly changed queue depth to: %d for Initiator" |
584 | " Node: %s on %s Target Portal Group: %u\n", queue_depth, | 589 | " Node: %s on %s Target Portal Group: %u\n", queue_depth, |
585 | initiatorname, TPG_TFO(tpg)->get_fabric_name(), | 590 | initiatorname, tpg->se_tpg_tfo->get_fabric_name(), |
586 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 591 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
587 | 592 | ||
588 | spin_lock_bh(&tpg->acl_node_lock); | 593 | spin_lock_bh(&tpg->acl_node_lock); |
589 | if (dynamic_acl) | 594 | if (dynamic_acl) |
@@ -597,7 +602,7 @@ EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth); | |||
597 | static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) | 602 | static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) |
598 | { | 603 | { |
599 | /* Set in core_dev_setup_virtual_lun0() */ | 604 | /* Set in core_dev_setup_virtual_lun0() */ |
600 | struct se_device *dev = se_global->g_lun0_dev; | 605 | struct se_device *dev = g_lun0_dev; |
601 | struct se_lun *lun = &se_tpg->tpg_virt_lun0; | 606 | struct se_lun *lun = &se_tpg->tpg_virt_lun0; |
602 | u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; | 607 | u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; |
603 | int ret; | 608 | int ret; |
@@ -614,7 +619,7 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) | |||
614 | 619 | ||
615 | ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); | 620 | ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); |
616 | if (ret < 0) | 621 | if (ret < 0) |
617 | return -1; | 622 | return ret; |
618 | 623 | ||
619 | return 0; | 624 | return 0; |
620 | } | 625 | } |
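Another series-wide cleanup is visible here: bare return -1 is replaced by propagating the callee's errno. A short sketch of the pattern, using the names from this hunk:

    /* old: every failure collapsed into -1 */
    if (core_tpg_post_addlun(se_tpg, lun, lun_access, dev) < 0)
            return -1;

    /* new: hand the callee's -errno straight back to the caller */
    ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
    if (ret < 0)
            return ret;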
@@ -638,8 +643,8 @@ int core_tpg_register( | |||
638 | 643 | ||
639 | se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) * | 644 | se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) * |
640 | TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL); | 645 | TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL); |
641 | if (!(se_tpg->tpg_lun_list)) { | 646 | if (!se_tpg->tpg_lun_list) { |
642 | printk(KERN_ERR "Unable to allocate struct se_portal_group->" | 647 | pr_err("Unable to allocate struct se_portal_group->" |
643 | "tpg_lun_list\n"); | 648 | "tpg_lun_list\n"); |
644 | return -ENOMEM; | 649 | return -ENOMEM; |
645 | } | 650 | } |
@@ -663,7 +668,7 @@ int core_tpg_register( | |||
663 | se_tpg->se_tpg_wwn = se_wwn; | 668 | se_tpg->se_tpg_wwn = se_wwn; |
664 | atomic_set(&se_tpg->tpg_pr_ref_count, 0); | 669 | atomic_set(&se_tpg->tpg_pr_ref_count, 0); |
665 | INIT_LIST_HEAD(&se_tpg->acl_node_list); | 670 | INIT_LIST_HEAD(&se_tpg->acl_node_list); |
666 | INIT_LIST_HEAD(&se_tpg->se_tpg_list); | 671 | INIT_LIST_HEAD(&se_tpg->se_tpg_node); |
667 | INIT_LIST_HEAD(&se_tpg->tpg_sess_list); | 672 | INIT_LIST_HEAD(&se_tpg->tpg_sess_list); |
668 | spin_lock_init(&se_tpg->acl_node_lock); | 673 | spin_lock_init(&se_tpg->acl_node_lock); |
669 | spin_lock_init(&se_tpg->session_lock); | 674 | spin_lock_init(&se_tpg->session_lock); |
@@ -676,11 +681,11 @@ int core_tpg_register( | |||
676 | } | 681 | } |
677 | } | 682 | } |
678 | 683 | ||
679 | spin_lock_bh(&se_global->se_tpg_lock); | 684 | spin_lock_bh(&tpg_lock); |
680 | list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list); | 685 | list_add_tail(&se_tpg->se_tpg_node, &tpg_list); |
681 | spin_unlock_bh(&se_global->se_tpg_lock); | 686 | spin_unlock_bh(&tpg_lock); |
682 | 687 | ||
683 | printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for" | 688 | pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for" |
684 | " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(), | 689 | " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(), |
685 | (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? | 690 | (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? |
686 | "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ? | 691 | "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ? |
@@ -694,16 +699,16 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) | |||
694 | { | 699 | { |
695 | struct se_node_acl *nacl, *nacl_tmp; | 700 | struct se_node_acl *nacl, *nacl_tmp; |
696 | 701 | ||
697 | printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group" | 702 | pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group" |
698 | " for endpoint: %s Portal Tag %u\n", | 703 | " for endpoint: %s Portal Tag %u\n", |
699 | (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? | 704 | (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? |
700 | "Normal" : "Discovery", TPG_TFO(se_tpg)->get_fabric_name(), | 705 | "Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(), |
701 | TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg), | 706 | se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg), |
702 | TPG_TFO(se_tpg)->tpg_get_tag(se_tpg)); | 707 | se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); |
703 | 708 | ||
704 | spin_lock_bh(&se_global->se_tpg_lock); | 709 | spin_lock_bh(&tpg_lock); |
705 | list_del(&se_tpg->se_tpg_list); | 710 | list_del(&se_tpg->se_tpg_node); |
706 | spin_unlock_bh(&se_global->se_tpg_lock); | 711 | spin_unlock_bh(&tpg_lock); |
707 | 712 | ||
708 | while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) | 713 | while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) |
709 | cpu_relax(); | 714 | cpu_relax(); |
@@ -721,7 +726,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) | |||
721 | 726 | ||
722 | core_tpg_wait_for_nacl_pr_ref(nacl); | 727 | core_tpg_wait_for_nacl_pr_ref(nacl); |
723 | core_free_device_list_for_node(nacl, se_tpg); | 728 | core_free_device_list_for_node(nacl, se_tpg); |
724 | TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl); | 729 | se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl); |
725 | 730 | ||
726 | spin_lock_bh(&se_tpg->acl_node_lock); | 731 | spin_lock_bh(&se_tpg->acl_node_lock); |
727 | } | 732 | } |
@@ -743,21 +748,21 @@ struct se_lun *core_tpg_pre_addlun( | |||
743 | struct se_lun *lun; | 748 | struct se_lun *lun; |
744 | 749 | ||
745 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | 750 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { |
746 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" | 751 | pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" |
747 | "-1: %u for Target Portal Group: %u\n", | 752 | "-1: %u for Target Portal Group: %u\n", |
748 | TPG_TFO(tpg)->get_fabric_name(), | 753 | tpg->se_tpg_tfo->get_fabric_name(), |
749 | unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, | 754 | unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, |
750 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 755 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
751 | return ERR_PTR(-EOVERFLOW); | 756 | return ERR_PTR(-EOVERFLOW); |
752 | } | 757 | } |
753 | 758 | ||
754 | spin_lock(&tpg->tpg_lun_lock); | 759 | spin_lock(&tpg->tpg_lun_lock); |
755 | lun = &tpg->tpg_lun_list[unpacked_lun]; | 760 | lun = &tpg->tpg_lun_list[unpacked_lun]; |
756 | if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) { | 761 | if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) { |
757 | printk(KERN_ERR "TPG Logical Unit Number: %u is already active" | 762 | pr_err("TPG Logical Unit Number: %u is already active" |
758 | " on %s Target Portal Group: %u, ignoring request.\n", | 763 | " on %s Target Portal Group: %u, ignoring request.\n", |
759 | unpacked_lun, TPG_TFO(tpg)->get_fabric_name(), | 764 | unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(), |
760 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 765 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
761 | spin_unlock(&tpg->tpg_lun_lock); | 766 | spin_unlock(&tpg->tpg_lun_lock); |
762 | return ERR_PTR(-EINVAL); | 767 | return ERR_PTR(-EINVAL); |
763 | } | 768 | } |
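core_tpg_pre_addlun() keeps its ERR_PTR() convention: distinct error codes travel inside the returned pointer rather than through a side channel. A hedged caller sketch; this caller is illustrative, not taken from the patch:

    #include <linux/err.h>

    struct se_lun *lun;

    lun = core_tpg_pre_addlun(tpg, unpacked_lun);
    if (IS_ERR(lun))
            return PTR_ERR(lun);    /* -EOVERFLOW or -EINVAL from above */
    /* lun is a valid pointer past this point */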
@@ -772,8 +777,11 @@ int core_tpg_post_addlun( | |||
772 | u32 lun_access, | 777 | u32 lun_access, |
773 | void *lun_ptr) | 778 | void *lun_ptr) |
774 | { | 779 | { |
775 | if (core_dev_export(lun_ptr, tpg, lun) < 0) | 780 | int ret; |
776 | return -1; | 781 | |
782 | ret = core_dev_export(lun_ptr, tpg, lun); | ||
783 | if (ret < 0) | ||
784 | return ret; | ||
777 | 785 | ||
778 | spin_lock(&tpg->tpg_lun_lock); | 786 | spin_lock(&tpg->tpg_lun_lock); |
779 | lun->lun_access = lun_access; | 787 | lun->lun_access = lun_access; |
@@ -799,21 +807,21 @@ struct se_lun *core_tpg_pre_dellun( | |||
799 | struct se_lun *lun; | 807 | struct se_lun *lun; |
800 | 808 | ||
801 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | 809 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { |
802 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" | 810 | pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" |
803 | "-1: %u for Target Portal Group: %u\n", | 811 | "-1: %u for Target Portal Group: %u\n", |
804 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 812 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, |
805 | TRANSPORT_MAX_LUNS_PER_TPG-1, | 813 | TRANSPORT_MAX_LUNS_PER_TPG-1, |
806 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 814 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
807 | return ERR_PTR(-EOVERFLOW); | 815 | return ERR_PTR(-EOVERFLOW); |
808 | } | 816 | } |
809 | 817 | ||
810 | spin_lock(&tpg->tpg_lun_lock); | 818 | spin_lock(&tpg->tpg_lun_lock); |
811 | lun = &tpg->tpg_lun_list[unpacked_lun]; | 819 | lun = &tpg->tpg_lun_list[unpacked_lun]; |
812 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { | 820 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { |
813 | printk(KERN_ERR "%s Logical Unit Number: %u is not active on" | 821 | pr_err("%s Logical Unit Number: %u is not active on" |
814 | " Target Portal Group: %u, ignoring request.\n", | 822 | " Target Portal Group: %u, ignoring request.\n", |
815 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 823 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, |
816 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 824 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
817 | spin_unlock(&tpg->tpg_lun_lock); | 825 | spin_unlock(&tpg->tpg_lun_lock); |
818 | return ERR_PTR(-ENODEV); | 826 | return ERR_PTR(-ENODEV); |
819 | } | 827 | } |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 4b9b7169bdd9..46352d658e35 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -58,139 +58,12 @@ | |||
58 | #include "target_core_scdb.h" | 58 | #include "target_core_scdb.h" |
59 | #include "target_core_ua.h" | 59 | #include "target_core_ua.h" |
60 | 60 | ||
61 | /* #define DEBUG_CDB_HANDLER */ | 61 | static int sub_api_initialized; |
62 | #ifdef DEBUG_CDB_HANDLER | ||
63 | #define DEBUG_CDB_H(x...) printk(KERN_INFO x) | ||
64 | #else | ||
65 | #define DEBUG_CDB_H(x...) | ||
66 | #endif | ||
67 | |||
68 | /* #define DEBUG_CMD_MAP */ | ||
69 | #ifdef DEBUG_CMD_MAP | ||
70 | #define DEBUG_CMD_M(x...) printk(KERN_INFO x) | ||
71 | #else | ||
72 | #define DEBUG_CMD_M(x...) | ||
73 | #endif | ||
74 | |||
75 | /* #define DEBUG_MEM_ALLOC */ | ||
76 | #ifdef DEBUG_MEM_ALLOC | ||
77 | #define DEBUG_MEM(x...) printk(KERN_INFO x) | ||
78 | #else | ||
79 | #define DEBUG_MEM(x...) | ||
80 | #endif | ||
81 | |||
82 | /* #define DEBUG_MEM2_ALLOC */ | ||
83 | #ifdef DEBUG_MEM2_ALLOC | ||
84 | #define DEBUG_MEM2(x...) printk(KERN_INFO x) | ||
85 | #else | ||
86 | #define DEBUG_MEM2(x...) | ||
87 | #endif | ||
88 | |||
89 | /* #define DEBUG_SG_CALC */ | ||
90 | #ifdef DEBUG_SG_CALC | ||
91 | #define DEBUG_SC(x...) printk(KERN_INFO x) | ||
92 | #else | ||
93 | #define DEBUG_SC(x...) | ||
94 | #endif | ||
95 | |||
96 | /* #define DEBUG_SE_OBJ */ | ||
97 | #ifdef DEBUG_SE_OBJ | ||
98 | #define DEBUG_SO(x...) printk(KERN_INFO x) | ||
99 | #else | ||
100 | #define DEBUG_SO(x...) | ||
101 | #endif | ||
102 | |||
103 | /* #define DEBUG_CMD_VOL */ | ||
104 | #ifdef DEBUG_CMD_VOL | ||
105 | #define DEBUG_VOL(x...) printk(KERN_INFO x) | ||
106 | #else | ||
107 | #define DEBUG_VOL(x...) | ||
108 | #endif | ||
109 | |||
110 | /* #define DEBUG_CMD_STOP */ | ||
111 | #ifdef DEBUG_CMD_STOP | ||
112 | #define DEBUG_CS(x...) printk(KERN_INFO x) | ||
113 | #else | ||
114 | #define DEBUG_CS(x...) | ||
115 | #endif | ||
116 | |||
117 | /* #define DEBUG_PASSTHROUGH */ | ||
118 | #ifdef DEBUG_PASSTHROUGH | ||
119 | #define DEBUG_PT(x...) printk(KERN_INFO x) | ||
120 | #else | ||
121 | #define DEBUG_PT(x...) | ||
122 | #endif | ||
123 | |||
124 | /* #define DEBUG_TASK_STOP */ | ||
125 | #ifdef DEBUG_TASK_STOP | ||
126 | #define DEBUG_TS(x...) printk(KERN_INFO x) | ||
127 | #else | ||
128 | #define DEBUG_TS(x...) | ||
129 | #endif | ||
130 | |||
131 | /* #define DEBUG_TRANSPORT_STOP */ | ||
132 | #ifdef DEBUG_TRANSPORT_STOP | ||
133 | #define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x) | ||
134 | #else | ||
135 | #define DEBUG_TRANSPORT_S(x...) | ||
136 | #endif | ||
137 | |||
138 | /* #define DEBUG_TASK_FAILURE */ | ||
139 | #ifdef DEBUG_TASK_FAILURE | ||
140 | #define DEBUG_TF(x...) printk(KERN_INFO x) | ||
141 | #else | ||
142 | #define DEBUG_TF(x...) | ||
143 | #endif | ||
144 | |||
145 | /* #define DEBUG_DEV_OFFLINE */ | ||
146 | #ifdef DEBUG_DEV_OFFLINE | ||
147 | #define DEBUG_DO(x...) printk(KERN_INFO x) | ||
148 | #else | ||
149 | #define DEBUG_DO(x...) | ||
150 | #endif | ||
151 | |||
152 | /* #define DEBUG_TASK_STATE */ | ||
153 | #ifdef DEBUG_TASK_STATE | ||
154 | #define DEBUG_TSTATE(x...) printk(KERN_INFO x) | ||
155 | #else | ||
156 | #define DEBUG_TSTATE(x...) | ||
157 | #endif | ||
158 | |||
159 | /* #define DEBUG_STATUS_THR */ | ||
160 | #ifdef DEBUG_STATUS_THR | ||
161 | #define DEBUG_ST(x...) printk(KERN_INFO x) | ||
162 | #else | ||
163 | #define DEBUG_ST(x...) | ||
164 | #endif | ||
165 | |||
166 | /* #define DEBUG_TASK_TIMEOUT */ | ||
167 | #ifdef DEBUG_TASK_TIMEOUT | ||
168 | #define DEBUG_TT(x...) printk(KERN_INFO x) | ||
169 | #else | ||
170 | #define DEBUG_TT(x...) | ||
171 | #endif | ||
172 | |||
173 | /* #define DEBUG_GENERIC_REQUEST_FAILURE */ | ||
174 | #ifdef DEBUG_GENERIC_REQUEST_FAILURE | ||
175 | #define DEBUG_GRF(x...) printk(KERN_INFO x) | ||
176 | #else | ||
177 | #define DEBUG_GRF(x...) | ||
178 | #endif | ||
179 | |||
180 | /* #define DEBUG_SAM_TASK_ATTRS */ | ||
181 | #ifdef DEBUG_SAM_TASK_ATTRS | ||
182 | #define DEBUG_STA(x...) printk(KERN_INFO x) | ||
183 | #else | ||
184 | #define DEBUG_STA(x...) | ||
185 | #endif | ||
186 | |||
187 | struct se_global *se_global; | ||
188 | 62 | ||
189 | static struct kmem_cache *se_cmd_cache; | 63 | static struct kmem_cache *se_cmd_cache; |
190 | static struct kmem_cache *se_sess_cache; | 64 | static struct kmem_cache *se_sess_cache; |
191 | struct kmem_cache *se_tmr_req_cache; | 65 | struct kmem_cache *se_tmr_req_cache; |
192 | struct kmem_cache *se_ua_cache; | 66 | struct kmem_cache *se_ua_cache; |
193 | struct kmem_cache *se_mem_cache; | ||
194 | struct kmem_cache *t10_pr_reg_cache; | 67 | struct kmem_cache *t10_pr_reg_cache; |
195 | struct kmem_cache *t10_alua_lu_gp_cache; | 68 | struct kmem_cache *t10_alua_lu_gp_cache; |
196 | struct kmem_cache *t10_alua_lu_gp_mem_cache; | 69 | struct kmem_cache *t10_alua_lu_gp_mem_cache; |
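The large deletion above removes sixteen per-area compile-time debug switches. Every DEBUG_*() call site in the file becomes a plain pr_debug(), which is a no-op unless DEBUG is defined at build time and can be toggled per call site at runtime under CONFIG_DYNAMIC_DEBUG. For reference, the removed pattern next to its replacement ('tag' is illustrative):

    /* old: edit the source and rebuild to turn one area on */
    /* #define DEBUG_CMD_STOP */
    #ifdef DEBUG_CMD_STOP
    #define DEBUG_CS(x...) printk(KERN_INFO x)
    #else
    #define DEBUG_CS(x...)
    #endif

    /* new: one mechanism for the whole file */
    pr_debug("stop requested for ITT: 0x%08x\n", tag);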
@@ -201,116 +74,87 @@ struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; | |||
201 | typedef int (*map_func_t)(struct se_task *, u32); | 74 | typedef int (*map_func_t)(struct se_task *, u32); |
202 | 75 | ||
203 | static int transport_generic_write_pending(struct se_cmd *); | 76 | static int transport_generic_write_pending(struct se_cmd *); |
204 | static int transport_processing_thread(void *); | 77 | static int transport_processing_thread(void *param); |
205 | static int __transport_execute_tasks(struct se_device *dev); | 78 | static int __transport_execute_tasks(struct se_device *dev); |
206 | static void transport_complete_task_attr(struct se_cmd *cmd); | 79 | static void transport_complete_task_attr(struct se_cmd *cmd); |
80 | static int transport_complete_qf(struct se_cmd *cmd); | ||
81 | static void transport_handle_queue_full(struct se_cmd *cmd, | ||
82 | struct se_device *dev, int (*qf_callback)(struct se_cmd *)); | ||
207 | static void transport_direct_request_timeout(struct se_cmd *cmd); | 83 | static void transport_direct_request_timeout(struct se_cmd *cmd); |
208 | static void transport_free_dev_tasks(struct se_cmd *cmd); | 84 | static void transport_free_dev_tasks(struct se_cmd *cmd); |
209 | static u32 transport_generic_get_cdb_count(struct se_cmd *cmd, | 85 | static u32 transport_allocate_tasks(struct se_cmd *cmd, |
210 | unsigned long long starting_lba, u32 sectors, | 86 | unsigned long long starting_lba, |
211 | enum dma_data_direction data_direction, | 87 | enum dma_data_direction data_direction, |
212 | struct list_head *mem_list, int set_counts); | 88 | struct scatterlist *sgl, unsigned int nents); |
213 | static int transport_generic_get_mem(struct se_cmd *cmd, u32 length, | 89 | static int transport_generic_get_mem(struct se_cmd *cmd); |
214 | u32 dma_size); | ||
215 | static int transport_generic_remove(struct se_cmd *cmd, | 90 | static int transport_generic_remove(struct se_cmd *cmd, |
216 | int release_to_pool, int session_reinstatement); | 91 | int session_reinstatement); |
217 | static int transport_get_sectors(struct se_cmd *cmd); | ||
218 | static struct list_head *transport_init_se_mem_list(void); | ||
219 | static int transport_map_sg_to_mem(struct se_cmd *cmd, | ||
220 | struct list_head *se_mem_list, void *in_mem, | ||
221 | u32 *se_mem_cnt); | ||
222 | static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd, | ||
223 | unsigned char *dst, struct list_head *se_mem_list); | ||
224 | static void transport_release_fe_cmd(struct se_cmd *cmd); | 92 | static void transport_release_fe_cmd(struct se_cmd *cmd); |
225 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd, | 93 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd, |
226 | struct se_queue_obj *qobj); | 94 | struct se_queue_obj *qobj); |
227 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); | 95 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); |
228 | static void transport_stop_all_task_timers(struct se_cmd *cmd); | 96 | static void transport_stop_all_task_timers(struct se_cmd *cmd); |
229 | 97 | ||
230 | int init_se_global(void) | 98 | int init_se_kmem_caches(void) |
231 | { | 99 | { |
232 | struct se_global *global; | ||
233 | |||
234 | global = kzalloc(sizeof(struct se_global), GFP_KERNEL); | ||
235 | if (!(global)) { | ||
236 | printk(KERN_ERR "Unable to allocate memory for struct se_global\n"); | ||
237 | return -1; | ||
238 | } | ||
239 | |||
240 | INIT_LIST_HEAD(&global->g_lu_gps_list); | ||
241 | INIT_LIST_HEAD(&global->g_se_tpg_list); | ||
242 | INIT_LIST_HEAD(&global->g_hba_list); | ||
243 | INIT_LIST_HEAD(&global->g_se_dev_list); | ||
244 | spin_lock_init(&global->g_device_lock); | ||
245 | spin_lock_init(&global->hba_lock); | ||
246 | spin_lock_init(&global->se_tpg_lock); | ||
247 | spin_lock_init(&global->lu_gps_lock); | ||
248 | spin_lock_init(&global->plugin_class_lock); | ||
249 | |||
250 | se_cmd_cache = kmem_cache_create("se_cmd_cache", | 100 | se_cmd_cache = kmem_cache_create("se_cmd_cache", |
251 | sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL); | 101 | sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL); |
252 | if (!(se_cmd_cache)) { | 102 | if (!se_cmd_cache) { |
253 | printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n"); | 103 | pr_err("kmem_cache_create for struct se_cmd failed\n"); |
254 | goto out; | 104 | goto out; |
255 | } | 105 | } |
256 | se_tmr_req_cache = kmem_cache_create("se_tmr_cache", | 106 | se_tmr_req_cache = kmem_cache_create("se_tmr_cache", |
257 | sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req), | 107 | sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req), |
258 | 0, NULL); | 108 | 0, NULL); |
259 | if (!(se_tmr_req_cache)) { | 109 | if (!se_tmr_req_cache) { |
260 | printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req" | 110 | pr_err("kmem_cache_create() for struct se_tmr_req" |
261 | " failed\n"); | 111 | " failed\n"); |
262 | goto out; | 112 | goto out; |
263 | } | 113 | } |
264 | se_sess_cache = kmem_cache_create("se_sess_cache", | 114 | se_sess_cache = kmem_cache_create("se_sess_cache", |
265 | sizeof(struct se_session), __alignof__(struct se_session), | 115 | sizeof(struct se_session), __alignof__(struct se_session), |
266 | 0, NULL); | 116 | 0, NULL); |
267 | if (!(se_sess_cache)) { | 117 | if (!se_sess_cache) { |
268 | printk(KERN_ERR "kmem_cache_create() for struct se_session" | 118 | pr_err("kmem_cache_create() for struct se_session" |
269 | " failed\n"); | 119 | " failed\n"); |
270 | goto out; | 120 | goto out; |
271 | } | 121 | } |
272 | se_ua_cache = kmem_cache_create("se_ua_cache", | 122 | se_ua_cache = kmem_cache_create("se_ua_cache", |
273 | sizeof(struct se_ua), __alignof__(struct se_ua), | 123 | sizeof(struct se_ua), __alignof__(struct se_ua), |
274 | 0, NULL); | 124 | 0, NULL); |
275 | if (!(se_ua_cache)) { | 125 | if (!se_ua_cache) { |
276 | printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n"); | 126 | pr_err("kmem_cache_create() for struct se_ua failed\n"); |
277 | goto out; | ||
278 | } | ||
279 | se_mem_cache = kmem_cache_create("se_mem_cache", | ||
280 | sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL); | ||
281 | if (!(se_mem_cache)) { | ||
282 | printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n"); | ||
283 | goto out; | 127 | goto out; |
284 | } | 128 | } |
285 | t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", | 129 | t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", |
286 | sizeof(struct t10_pr_registration), | 130 | sizeof(struct t10_pr_registration), |
287 | __alignof__(struct t10_pr_registration), 0, NULL); | 131 | __alignof__(struct t10_pr_registration), 0, NULL); |
288 | if (!(t10_pr_reg_cache)) { | 132 | if (!t10_pr_reg_cache) { |
289 | printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration" | 133 | pr_err("kmem_cache_create() for struct t10_pr_registration" |
290 | " failed\n"); | 134 | " failed\n"); |
291 | goto out; | 135 | goto out; |
292 | } | 136 | } |
293 | t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", | 137 | t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", |
294 | sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), | 138 | sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), |
295 | 0, NULL); | 139 | 0, NULL); |
296 | if (!(t10_alua_lu_gp_cache)) { | 140 | if (!t10_alua_lu_gp_cache) { |
297 | printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache" | 141 | pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" |
298 | " failed\n"); | 142 | " failed\n"); |
299 | goto out; | 143 | goto out; |
300 | } | 144 | } |
301 | t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", | 145 | t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", |
302 | sizeof(struct t10_alua_lu_gp_member), | 146 | sizeof(struct t10_alua_lu_gp_member), |
303 | __alignof__(struct t10_alua_lu_gp_member), 0, NULL); | 147 | __alignof__(struct t10_alua_lu_gp_member), 0, NULL); |
304 | if (!(t10_alua_lu_gp_mem_cache)) { | 148 | if (!t10_alua_lu_gp_mem_cache) { |
305 | printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_" | 149 | pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" |
306 | "cache failed\n"); | 150 | "cache failed\n"); |
307 | goto out; | 151 | goto out; |
308 | } | 152 | } |
309 | t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", | 153 | t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", |
310 | sizeof(struct t10_alua_tg_pt_gp), | 154 | sizeof(struct t10_alua_tg_pt_gp), |
311 | __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); | 155 | __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); |
312 | if (!(t10_alua_tg_pt_gp_cache)) { | 156 | if (!t10_alua_tg_pt_gp_cache) { |
313 | printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_" | 157 | pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" |
314 | "cache failed\n"); | 158 | "cache failed\n"); |
315 | goto out; | 159 | goto out; |
316 | } | 160 | } |
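With the se_global allocation gone, init_se_global() becomes init_se_kmem_caches(): nothing but cache creation plus the goto-out unwind. A condensed, self-contained sketch of that pattern; struct foo/bar and the cache names are placeholders:

    #include <linux/slab.h>

    struct foo { int x; };              /* placeholder payload types */
    struct bar { long y; };

    static struct kmem_cache *foo_cache;
    static struct kmem_cache *bar_cache;

    static int init_caches(void)
    {
            foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
                            __alignof__(struct foo), 0, NULL);
            if (!foo_cache)
                    goto out;
            bar_cache = kmem_cache_create("bar_cache", sizeof(struct bar),
                            __alignof__(struct bar), 0, NULL);
            if (!bar_cache)
                    goto out;
            return 0;
    out:
            /* destroy only what was created; later caches are still NULL */
            if (foo_cache)
                    kmem_cache_destroy(foo_cache);
            return -ENOMEM;             /* was a bare -1 before this series */
    }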
@@ -319,14 +163,12 @@ int init_se_global(void) | |||
319 | sizeof(struct t10_alua_tg_pt_gp_member), | 163 | sizeof(struct t10_alua_tg_pt_gp_member), |
320 | __alignof__(struct t10_alua_tg_pt_gp_member), | 164 | __alignof__(struct t10_alua_tg_pt_gp_member), |
321 | 0, NULL); | 165 | 0, NULL); |
322 | if (!(t10_alua_tg_pt_gp_mem_cache)) { | 166 | if (!t10_alua_tg_pt_gp_mem_cache) { |
323 | printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_" | 167 | pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" |
324 | "mem_t failed\n"); | 168 | "mem_t failed\n"); |
325 | goto out; | 169 | goto out; |
326 | } | 170 | } |
327 | 171 | ||
328 | se_global = global; | ||
329 | |||
330 | return 0; | 172 | return 0; |
331 | out: | 173 | out: |
332 | if (se_cmd_cache) | 174 | if (se_cmd_cache) |
@@ -337,8 +179,6 @@ out: | |||
337 | kmem_cache_destroy(se_sess_cache); | 179 | kmem_cache_destroy(se_sess_cache); |
338 | if (se_ua_cache) | 180 | if (se_ua_cache) |
339 | kmem_cache_destroy(se_ua_cache); | 181 | kmem_cache_destroy(se_ua_cache); |
340 | if (se_mem_cache) | ||
341 | kmem_cache_destroy(se_mem_cache); | ||
342 | if (t10_pr_reg_cache) | 182 | if (t10_pr_reg_cache) |
343 | kmem_cache_destroy(t10_pr_reg_cache); | 183 | kmem_cache_destroy(t10_pr_reg_cache); |
344 | if (t10_alua_lu_gp_cache) | 184 | if (t10_alua_lu_gp_cache) |
@@ -349,45 +189,25 @@ out: | |||
349 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); | 189 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); |
350 | if (t10_alua_tg_pt_gp_mem_cache) | 190 | if (t10_alua_tg_pt_gp_mem_cache) |
351 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); | 191 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); |
352 | kfree(global); | 192 | return -ENOMEM; |
353 | return -1; | ||
354 | } | 193 | } |
355 | 194 | ||
356 | void release_se_global(void) | 195 | void release_se_kmem_caches(void) |
357 | { | 196 | { |
358 | struct se_global *global; | ||
359 | |||
360 | global = se_global; | ||
361 | if (!(global)) | ||
362 | return; | ||
363 | |||
364 | kmem_cache_destroy(se_cmd_cache); | 197 | kmem_cache_destroy(se_cmd_cache); |
365 | kmem_cache_destroy(se_tmr_req_cache); | 198 | kmem_cache_destroy(se_tmr_req_cache); |
366 | kmem_cache_destroy(se_sess_cache); | 199 | kmem_cache_destroy(se_sess_cache); |
367 | kmem_cache_destroy(se_ua_cache); | 200 | kmem_cache_destroy(se_ua_cache); |
368 | kmem_cache_destroy(se_mem_cache); | ||
369 | kmem_cache_destroy(t10_pr_reg_cache); | 201 | kmem_cache_destroy(t10_pr_reg_cache); |
370 | kmem_cache_destroy(t10_alua_lu_gp_cache); | 202 | kmem_cache_destroy(t10_alua_lu_gp_cache); |
371 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); | 203 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); |
372 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); | 204 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); |
373 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); | 205 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); |
374 | kfree(global); | ||
375 | |||
376 | se_global = NULL; | ||
377 | } | 206 | } |
378 | 207 | ||
379 | /* SCSI statistics table index */ | 208 | /* This code ensures unique mib indexes are handed out. */ |
380 | static struct scsi_index_table scsi_index_table; | 209 | static DEFINE_SPINLOCK(scsi_mib_index_lock); |
381 | 210 | static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; | |
382 | /* | ||
383 | * Initialize the index table for allocating unique row indexes to various mib | ||
384 | * tables. | ||
385 | */ | ||
386 | void init_scsi_index_table(void) | ||
387 | { | ||
388 | memset(&scsi_index_table, 0, sizeof(struct scsi_index_table)); | ||
389 | spin_lock_init(&scsi_index_table.lock); | ||
390 | } | ||
391 | 211 | ||
392 | /* | 212 | /* |
393 | * Allocate a new row index for the entry type specified | 213 | * Allocate a new row index for the entry type specified |
@@ -396,16 +216,11 @@ u32 scsi_get_new_index(scsi_index_t type) | |||
396 | { | 216 | { |
397 | u32 new_index; | 217 | u32 new_index; |
398 | 218 | ||
399 | if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) { | 219 | BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); |
400 | printk(KERN_ERR "Invalid index type %d\n", type); | ||
401 | return -EINVAL; | ||
402 | } | ||
403 | 220 | ||
404 | spin_lock(&scsi_index_table.lock); | 221 | spin_lock(&scsi_mib_index_lock); |
405 | new_index = ++scsi_index_table.scsi_mib_index[type]; | 222 | new_index = ++scsi_mib_index[type]; |
406 | if (new_index == 0) | 223 | spin_unlock(&scsi_mib_index_lock); |
407 | new_index = ++scsi_index_table.scsi_mib_index[type]; | ||
408 | spin_unlock(&scsi_index_table.lock); | ||
409 | 224 | ||
410 | return new_index; | 225 | return new_index; |
411 | } | 226 | } |
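The index table loses its wrapper struct as well: a file-local DEFINE_SPINLOCK plus a bare u32 array replaces struct scsi_index_table and its init_scsi_index_table() constructor, and the runtime range check becomes a BUG_ON() since every caller passes a compile-time constant. Note the old wrap-to-zero recheck (new_index == 0) is dropped. For comparison only, a lock-free variant of the same allocator is possible with atomics; this is not what the patch does:

    #include <linux/atomic.h>
    #include <linux/bug.h>

    static atomic_t scsi_mib_index[SCSI_INDEX_TYPE_MAX];

    u32 scsi_get_new_index(scsi_index_t type)
    {
            BUG_ON(type < 0 || type >= SCSI_INDEX_TYPE_MAX);
            /* atomic_inc_return hands each caller a unique value, no lock */
            return (u32)atomic_inc_return(&scsi_mib_index[type]);
    }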
@@ -425,34 +240,37 @@ static int transport_subsystem_reqmods(void) | |||
425 | 240 | ||
426 | ret = request_module("target_core_iblock"); | 241 | ret = request_module("target_core_iblock"); |
427 | if (ret != 0) | 242 | if (ret != 0) |
428 | printk(KERN_ERR "Unable to load target_core_iblock\n"); | 243 | pr_err("Unable to load target_core_iblock\n"); |
429 | 244 | ||
430 | ret = request_module("target_core_file"); | 245 | ret = request_module("target_core_file"); |
431 | if (ret != 0) | 246 | if (ret != 0) |
432 | printk(KERN_ERR "Unable to load target_core_file\n"); | 247 | pr_err("Unable to load target_core_file\n"); |
433 | 248 | ||
434 | ret = request_module("target_core_pscsi"); | 249 | ret = request_module("target_core_pscsi"); |
435 | if (ret != 0) | 250 | if (ret != 0) |
436 | printk(KERN_ERR "Unable to load target_core_pscsi\n"); | 251 | pr_err("Unable to load target_core_pscsi\n"); |
437 | 252 | ||
438 | ret = request_module("target_core_stgt"); | 253 | ret = request_module("target_core_stgt"); |
439 | if (ret != 0) | 254 | if (ret != 0) |
440 | printk(KERN_ERR "Unable to load target_core_stgt\n"); | 255 | pr_err("Unable to load target_core_stgt\n"); |
441 | 256 | ||
442 | return 0; | 257 | return 0; |
443 | } | 258 | } |
444 | 259 | ||
445 | int transport_subsystem_check_init(void) | 260 | int transport_subsystem_check_init(void) |
446 | { | 261 | { |
447 | if (se_global->g_sub_api_initialized) | 262 | int ret; |
263 | |||
264 | if (sub_api_initialized) | ||
448 | return 0; | 265 | return 0; |
449 | /* | 266 | /* |
450 | * Request the loading of known TCM subsystem plugins.. | 267 | * Request the loading of known TCM subsystem plugins.. |
451 | */ | 268 | */ |
452 | if (transport_subsystem_reqmods() < 0) | 269 | ret = transport_subsystem_reqmods(); |
453 | return -1; | 270 | if (ret < 0) |
271 | return ret; | ||
454 | 272 | ||
455 | se_global->g_sub_api_initialized = 1; | 273 | sub_api_initialized = 1; |
456 | return 0; | 274 | return 0; |
457 | } | 275 | } |
458 | 276 | ||
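Two details worth noting in this hunk: the initialized flag moves from se_global->g_sub_api_initialized to a file-static int, and the errno from transport_subsystem_reqmods() is propagated instead of -1. The request_module() failures above are deliberately only logged, so a backstore built into the kernel rather than as a module (see "Allow for built-in target modules" in this pull) does not abort initialization. The new-side logic, consolidated from the right-hand column for readability:

    int transport_subsystem_check_init(void)
    {
            int ret;

            if (sub_api_initialized)        /* fast path after first fabric load */
                    return 0;

            ret = transport_subsystem_reqmods();  /* logs, but never fails today */
            if (ret < 0)
                    return ret;

            sub_api_initialized = 1;
            return 0;
    }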
@@ -461,8 +279,8 @@ struct se_session *transport_init_session(void) | |||
461 | struct se_session *se_sess; | 279 | struct se_session *se_sess; |
462 | 280 | ||
463 | se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); | 281 | se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); |
464 | if (!(se_sess)) { | 282 | if (!se_sess) { |
465 | printk(KERN_ERR "Unable to allocate struct se_session from" | 283 | pr_err("Unable to allocate struct se_session from" |
466 | " se_sess_cache\n"); | 284 | " se_sess_cache\n"); |
467 | return ERR_PTR(-ENOMEM); | 285 | return ERR_PTR(-ENOMEM); |
468 | } | 286 | } |
@@ -497,9 +315,9 @@ void __transport_register_session( | |||
497 | * If the fabric module supports an ISID based TransportID, | 315 | * If the fabric module supports an ISID based TransportID, |
498 | * save this value in binary from the fabric I_T Nexus now. | 316 | * save this value in binary from the fabric I_T Nexus now. |
499 | */ | 317 | */ |
500 | if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) { | 318 | if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { |
501 | memset(&buf[0], 0, PR_REG_ISID_LEN); | 319 | memset(&buf[0], 0, PR_REG_ISID_LEN); |
502 | TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, | 320 | se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, |
503 | &buf[0], PR_REG_ISID_LEN); | 321 | &buf[0], PR_REG_ISID_LEN); |
504 | se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); | 322 | se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); |
505 | } | 323 | } |
@@ -516,8 +334,8 @@ void __transport_register_session( | |||
516 | } | 334 | } |
517 | list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); | 335 | list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); |
518 | 336 | ||
519 | printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", | 337 | pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", |
520 | TPG_TFO(se_tpg)->get_fabric_name(), se_sess->fabric_sess_ptr); | 338 | se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); |
521 | } | 339 | } |
522 | EXPORT_SYMBOL(__transport_register_session); | 340 | EXPORT_SYMBOL(__transport_register_session); |
523 | 341 | ||
@@ -541,7 +359,7 @@ void transport_deregister_session_configfs(struct se_session *se_sess) | |||
541 | * Used by struct se_node_acl's under ConfigFS to locate active struct se_session | 359 | * Used by struct se_node_acl's under ConfigFS to locate active struct se_session |
542 | */ | 360 | */ |
543 | se_nacl = se_sess->se_node_acl; | 361 | se_nacl = se_sess->se_node_acl; |
544 | if ((se_nacl)) { | 362 | if (se_nacl) { |
545 | spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); | 363 | spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); |
546 | list_del(&se_sess->sess_acl_list); | 364 | list_del(&se_sess->sess_acl_list); |
547 | /* | 365 | /* |
@@ -572,7 +390,7 @@ void transport_deregister_session(struct se_session *se_sess) | |||
572 | struct se_portal_group *se_tpg = se_sess->se_tpg; | 390 | struct se_portal_group *se_tpg = se_sess->se_tpg; |
573 | struct se_node_acl *se_nacl; | 391 | struct se_node_acl *se_nacl; |
574 | 392 | ||
575 | if (!(se_tpg)) { | 393 | if (!se_tpg) { |
576 | transport_free_session(se_sess); | 394 | transport_free_session(se_sess); |
577 | return; | 395 | return; |
578 | } | 396 | } |
@@ -588,18 +406,18 @@ void transport_deregister_session(struct se_session *se_sess) | |||
588 | * struct se_node_acl if it had been previously dynamically generated. | 406 | * struct se_node_acl if it had been previously dynamically generated. |
589 | */ | 407 | */ |
590 | se_nacl = se_sess->se_node_acl; | 408 | se_nacl = se_sess->se_node_acl; |
591 | if ((se_nacl)) { | 409 | if (se_nacl) { |
592 | spin_lock_bh(&se_tpg->acl_node_lock); | 410 | spin_lock_bh(&se_tpg->acl_node_lock); |
593 | if (se_nacl->dynamic_node_acl) { | 411 | if (se_nacl->dynamic_node_acl) { |
594 | if (!(TPG_TFO(se_tpg)->tpg_check_demo_mode_cache( | 412 | if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( |
595 | se_tpg))) { | 413 | se_tpg)) { |
596 | list_del(&se_nacl->acl_list); | 414 | list_del(&se_nacl->acl_list); |
597 | se_tpg->num_node_acls--; | 415 | se_tpg->num_node_acls--; |
598 | spin_unlock_bh(&se_tpg->acl_node_lock); | 416 | spin_unlock_bh(&se_tpg->acl_node_lock); |
599 | 417 | ||
600 | core_tpg_wait_for_nacl_pr_ref(se_nacl); | 418 | core_tpg_wait_for_nacl_pr_ref(se_nacl); |
601 | core_free_device_list_for_node(se_nacl, se_tpg); | 419 | core_free_device_list_for_node(se_nacl, se_tpg); |
602 | TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, | 420 | se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, |
603 | se_nacl); | 421 | se_nacl); |
604 | spin_lock_bh(&se_tpg->acl_node_lock); | 422 | spin_lock_bh(&se_tpg->acl_node_lock); |
605 | } | 423 | } |
@@ -609,13 +427,13 @@ void transport_deregister_session(struct se_session *se_sess) | |||
609 | 427 | ||
610 | transport_free_session(se_sess); | 428 | transport_free_session(se_sess); |
611 | 429 | ||
612 | printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n", | 430 | pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", |
613 | TPG_TFO(se_tpg)->get_fabric_name()); | 431 | se_tpg->se_tpg_tfo->get_fabric_name()); |
614 | } | 432 | } |
615 | EXPORT_SYMBOL(transport_deregister_session); | 433 | EXPORT_SYMBOL(transport_deregister_session); |
616 | 434 | ||
617 | /* | 435 | /* |
618 | * Called with T_TASK(cmd)->t_state_lock held. | 436 | * Called with cmd->t_state_lock held. |
619 | */ | 437 | */ |
620 | static void transport_all_task_dev_remove_state(struct se_cmd *cmd) | 438 | static void transport_all_task_dev_remove_state(struct se_cmd *cmd) |
621 | { | 439 | { |
@@ -623,28 +441,25 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) | |||
623 | struct se_task *task; | 441 | struct se_task *task; |
624 | unsigned long flags; | 442 | unsigned long flags; |
625 | 443 | ||
626 | if (!T_TASK(cmd)) | 444 | list_for_each_entry(task, &cmd->t_task_list, t_list) { |
627 | return; | ||
628 | |||
629 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { | ||
630 | dev = task->se_dev; | 445 | dev = task->se_dev; |
631 | if (!(dev)) | 446 | if (!dev) |
632 | continue; | 447 | continue; |
633 | 448 | ||
634 | if (atomic_read(&task->task_active)) | 449 | if (atomic_read(&task->task_active)) |
635 | continue; | 450 | continue; |
636 | 451 | ||
637 | if (!(atomic_read(&task->task_state_active))) | 452 | if (!atomic_read(&task->task_state_active)) |
638 | continue; | 453 | continue; |
639 | 454 | ||
640 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 455 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
641 | list_del(&task->t_state_list); | 456 | list_del(&task->t_state_list); |
642 | DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n", | 457 | pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n", |
643 | CMD_TFO(cmd)->tfo_get_task_tag(cmd), dev, task); | 458 | cmd->se_tfo->get_task_tag(cmd), dev, task); |
644 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | 459 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
645 | 460 | ||
646 | atomic_set(&task->task_state_active, 0); | 461 | atomic_set(&task->task_state_active, 0); |
647 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_ex_left); | 462 | atomic_dec(&cmd->t_task_cdbs_ex_left); |
648 | } | 463 | } |
649 | } | 464 | } |
650 | 465 | ||
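From here down the diff is dominated by the removal of the T_TASK() indirection: the former struct se_transport_task fields (t_task_list, t_state_lock, the transport_* atomics and completions) now live directly in struct se_cmd, which is why the NULL check at the top of this function can go away entirely. A sketch of the access change, with the old macro body assumed:

    /* old (assumed definition): a separately allocated state container */
    #define T_TASK(cmd) ((cmd)->t_task)
    spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);

    /* new: the state is embedded in struct se_cmd itself */
    spin_lock_irqsave(&cmd->t_state_lock, flags);
    list_for_each_entry(task, &cmd->t_task_list, t_list) {
            /* per-task work */
    }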
@@ -663,34 +478,34 @@ static int transport_cmd_check_stop( | |||
663 | { | 478 | { |
664 | unsigned long flags; | 479 | unsigned long flags; |
665 | 480 | ||
666 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 481 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
667 | /* | 482 | /* |
668 | * Determine if IOCTL context caller is requesting the stopping of this | 483 | * Determine if IOCTL context caller is requesting the stopping of this |
669 | * command for LUN shutdown purposes. | 484 | * command for LUN shutdown purposes. |
670 | */ | 485 | */ |
671 | if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) { | 486 | if (atomic_read(&cmd->transport_lun_stop)) { |
672 | DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->transport_lun_stop)" | 487 | pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)" |
673 | " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, | 488 | " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, |
674 | CMD_TFO(cmd)->get_task_tag(cmd)); | 489 | cmd->se_tfo->get_task_tag(cmd)); |
675 | 490 | ||
676 | cmd->deferred_t_state = cmd->t_state; | 491 | cmd->deferred_t_state = cmd->t_state; |
677 | cmd->t_state = TRANSPORT_DEFERRED_CMD; | 492 | cmd->t_state = TRANSPORT_DEFERRED_CMD; |
678 | atomic_set(&T_TASK(cmd)->t_transport_active, 0); | 493 | atomic_set(&cmd->t_transport_active, 0); |
679 | if (transport_off == 2) | 494 | if (transport_off == 2) |
680 | transport_all_task_dev_remove_state(cmd); | 495 | transport_all_task_dev_remove_state(cmd); |
681 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 496 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
682 | 497 | ||
683 | complete(&T_TASK(cmd)->transport_lun_stop_comp); | 498 | complete(&cmd->transport_lun_stop_comp); |
684 | return 1; | 499 | return 1; |
685 | } | 500 | } |
686 | /* | 501 | /* |
687 | * Determine if frontend context caller is requesting the stopping of | 502 | * Determine if frontend context caller is requesting the stopping of |
688 | * this command for frontend excpections. | 503 | * this command for frontend exceptions. |
689 | */ | 504 | */ |
690 | if (atomic_read(&T_TASK(cmd)->t_transport_stop)) { | 505 | if (atomic_read(&cmd->t_transport_stop)) { |
691 | DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->t_transport_stop) ==" | 506 | pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) ==" |
692 | " TRUE for ITT: 0x%08x\n", __func__, __LINE__, | 507 | " TRUE for ITT: 0x%08x\n", __func__, __LINE__, |
693 | CMD_TFO(cmd)->get_task_tag(cmd)); | 508 | cmd->se_tfo->get_task_tag(cmd)); |
694 | 509 | ||
695 | cmd->deferred_t_state = cmd->t_state; | 510 | cmd->deferred_t_state = cmd->t_state; |
696 | cmd->t_state = TRANSPORT_DEFERRED_CMD; | 511 | cmd->t_state = TRANSPORT_DEFERRED_CMD; |
@@ -703,13 +518,13 @@ static int transport_cmd_check_stop( | |||
703 | */ | 518 | */ |
704 | if (transport_off == 2) | 519 | if (transport_off == 2) |
705 | cmd->se_lun = NULL; | 520 | cmd->se_lun = NULL; |
706 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 521 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
707 | 522 | ||
708 | complete(&T_TASK(cmd)->t_transport_stop_comp); | 523 | complete(&cmd->t_transport_stop_comp); |
709 | return 1; | 524 | return 1; |
710 | } | 525 | } |
711 | if (transport_off) { | 526 | if (transport_off) { |
712 | atomic_set(&T_TASK(cmd)->t_transport_active, 0); | 527 | atomic_set(&cmd->t_transport_active, 0); |
713 | if (transport_off == 2) { | 528 | if (transport_off == 2) { |
714 | transport_all_task_dev_remove_state(cmd); | 529 | transport_all_task_dev_remove_state(cmd); |
715 | /* | 530 | /* |
@@ -722,20 +537,20 @@ static int transport_cmd_check_stop( | |||
722 | * their internally allocated I/O reference now and | 537 | * their internally allocated I/O reference now and |
723 | * struct se_cmd now. | 538 | * struct se_cmd now. |
724 | */ | 539 | */ |
725 | if (CMD_TFO(cmd)->check_stop_free != NULL) { | 540 | if (cmd->se_tfo->check_stop_free != NULL) { |
726 | spin_unlock_irqrestore( | 541 | spin_unlock_irqrestore( |
727 | &T_TASK(cmd)->t_state_lock, flags); | 542 | &cmd->t_state_lock, flags); |
728 | 543 | ||
729 | CMD_TFO(cmd)->check_stop_free(cmd); | 544 | cmd->se_tfo->check_stop_free(cmd); |
730 | return 1; | 545 | return 1; |
731 | } | 546 | } |
732 | } | 547 | } |
733 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 548 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
734 | 549 | ||
735 | return 0; | 550 | return 0; |
736 | } else if (t_state) | 551 | } else if (t_state) |
737 | cmd->t_state = t_state; | 552 | cmd->t_state = t_state; |
738 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 553 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
739 | 554 | ||
740 | return 0; | 555 | return 0; |
741 | } | 556 | } |
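transport_cmd_check_stop() is the consumer half of a completion handshake: a stopper elsewhere in this file raises transport_lun_stop or t_transport_stop and sleeps; this function notices the flag under t_state_lock, parks the command as TRANSPORT_DEFERRED_CMD, and complete()s the matching completion to wake the stopper. A reduced sketch of the two halves; the waiter side is paraphrased, not quoted from this hunk:

    #include <linux/completion.h>

    /* stopper side (e.g. LUN shutdown) */
    atomic_set(&cmd->transport_lun_stop, 1);
    wait_for_completion(&cmd->transport_lun_stop_comp);

    /* processing side, inside transport_cmd_check_stop() */
    if (atomic_read(&cmd->transport_lun_stop)) {
            cmd->deferred_t_state = cmd->t_state;
            cmd->t_state = TRANSPORT_DEFERRED_CMD;
            complete(&cmd->transport_lun_stop_comp);
            return 1;               /* the caller owns nothing further */
    }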
@@ -747,30 +562,30 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) | |||
747 | 562 | ||
748 | static void transport_lun_remove_cmd(struct se_cmd *cmd) | 563 | static void transport_lun_remove_cmd(struct se_cmd *cmd) |
749 | { | 564 | { |
750 | struct se_lun *lun = SE_LUN(cmd); | 565 | struct se_lun *lun = cmd->se_lun; |
751 | unsigned long flags; | 566 | unsigned long flags; |
752 | 567 | ||
753 | if (!lun) | 568 | if (!lun) |
754 | return; | 569 | return; |
755 | 570 | ||
756 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 571 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
757 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { | 572 | if (!atomic_read(&cmd->transport_dev_active)) { |
758 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 573 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
759 | goto check_lun; | 574 | goto check_lun; |
760 | } | 575 | } |
761 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); | 576 | atomic_set(&cmd->transport_dev_active, 0); |
762 | transport_all_task_dev_remove_state(cmd); | 577 | transport_all_task_dev_remove_state(cmd); |
763 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 578 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
764 | 579 | ||
765 | 580 | ||
766 | check_lun: | 581 | check_lun: |
767 | spin_lock_irqsave(&lun->lun_cmd_lock, flags); | 582 | spin_lock_irqsave(&lun->lun_cmd_lock, flags); |
768 | if (atomic_read(&T_TASK(cmd)->transport_lun_active)) { | 583 | if (atomic_read(&cmd->transport_lun_active)) { |
769 | list_del(&cmd->se_lun_list); | 584 | list_del(&cmd->se_lun_node); |
770 | atomic_set(&T_TASK(cmd)->transport_lun_active, 0); | 585 | atomic_set(&cmd->transport_lun_active, 0); |
771 | #if 0 | 586 | #if 0 |
772 | printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n" | 587 | pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n" |
773 | CMD_TFO(cmd)->get_task_tag(cmd), lun->unpacked_lun); | 588 | cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun); |
774 | #endif | 589 | #endif |
775 | } | 590 | } |
776 | spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); | 591 | spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); |
@@ -778,92 +593,59 @@ check_lun: | |||
778 | 593 | ||
779 | void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) | 594 | void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) |
780 | { | 595 | { |
781 | transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); | 596 | transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); |
782 | transport_lun_remove_cmd(cmd); | 597 | transport_lun_remove_cmd(cmd); |
783 | 598 | ||
784 | if (transport_cmd_check_stop_to_fabric(cmd)) | 599 | if (transport_cmd_check_stop_to_fabric(cmd)) |
785 | return; | 600 | return; |
786 | if (remove) | 601 | if (remove) |
787 | transport_generic_remove(cmd, 0, 0); | 602 | transport_generic_remove(cmd, 0); |
788 | } | 603 | } |
789 | 604 | ||
790 | void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) | 605 | void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) |
791 | { | 606 | { |
792 | transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); | 607 | transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); |
793 | 608 | ||
794 | if (transport_cmd_check_stop_to_fabric(cmd)) | 609 | if (transport_cmd_check_stop_to_fabric(cmd)) |
795 | return; | 610 | return; |
796 | 611 | ||
797 | transport_generic_remove(cmd, 0, 0); | 612 | transport_generic_remove(cmd, 0); |
798 | } | 613 | } |
799 | 614 | ||
800 | static int transport_add_cmd_to_queue( | 615 | static void transport_add_cmd_to_queue( |
801 | struct se_cmd *cmd, | 616 | struct se_cmd *cmd, |
802 | int t_state) | 617 | int t_state) |
803 | { | 618 | { |
804 | struct se_device *dev = cmd->se_dev; | 619 | struct se_device *dev = cmd->se_dev; |
805 | struct se_queue_obj *qobj = dev->dev_queue_obj; | 620 | struct se_queue_obj *qobj = &dev->dev_queue_obj; |
806 | struct se_queue_req *qr; | ||
807 | unsigned long flags; | 621 | unsigned long flags; |
808 | 622 | ||
809 | qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC); | 623 | INIT_LIST_HEAD(&cmd->se_queue_node); |
810 | if (!(qr)) { | ||
811 | printk(KERN_ERR "Unable to allocate memory for" | ||
812 | " struct se_queue_req\n"); | ||
813 | return -1; | ||
814 | } | ||
815 | INIT_LIST_HEAD(&qr->qr_list); | ||
816 | |||
817 | qr->cmd = (void *)cmd; | ||
818 | qr->state = t_state; | ||
819 | 624 | ||
820 | if (t_state) { | 625 | if (t_state) { |
821 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 626 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
822 | cmd->t_state = t_state; | 627 | cmd->t_state = t_state; |
823 | atomic_set(&T_TASK(cmd)->t_transport_active, 1); | 628 | atomic_set(&cmd->t_transport_active, 1); |
824 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 629 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
825 | } | 630 | } |
826 | 631 | ||
827 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | 632 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); |
828 | list_add_tail(&qr->qr_list, &qobj->qobj_list); | 633 | if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) { |
829 | atomic_inc(&T_TASK(cmd)->t_transport_queue_active); | 634 | cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL; |
635 | list_add(&cmd->se_queue_node, &qobj->qobj_list); | ||
636 | } else | ||
637 | list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); | ||
638 | atomic_inc(&cmd->t_transport_queue_active); | ||
830 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 639 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
831 | 640 | ||
832 | atomic_inc(&qobj->queue_cnt); | 641 | atomic_inc(&qobj->queue_cnt); |
833 | wake_up_interruptible(&qobj->thread_wq); | 642 | wake_up_interruptible(&qobj->thread_wq); |
834 | return 0; | ||
835 | } | ||
836 | |||
837 | /* | ||
838 | * Called with struct se_queue_obj->cmd_queue_lock held. | ||
839 | */ | ||
840 | static struct se_queue_req * | ||
841 | __transport_get_qr_from_queue(struct se_queue_obj *qobj) | ||
842 | { | ||
843 | struct se_cmd *cmd; | ||
844 | struct se_queue_req *qr = NULL; | ||
845 | |||
846 | if (list_empty(&qobj->qobj_list)) | ||
847 | return NULL; | ||
848 | |||
849 | list_for_each_entry(qr, &qobj->qobj_list, qr_list) | ||
850 | break; | ||
851 | |||
852 | if (qr->cmd) { | ||
853 | cmd = (struct se_cmd *)qr->cmd; | ||
854 | atomic_dec(&T_TASK(cmd)->t_transport_queue_active); | ||
855 | } | ||
856 | list_del(&qr->qr_list); | ||
857 | atomic_dec(&qobj->queue_cnt); | ||
858 | |||
859 | return qr; | ||
860 | } | 643 | } |
861 | 644 | ||
862 | static struct se_queue_req * | 645 | static struct se_cmd * |
863 | transport_get_qr_from_queue(struct se_queue_obj *qobj) | 646 | transport_get_cmd_from_queue(struct se_queue_obj *qobj) |
864 | { | 647 | { |
865 | struct se_cmd *cmd; | 648 | struct se_cmd *cmd; |
866 | struct se_queue_req *qr; | ||
867 | unsigned long flags; | 649 | unsigned long flags; |
868 | 650 | ||
869 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | 651 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); |
@@ -871,50 +653,42 @@ transport_get_qr_from_queue(struct se_queue_obj *qobj) | |||
871 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 653 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
872 | return NULL; | 654 | return NULL; |
873 | } | 655 | } |
656 | cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node); | ||
874 | 657 | ||
875 | list_for_each_entry(qr, &qobj->qobj_list, qr_list) | 658 | atomic_dec(&cmd->t_transport_queue_active); |
876 | break; | ||
877 | 659 | ||
878 | if (qr->cmd) { | 660 | list_del(&cmd->se_queue_node); |
879 | cmd = (struct se_cmd *)qr->cmd; | ||
880 | atomic_dec(&T_TASK(cmd)->t_transport_queue_active); | ||
881 | } | ||
882 | list_del(&qr->qr_list); | ||
883 | atomic_dec(&qobj->queue_cnt); | 661 | atomic_dec(&qobj->queue_cnt); |
884 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 662 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
885 | 663 | ||
886 | return qr; | 664 | return cmd; |
887 | } | 665 | } |
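transport_get_cmd_from_queue() can now pop with list_first_entry() plus list_del() because struct se_cmd embeds its own se_queue_node; the old se_queue_req wrapper, and the allocation it required per queueing, are gone. A small sketch of the intrusive-list mechanics with a hand-rolled container_of() (toy types, assuming nothing beyond libc):

#include <stddef.h>
#include <stdio.h>

struct list_node { struct list_node *next, *prev; };

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_cmd {
    int tag;
    struct list_node queue_node;   /* analog of se_cmd->se_queue_node */
};

static void list_init(struct list_node *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_node *n, struct list_node *h)
{
    n->prev = h->prev; n->next = h;
    h->prev->next = n; h->prev = n;
}

static void list_del(struct list_node *n)
{
    n->prev->next = n->next; n->next->prev = n->prev;
}

int main(void)
{
    struct list_node queue;
    struct toy_cmd a = { .tag = 7 };

    list_init(&queue);
    list_add_tail(&a.queue_node, &queue);

    /* list_first_entry(): recover the containing command from its node */
    struct toy_cmd *first = container_of(queue.next, struct toy_cmd, queue_node);
    list_del(&first->queue_node);
    printf("dequeued tag %d\n", first->tag);   /* prints 7 */
    return 0;
}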
888 | 666 | ||
889 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd, | 667 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd, |
890 | struct se_queue_obj *qobj) | 668 | struct se_queue_obj *qobj) |
891 | { | 669 | { |
892 | struct se_cmd *q_cmd; | 670 | struct se_cmd *t; |
893 | struct se_queue_req *qr = NULL, *qr_p = NULL; | ||
894 | unsigned long flags; | 671 | unsigned long flags; |
895 | 672 | ||
896 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | 673 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); |
897 | if (!(atomic_read(&T_TASK(cmd)->t_transport_queue_active))) { | 674 | if (!atomic_read(&cmd->t_transport_queue_active)) { |
898 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 675 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
899 | return; | 676 | return; |
900 | } | 677 | } |
901 | 678 | ||
902 | list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) { | 679 | list_for_each_entry(t, &qobj->qobj_list, se_queue_node) |
903 | q_cmd = (struct se_cmd *)qr->cmd; | 680 | if (t == cmd) { |
904 | if (q_cmd != cmd) | 681 | atomic_dec(&cmd->t_transport_queue_active); |
905 | continue; | 682 | atomic_dec(&qobj->queue_cnt); |
906 | 683 | list_del(&cmd->se_queue_node); | |
907 | atomic_dec(&T_TASK(q_cmd)->t_transport_queue_active); | 684 | break; |
908 | atomic_dec(&qobj->queue_cnt); | 685 | } |
909 | list_del(&qr->qr_list); | ||
910 | kfree(qr); | ||
911 | } | ||
912 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 686 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
913 | 687 | ||
914 | if (atomic_read(&T_TASK(cmd)->t_transport_queue_active)) { | 688 | if (atomic_read(&cmd->t_transport_queue_active)) { |
915 | printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n", | 689 | pr_err("ITT: 0x%08x t_transport_queue_active: %d\n", |
916 | CMD_TFO(cmd)->get_task_tag(cmd), | 690 | cmd->se_tfo->get_task_tag(cmd), |
917 | atomic_read(&T_TASK(cmd)->t_transport_queue_active)); | 691 | atomic_read(&cmd->t_transport_queue_active)); |
918 | } | 692 | } |
919 | } | 693 | } |
920 | 694 | ||
@@ -924,7 +698,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd, | |||
924 | */ | 698 | */ |
925 | void transport_complete_sync_cache(struct se_cmd *cmd, int good) | 699 | void transport_complete_sync_cache(struct se_cmd *cmd, int good) |
926 | { | 700 | { |
927 | struct se_task *task = list_entry(T_TASK(cmd)->t_task_list.next, | 701 | struct se_task *task = list_entry(cmd->t_task_list.next, |
928 | struct se_task, t_list); | 702 | struct se_task, t_list); |
929 | 703 | ||
930 | if (good) { | 704 | if (good) { |
@@ -933,7 +707,7 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good) | |||
933 | } else { | 707 | } else { |
934 | task->task_scsi_status = SAM_STAT_CHECK_CONDITION; | 708 | task->task_scsi_status = SAM_STAT_CHECK_CONDITION; |
935 | task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST; | 709 | task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST; |
936 | TASK_CMD(task)->transport_error_status = | 710 | task->task_se_cmd->transport_error_status = |
937 | PYX_TRANSPORT_ILLEGAL_REQUEST; | 711 | PYX_TRANSPORT_ILLEGAL_REQUEST; |
938 | } | 712 | } |
939 | 713 | ||
@@ -948,22 +722,18 @@ EXPORT_SYMBOL(transport_complete_sync_cache); | |||
948 | */ | 722 | */ |
949 | void transport_complete_task(struct se_task *task, int success) | 723 | void transport_complete_task(struct se_task *task, int success) |
950 | { | 724 | { |
951 | struct se_cmd *cmd = TASK_CMD(task); | 725 | struct se_cmd *cmd = task->task_se_cmd; |
952 | struct se_device *dev = task->se_dev; | 726 | struct se_device *dev = task->se_dev; |
953 | int t_state; | 727 | int t_state; |
954 | unsigned long flags; | 728 | unsigned long flags; |
955 | #if 0 | 729 | #if 0 |
956 | printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task, | 730 | pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task, |
957 | T_TASK(cmd)->t_task_cdb[0], dev); | 731 | cmd->t_task_cdb[0], dev); |
958 | #endif | 732 | #endif |
959 | if (dev) { | 733 | if (dev) |
960 | spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); | ||
961 | atomic_inc(&dev->depth_left); | 734 | atomic_inc(&dev->depth_left); |
962 | atomic_inc(&SE_HBA(dev)->left_queue_depth); | ||
963 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | ||
964 | } | ||
965 | 735 | ||
966 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 736 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
967 | atomic_set(&task->task_active, 0); | 737 | atomic_set(&task->task_active, 0); |
968 | 738 | ||
969 | /* | 739 | /* |
@@ -985,14 +755,14 @@ void transport_complete_task(struct se_task *task, int success) | |||
985 | */ | 755 | */ |
986 | if (atomic_read(&task->task_stop)) { | 756 | if (atomic_read(&task->task_stop)) { |
987 | /* | 757 | /* |
988 | * Decrement T_TASK(cmd)->t_se_count if this task had | 758 | * Decrement cmd->t_se_count if this task had |
989 | * previously thrown its timeout exception handler. | 759 | * previously thrown its timeout exception handler. |
990 | */ | 760 | */ |
991 | if (atomic_read(&task->task_timeout)) { | 761 | if (atomic_read(&task->task_timeout)) { |
992 | atomic_dec(&T_TASK(cmd)->t_se_count); | 762 | atomic_dec(&cmd->t_se_count); |
993 | atomic_set(&task->task_timeout, 0); | 763 | atomic_set(&task->task_timeout, 0); |
994 | } | 764 | } |
995 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 765 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
996 | 766 | ||
997 | complete(&task->task_stop_comp); | 767 | complete(&task->task_stop_comp); |
998 | return; | 768 | return; |
@@ -1003,34 +773,34 @@ void transport_complete_task(struct se_task *task, int success) | |||
1003 | * the processing thread. | 773 | * the processing thread. |
1004 | */ | 774 | */ |
1005 | if (atomic_read(&task->task_timeout)) { | 775 | if (atomic_read(&task->task_timeout)) { |
1006 | if (!(atomic_dec_and_test( | 776 | if (!atomic_dec_and_test( |
1007 | &T_TASK(cmd)->t_task_cdbs_timeout_left))) { | 777 | &cmd->t_task_cdbs_timeout_left)) { |
1008 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | 778 | spin_unlock_irqrestore(&cmd->t_state_lock, |
1009 | flags); | 779 | flags); |
1010 | return; | 780 | return; |
1011 | } | 781 | } |
1012 | t_state = TRANSPORT_COMPLETE_TIMEOUT; | 782 | t_state = TRANSPORT_COMPLETE_TIMEOUT; |
1013 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 783 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
1014 | 784 | ||
1015 | transport_add_cmd_to_queue(cmd, t_state); | 785 | transport_add_cmd_to_queue(cmd, t_state); |
1016 | return; | 786 | return; |
1017 | } | 787 | } |
1018 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_timeout_left); | 788 | atomic_dec(&cmd->t_task_cdbs_timeout_left); |
1019 | 789 | ||
1020 | /* | 790 | /* |
1021 | * Decrement the outstanding t_task_cdbs_left count. The last | 791 | * Decrement the outstanding t_task_cdbs_left count. The last |
1022 | * struct se_task from struct se_cmd will complete itself into the | 792 | * struct se_task from struct se_cmd will complete itself into the |
1023 | * device queue depending upon int success. | 793 | * device queue depending upon int success. |
1024 | */ | 794 | */ |
1025 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) { | 795 | if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { |
1026 | if (!success) | 796 | if (!success) |
1027 | T_TASK(cmd)->t_tasks_failed = 1; | 797 | cmd->t_tasks_failed = 1; |
1028 | 798 | ||
1029 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 799 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
1030 | return; | 800 | return; |
1031 | } | 801 | } |
1032 | 802 | ||
1033 | if (!success || T_TASK(cmd)->t_tasks_failed) { | 803 | if (!success || cmd->t_tasks_failed) { |
1034 | t_state = TRANSPORT_COMPLETE_FAILURE; | 804 | t_state = TRANSPORT_COMPLETE_FAILURE; |
1035 | if (!task->task_error_status) { | 805 | if (!task->task_error_status) { |
1036 | task->task_error_status = | 806 | task->task_error_status = |
@@ -1039,10 +809,10 @@ void transport_complete_task(struct se_task *task, int success) | |||
1039 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | 809 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; |
1040 | } | 810 | } |
1041 | } else { | 811 | } else { |
1042 | atomic_set(&T_TASK(cmd)->t_transport_complete, 1); | 812 | atomic_set(&cmd->t_transport_complete, 1); |
1043 | t_state = TRANSPORT_COMPLETE_OK; | 813 | t_state = TRANSPORT_COMPLETE_OK; |
1044 | } | 814 | } |
1045 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 815 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
1046 | 816 | ||
1047 | transport_add_cmd_to_queue(cmd, t_state); | 817 | transport_add_cmd_to_queue(cmd, t_state); |
1048 | } | 818 | } |
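In transport_complete_task() every task decrements t_task_cdbs_left, and only the caller that drops the counter to zero queues the command for completion, so no lock is needed to elect the last finisher. A user-space analog with C11 atomics and pthreads (NTASKS and task_done are illustrative; build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTASKS 4

static atomic_int cdbs_left = NTASKS;

static void *task_done(void *arg)
{
    /* analog of atomic_dec_and_test(&cmd->t_task_cdbs_left) */
    if (atomic_fetch_sub(&cdbs_left, 1) == 1)
        printf("task %ld was last: queue command completion\n", (long)arg);
    return NULL;
}

int main(void)
{
    pthread_t t[NTASKS];

    for (long i = 0; i < NTASKS; i++)
        pthread_create(&t[i], NULL, task_done, (void *)i);
    for (int i = 0; i < NTASKS; i++)
        pthread_join(t[i], NULL);
    return 0;
}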
@@ -1080,9 +850,9 @@ static inline int transport_add_task_check_sam_attr( | |||
1080 | &task_prev->t_execute_list : | 850 | &task_prev->t_execute_list : |
1081 | &dev->execute_task_list); | 851 | &dev->execute_task_list); |
1082 | 852 | ||
1083 | DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x" | 853 | pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x" |
1084 | " in execution queue\n", | 854 | " in execution queue\n", |
1085 | T_TASK(task->task_se_cmd)->t_task_cdb[0]); | 855 | task->task_se_cmd->t_task_cdb[0]); |
1086 | return 1; | 856 | return 1; |
1087 | } | 857 | } |
1088 | /* | 858 | /* |
@@ -1124,8 +894,8 @@ static void __transport_add_task_to_execute_queue( | |||
1124 | 894 | ||
1125 | atomic_set(&task->task_state_active, 1); | 895 | atomic_set(&task->task_state_active, 1); |
1126 | 896 | ||
1127 | DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", | 897 | pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", |
1128 | CMD_TFO(task->task_se_cmd)->get_task_tag(task->task_se_cmd), | 898 | task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd), |
1129 | task, dev); | 899 | task, dev); |
1130 | } | 900 | } |
1131 | 901 | ||
@@ -1135,8 +905,8 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) | |||
1135 | struct se_task *task; | 905 | struct se_task *task; |
1136 | unsigned long flags; | 906 | unsigned long flags; |
1137 | 907 | ||
1138 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 908 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
1139 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { | 909 | list_for_each_entry(task, &cmd->t_task_list, t_list) { |
1140 | dev = task->se_dev; | 910 | dev = task->se_dev; |
1141 | 911 | ||
1142 | if (atomic_read(&task->task_state_active)) | 912 | if (atomic_read(&task->task_state_active)) |
@@ -1146,23 +916,23 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) | |||
1146 | list_add_tail(&task->t_state_list, &dev->state_task_list); | 916 | list_add_tail(&task->t_state_list, &dev->state_task_list); |
1147 | atomic_set(&task->task_state_active, 1); | 917 | atomic_set(&task->task_state_active, 1); |
1148 | 918 | ||
1149 | DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", | 919 | pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", |
1150 | CMD_TFO(task->task_se_cmd)->get_task_tag( | 920 | task->task_se_cmd->se_tfo->get_task_tag( |
1151 | task->task_se_cmd), task, dev); | 921 | task->task_se_cmd), task, dev); |
1152 | 922 | ||
1153 | spin_unlock(&dev->execute_task_lock); | 923 | spin_unlock(&dev->execute_task_lock); |
1154 | } | 924 | } |
1155 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 925 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
1156 | } | 926 | } |
1157 | 927 | ||
1158 | static void transport_add_tasks_from_cmd(struct se_cmd *cmd) | 928 | static void transport_add_tasks_from_cmd(struct se_cmd *cmd) |
1159 | { | 929 | { |
1160 | struct se_device *dev = SE_DEV(cmd); | 930 | struct se_device *dev = cmd->se_dev; |
1161 | struct se_task *task, *task_prev = NULL; | 931 | struct se_task *task, *task_prev = NULL; |
1162 | unsigned long flags; | 932 | unsigned long flags; |
1163 | 933 | ||
1164 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 934 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
1165 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { | 935 | list_for_each_entry(task, &cmd->t_task_list, t_list) { |
1166 | if (atomic_read(&task->task_execute_queue)) | 936 | if (atomic_read(&task->task_execute_queue)) |
1167 | continue; | 937 | continue; |
1168 | /* | 938 | /* |
@@ -1174,30 +944,6 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd) | |||
1174 | task_prev = task; | 944 | task_prev = task; |
1175 | } | 945 | } |
1176 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | 946 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
1177 | |||
1178 | return; | ||
1179 | } | ||
1180 | |||
1181 | /* transport_get_task_from_execute_queue(): | ||
1182 | * | ||
1183 | * Called with dev->execute_task_lock held. | ||
1184 | */ | ||
1185 | static struct se_task * | ||
1186 | transport_get_task_from_execute_queue(struct se_device *dev) | ||
1187 | { | ||
1188 | struct se_task *task; | ||
1189 | |||
1190 | if (list_empty(&dev->execute_task_list)) | ||
1191 | return NULL; | ||
1192 | |||
1193 | list_for_each_entry(task, &dev->execute_task_list, t_execute_list) | ||
1194 | break; | ||
1195 | |||
1196 | list_del(&task->t_execute_list); | ||
1197 | atomic_set(&task->task_execute_queue, 0); | ||
1198 | atomic_dec(&dev->execute_tasks); | ||
1199 | |||
1200 | return task; | ||
1201 | } | 947 | } |
1202 | 948 | ||
1203 | /* transport_remove_task_from_execute_queue(): | 949 | /* transport_remove_task_from_execute_queue(): |
@@ -1222,6 +968,40 @@ void transport_remove_task_from_execute_queue( | |||
1222 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | 968 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
1223 | } | 969 | } |
1224 | 970 | ||
971 | /* | ||
972 | * Handle QUEUE_FULL / -EAGAIN status | ||
973 | */ | ||
974 | |||
975 | static void target_qf_do_work(struct work_struct *work) | ||
976 | { | ||
977 | struct se_device *dev = container_of(work, struct se_device, | ||
978 | qf_work_queue); | ||
979 | struct se_cmd *cmd, *cmd_tmp; | ||
980 | |||
981 | spin_lock_irq(&dev->qf_cmd_lock); | ||
982 | list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) { | ||
983 | |||
984 | list_del(&cmd->se_qf_node); | ||
985 | atomic_dec(&dev->dev_qf_count); | ||
986 | smp_mb__after_atomic_dec(); | ||
987 | spin_unlock_irq(&dev->qf_cmd_lock); | ||
988 | |||
989 | pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" | ||
990 | " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, | ||
991 | (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" : | ||
992 | (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" | ||
993 | : "UNKNOWN"); | ||
994 | /* | ||
995 | * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd | ||
996 | * has been added to head of queue | ||
997 | */ | ||
998 | transport_add_cmd_to_queue(cmd, cmd->t_state); | ||
999 | |||
1000 | spin_lock_irq(&dev->qf_cmd_lock); | ||
1001 | } | ||
1002 | spin_unlock_irq(&dev->qf_cmd_lock); | ||
1003 | } | ||
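target_qf_do_work() unlinks each command under qf_cmd_lock and only then drops the lock to requeue it, re-taking the lock before touching the list again; releasing the lock mid-loop is safe precisely because the entry is already off the list. A compact pthread sketch of that drain pattern (toy types; requeue() stands in for transport_add_cmd_to_queue()):

#include <pthread.h>
#include <stdio.h>

struct qf_cmd { int tag; struct qf_cmd *next; };

static pthread_mutex_t qf_lock = PTHREAD_MUTEX_INITIALIZER;
static struct qf_cmd *qf_list;

static void requeue(struct qf_cmd *c)
{
    printf("retrying tag %d\n", c->tag);
}

static void qf_do_work(void)
{
    pthread_mutex_lock(&qf_lock);
    while (qf_list) {
        struct qf_cmd *c = qf_list;

        qf_list = c->next;              /* unlink under the lock... */
        pthread_mutex_unlock(&qf_lock); /* ...then drop it for the real work */
        requeue(c);
        pthread_mutex_lock(&qf_lock);   /* re-take before looking again */
    }
    pthread_mutex_unlock(&qf_lock);
}

int main(void)
{
    struct qf_cmd b = { .tag = 2, .next = NULL };
    struct qf_cmd a = { .tag = 1, .next = &b };

    qf_list = &a;
    qf_do_work();
    return 0;
}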
1004 | |||
1225 | unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) | 1005 | unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) |
1226 | { | 1006 | { |
1227 | switch (cmd->data_direction) { | 1007 | switch (cmd->data_direction) { |
@@ -1269,7 +1049,7 @@ void transport_dump_dev_state( | |||
1269 | atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left), | 1049 | atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left), |
1270 | dev->queue_depth); | 1050 | dev->queue_depth); |
1271 | *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n", | 1051 | *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n", |
1272 | DEV_ATTRIB(dev)->block_size, DEV_ATTRIB(dev)->max_sectors); | 1052 | dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors); |
1273 | *bl += sprintf(b + *bl, " "); | 1053 | *bl += sprintf(b + *bl, " "); |
1274 | } | 1054 | } |
1275 | 1055 | ||
@@ -1279,33 +1059,29 @@ void transport_dump_dev_state( | |||
1279 | */ | 1059 | */ |
1280 | static void transport_release_all_cmds(struct se_device *dev) | 1060 | static void transport_release_all_cmds(struct se_device *dev) |
1281 | { | 1061 | { |
1282 | struct se_cmd *cmd = NULL; | 1062 | struct se_cmd *cmd, *tcmd; |
1283 | struct se_queue_req *qr = NULL, *qr_p = NULL; | ||
1284 | int bug_out = 0, t_state; | 1063 | int bug_out = 0, t_state; |
1285 | unsigned long flags; | 1064 | unsigned long flags; |
1286 | 1065 | ||
1287 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); | 1066 | spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); |
1288 | list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list, | 1067 | list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list, |
1289 | qr_list) { | 1068 | se_queue_node) { |
1290 | 1069 | t_state = cmd->t_state; | |
1291 | cmd = (struct se_cmd *)qr->cmd; | 1070 | list_del(&cmd->se_queue_node); |
1292 | t_state = qr->state; | 1071 | spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, |
1293 | list_del(&qr->qr_list); | ||
1294 | kfree(qr); | ||
1295 | spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, | ||
1296 | flags); | 1072 | flags); |
1297 | 1073 | ||
1298 | printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u," | 1074 | pr_err("Releasing ITT: 0x%08x, i_state: %u," |
1299 | " t_state: %u directly\n", | 1075 | " t_state: %u directly\n", |
1300 | CMD_TFO(cmd)->get_task_tag(cmd), | 1076 | cmd->se_tfo->get_task_tag(cmd), |
1301 | CMD_TFO(cmd)->get_cmd_state(cmd), t_state); | 1077 | cmd->se_tfo->get_cmd_state(cmd), t_state); |
1302 | 1078 | ||
1303 | transport_release_fe_cmd(cmd); | 1079 | transport_release_fe_cmd(cmd); |
1304 | bug_out = 1; | 1080 | bug_out = 1; |
1305 | 1081 | ||
1306 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); | 1082 | spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); |
1307 | } | 1083 | } |
1308 | spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags); | 1084 | spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags); |
1309 | #if 0 | 1085 | #if 0 |
1310 | if (bug_out) | 1086 | if (bug_out) |
1311 | BUG(); | 1087 | BUG(); |
@@ -1362,7 +1138,7 @@ void transport_dump_vpd_proto_id( | |||
1362 | if (p_buf) | 1138 | if (p_buf) |
1363 | strncpy(p_buf, buf, p_buf_len); | 1139 | strncpy(p_buf, buf, p_buf_len); |
1364 | else | 1140 | else |
1365 | printk(KERN_INFO "%s", buf); | 1141 | pr_debug("%s", buf); |
1366 | } | 1142 | } |
1367 | 1143 | ||
1368 | void | 1144 | void |
@@ -1387,7 +1163,8 @@ int transport_dump_vpd_assoc( | |||
1387 | int p_buf_len) | 1163 | int p_buf_len) |
1388 | { | 1164 | { |
1389 | unsigned char buf[VPD_TMP_BUF_SIZE]; | 1165 | unsigned char buf[VPD_TMP_BUF_SIZE]; |
1390 | int ret = 0, len; | 1166 | int ret = 0; |
1167 | int len; | ||
1391 | 1168 | ||
1392 | memset(buf, 0, VPD_TMP_BUF_SIZE); | 1169 | memset(buf, 0, VPD_TMP_BUF_SIZE); |
1393 | len = sprintf(buf, "T10 VPD Identifier Association: "); | 1170 | len = sprintf(buf, "T10 VPD Identifier Association: "); |
@@ -1404,14 +1181,14 @@ int transport_dump_vpd_assoc( | |||
1404 | break; | 1181 | break; |
1405 | default: | 1182 | default: |
1406 | sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); | 1183 | sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); |
1407 | ret = -1; | 1184 | ret = -EINVAL; |
1408 | break; | 1185 | break; |
1409 | } | 1186 | } |
1410 | 1187 | ||
1411 | if (p_buf) | 1188 | if (p_buf) |
1412 | strncpy(p_buf, buf, p_buf_len); | 1189 | strncpy(p_buf, buf, p_buf_len); |
1413 | else | 1190 | else |
1414 | printk("%s", buf); | 1191 | pr_debug("%s", buf); |
1415 | 1192 | ||
1416 | return ret; | 1193 | return ret; |
1417 | } | 1194 | } |
@@ -1434,7 +1211,8 @@ int transport_dump_vpd_ident_type( | |||
1434 | int p_buf_len) | 1211 | int p_buf_len) |
1435 | { | 1212 | { |
1436 | unsigned char buf[VPD_TMP_BUF_SIZE]; | 1213 | unsigned char buf[VPD_TMP_BUF_SIZE]; |
1437 | int ret = 0, len; | 1214 | int ret = 0; |
1215 | int len; | ||
1438 | 1216 | ||
1439 | memset(buf, 0, VPD_TMP_BUF_SIZE); | 1217 | memset(buf, 0, VPD_TMP_BUF_SIZE); |
1440 | len = sprintf(buf, "T10 VPD Identifier Type: "); | 1218 | len = sprintf(buf, "T10 VPD Identifier Type: "); |
@@ -1461,14 +1239,17 @@ int transport_dump_vpd_ident_type( | |||
1461 | default: | 1239 | default: |
1462 | sprintf(buf+len, "Unsupported: 0x%02x\n", | 1240 | sprintf(buf+len, "Unsupported: 0x%02x\n", |
1463 | vpd->device_identifier_type); | 1241 | vpd->device_identifier_type); |
1464 | ret = -1; | 1242 | ret = -EINVAL; |
1465 | break; | 1243 | break; |
1466 | } | 1244 | } |
1467 | 1245 | ||
1468 | if (p_buf) | 1246 | if (p_buf) { |
1247 | if (p_buf_len < strlen(buf)+1) | ||
1248 | return -EINVAL; | ||
1469 | strncpy(p_buf, buf, p_buf_len); | 1249 | strncpy(p_buf, buf, p_buf_len); |
1470 | else | 1250 | } else { |
1471 | printk("%s", buf); | 1251 | pr_debug("%s", buf); |
1252 | } | ||
1472 | 1253 | ||
1473 | return ret; | 1254 | return ret; |
1474 | } | 1255 | } |
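The added p_buf_len check matters because strncpy() truncates without NUL-terminating when the destination is too small; together with the -1 to -EINVAL conversions in this file, callers now receive errno-style results they can propagate. A hedged user-space sketch of the same guard (copy_ident is an illustrative name):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Copy only when the destination can hold the NUL-terminated string;
 * mirrors the p_buf_len < strlen(buf) + 1 check added in the patch. */
static int copy_ident(char *dst, size_t dst_len, const char *src)
{
    if (dst_len < strlen(src) + 1)
        return -EINVAL;        /* errno-style error, not a bare -1 */
    strncpy(dst, src, dst_len);
    return 0;
}

int main(void)
{
    char small[8], big[64];

    printf("small: %d\n", copy_ident(small, sizeof(small),
                                     "T10 VPD Identifier Type: NAA\n"));
    printf("big:   %d\n", copy_ident(big, sizeof(big),
                                     "T10 VPD Identifier Type: NAA\n"));
    return 0;
}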
@@ -1511,14 +1292,14 @@ int transport_dump_vpd_ident( | |||
1511 | default: | 1292 | default: |
1512 | sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" | 1293 | sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" |
1513 | " 0x%02x", vpd->device_identifier_code_set); | 1294 | " 0x%02x", vpd->device_identifier_code_set); |
1514 | ret = -1; | 1295 | ret = -EINVAL; |
1515 | break; | 1296 | break; |
1516 | } | 1297 | } |
1517 | 1298 | ||
1518 | if (p_buf) | 1299 | if (p_buf) |
1519 | strncpy(p_buf, buf, p_buf_len); | 1300 | strncpy(p_buf, buf, p_buf_len); |
1520 | else | 1301 | else |
1521 | printk("%s", buf); | 1302 | pr_debug("%s", buf); |
1522 | 1303 | ||
1523 | return ret; | 1304 | return ret; |
1524 | } | 1305 | } |
@@ -1569,51 +1350,51 @@ static void core_setup_task_attr_emulation(struct se_device *dev) | |||
1569 | * This is currently not available in upstream Linux/SCSI Target | 1350 | * This is currently not available in upstream Linux/SCSI Target |
1570 | * mode code, and is assumed to be disabled while using TCM/pSCSI. | 1351 | * mode code, and is assumed to be disabled while using TCM/pSCSI. |
1571 | */ | 1352 | */ |
1572 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | 1353 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
1573 | dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH; | 1354 | dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH; |
1574 | return; | 1355 | return; |
1575 | } | 1356 | } |
1576 | 1357 | ||
1577 | dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; | 1358 | dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; |
1578 | DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" | 1359 | pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" |
1579 | " device\n", TRANSPORT(dev)->name, | 1360 | " device\n", dev->transport->name, |
1580 | TRANSPORT(dev)->get_device_rev(dev)); | 1361 | dev->transport->get_device_rev(dev)); |
1581 | } | 1362 | } |
1582 | 1363 | ||
1583 | static void scsi_dump_inquiry(struct se_device *dev) | 1364 | static void scsi_dump_inquiry(struct se_device *dev) |
1584 | { | 1365 | { |
1585 | struct t10_wwn *wwn = DEV_T10_WWN(dev); | 1366 | struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; |
1586 | int i, device_type; | 1367 | int i, device_type; |
1587 | /* | 1368 | /* |
1588 | * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer | 1369 | * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer |
1589 | */ | 1370 | */ |
1590 | printk(" Vendor: "); | 1371 | pr_debug(" Vendor: "); |
1591 | for (i = 0; i < 8; i++) | 1372 | for (i = 0; i < 8; i++) |
1592 | if (wwn->vendor[i] >= 0x20) | 1373 | if (wwn->vendor[i] >= 0x20) |
1593 | printk("%c", wwn->vendor[i]); | 1374 | pr_debug("%c", wwn->vendor[i]); |
1594 | else | 1375 | else |
1595 | printk(" "); | 1376 | pr_debug(" "); |
1596 | 1377 | ||
1597 | printk(" Model: "); | 1378 | pr_debug(" Model: "); |
1598 | for (i = 0; i < 16; i++) | 1379 | for (i = 0; i < 16; i++) |
1599 | if (wwn->model[i] >= 0x20) | 1380 | if (wwn->model[i] >= 0x20) |
1600 | printk("%c", wwn->model[i]); | 1381 | pr_debug("%c", wwn->model[i]); |
1601 | else | 1382 | else |
1602 | printk(" "); | 1383 | pr_debug(" "); |
1603 | 1384 | ||
1604 | printk(" Revision: "); | 1385 | pr_debug(" Revision: "); |
1605 | for (i = 0; i < 4; i++) | 1386 | for (i = 0; i < 4; i++) |
1606 | if (wwn->revision[i] >= 0x20) | 1387 | if (wwn->revision[i] >= 0x20) |
1607 | printk("%c", wwn->revision[i]); | 1388 | pr_debug("%c", wwn->revision[i]); |
1608 | else | 1389 | else |
1609 | printk(" "); | 1390 | pr_debug(" "); |
1610 | 1391 | ||
1611 | printk("\n"); | 1392 | pr_debug("\n"); |
1612 | 1393 | ||
1613 | device_type = TRANSPORT(dev)->get_device_type(dev); | 1394 | device_type = dev->transport->get_device_type(dev); |
1614 | printk(" Type: %s ", scsi_device_type(device_type)); | 1395 | pr_debug(" Type: %s ", scsi_device_type(device_type)); |
1615 | printk(" ANSI SCSI revision: %02x\n", | 1396 | pr_debug(" ANSI SCSI revision: %02x\n", |
1616 | TRANSPORT(dev)->get_device_rev(dev)); | 1397 | dev->transport->get_device_rev(dev)); |
1617 | } | 1398 | } |
1618 | 1399 | ||
1619 | struct se_device *transport_add_device_to_core_hba( | 1400 | struct se_device *transport_add_device_to_core_hba( |
@@ -1630,33 +1411,15 @@ struct se_device *transport_add_device_to_core_hba( | |||
1630 | struct se_device *dev; | 1411 | struct se_device *dev; |
1631 | 1412 | ||
1632 | dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); | 1413 | dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); |
1633 | if (!(dev)) { | 1414 | if (!dev) { |
1634 | printk(KERN_ERR "Unable to allocate memory for se_dev_t\n"); | 1415 | pr_err("Unable to allocate memory for se_dev_t\n"); |
1635 | return NULL; | ||
1636 | } | ||
1637 | dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL); | ||
1638 | if (!(dev->dev_queue_obj)) { | ||
1639 | printk(KERN_ERR "Unable to allocate memory for" | ||
1640 | " dev->dev_queue_obj\n"); | ||
1641 | kfree(dev); | ||
1642 | return NULL; | 1416 | return NULL; |
1643 | } | 1417 | } |
1644 | transport_init_queue_obj(dev->dev_queue_obj); | ||
1645 | |||
1646 | dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj), | ||
1647 | GFP_KERNEL); | ||
1648 | if (!(dev->dev_status_queue_obj)) { | ||
1649 | printk(KERN_ERR "Unable to allocate memory for" | ||
1650 | " dev->dev_status_queue_obj\n"); | ||
1651 | kfree(dev->dev_queue_obj); | ||
1652 | kfree(dev); | ||
1653 | return NULL; | ||
1654 | } | ||
1655 | transport_init_queue_obj(dev->dev_status_queue_obj); | ||
1656 | 1418 | ||
1419 | transport_init_queue_obj(&dev->dev_queue_obj); | ||
1657 | dev->dev_flags = device_flags; | 1420 | dev->dev_flags = device_flags; |
1658 | dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; | 1421 | dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; |
1659 | dev->dev_ptr = (void *) transport_dev; | 1422 | dev->dev_ptr = transport_dev; |
1660 | dev->se_hba = hba; | 1423 | dev->se_hba = hba; |
1661 | dev->se_sub_dev = se_dev; | 1424 | dev->se_sub_dev = se_dev; |
1662 | dev->transport = transport; | 1425 | dev->transport = transport; |
@@ -1668,6 +1431,7 @@ struct se_device *transport_add_device_to_core_hba( | |||
1668 | INIT_LIST_HEAD(&dev->delayed_cmd_list); | 1431 | INIT_LIST_HEAD(&dev->delayed_cmd_list); |
1669 | INIT_LIST_HEAD(&dev->ordered_cmd_list); | 1432 | INIT_LIST_HEAD(&dev->ordered_cmd_list); |
1670 | INIT_LIST_HEAD(&dev->state_task_list); | 1433 | INIT_LIST_HEAD(&dev->state_task_list); |
1434 | INIT_LIST_HEAD(&dev->qf_cmd_list); | ||
1671 | spin_lock_init(&dev->execute_task_lock); | 1435 | spin_lock_init(&dev->execute_task_lock); |
1672 | spin_lock_init(&dev->delayed_cmd_lock); | 1436 | spin_lock_init(&dev->delayed_cmd_lock); |
1673 | spin_lock_init(&dev->ordered_cmd_lock); | 1437 | spin_lock_init(&dev->ordered_cmd_lock); |
@@ -1678,6 +1442,7 @@ struct se_device *transport_add_device_to_core_hba( | |||
1678 | spin_lock_init(&dev->dev_status_thr_lock); | 1442 | spin_lock_init(&dev->dev_status_thr_lock); |
1679 | spin_lock_init(&dev->se_port_lock); | 1443 | spin_lock_init(&dev->se_port_lock); |
1680 | spin_lock_init(&dev->se_tmr_lock); | 1444 | spin_lock_init(&dev->se_tmr_lock); |
1445 | spin_lock_init(&dev->qf_cmd_lock); | ||
1681 | 1446 | ||
1682 | dev->queue_depth = dev_limits->queue_depth; | 1447 | dev->queue_depth = dev_limits->queue_depth; |
1683 | atomic_set(&dev->depth_left, dev->queue_depth); | 1448 | atomic_set(&dev->depth_left, dev->queue_depth); |
@@ -1715,13 +1480,16 @@ struct se_device *transport_add_device_to_core_hba( | |||
1715 | * Startup the struct se_device processing thread | 1480 | * Startup the struct se_device processing thread |
1716 | */ | 1481 | */ |
1717 | dev->process_thread = kthread_run(transport_processing_thread, dev, | 1482 | dev->process_thread = kthread_run(transport_processing_thread, dev, |
1718 | "LIO_%s", TRANSPORT(dev)->name); | 1483 | "LIO_%s", dev->transport->name); |
1719 | if (IS_ERR(dev->process_thread)) { | 1484 | if (IS_ERR(dev->process_thread)) { |
1720 | printk(KERN_ERR "Unable to create kthread: LIO_%s\n", | 1485 | pr_err("Unable to create kthread: LIO_%s\n", |
1721 | TRANSPORT(dev)->name); | 1486 | dev->transport->name); |
1722 | goto out; | 1487 | goto out; |
1723 | } | 1488 | } |
1724 | 1489 | /* | |
1490 | * Setup work_queue for QUEUE_FULL | ||
1491 | */ | ||
1492 | INIT_WORK(&dev->qf_work_queue, target_qf_do_work); | ||
1725 | /* | 1493 | /* |
1726 | * Preload the initial INQUIRY const values if we are doing | 1494 | * Preload the initial INQUIRY const values if we are doing |
1727 | * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI | 1495 | * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI |
@@ -1730,16 +1498,16 @@ struct se_device *transport_add_device_to_core_hba( | |||
1730 | * originals once back into DEV_T10_WWN(dev) for the virtual device | 1498 | * originals once back into DEV_T10_WWN(dev) for the virtual device |
1731 | * setup. | 1499 | * setup. |
1732 | */ | 1500 | */ |
1733 | if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { | 1501 | if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { |
1734 | if (!(inquiry_prod) || !(inquiry_prod)) { | 1502 | if (!inquiry_prod || !inquiry_rev) { |
1735 | printk(KERN_ERR "All non TCM/pSCSI plugins require" | 1503 | pr_err("All non TCM/pSCSI plugins require" |
1736 | " INQUIRY consts\n"); | 1504 | " INQUIRY consts\n"); |
1737 | goto out; | 1505 | goto out; |
1738 | } | 1506 | } |
1739 | 1507 | ||
1740 | strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8); | 1508 | strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8); |
1741 | strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16); | 1509 | strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16); |
1742 | strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4); | 1510 | strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4); |
1743 | } | 1511 | } |
1744 | scsi_dump_inquiry(dev); | 1512 | scsi_dump_inquiry(dev); |
1745 | 1513 | ||
@@ -1754,8 +1522,6 @@ out: | |||
1754 | 1522 | ||
1755 | se_release_vpd_for_dev(dev); | 1523 | se_release_vpd_for_dev(dev); |
1756 | 1524 | ||
1757 | kfree(dev->dev_status_queue_obj); | ||
1758 | kfree(dev->dev_queue_obj); | ||
1759 | kfree(dev); | 1525 | kfree(dev); |
1760 | 1526 | ||
1761 | return NULL; | 1527 | return NULL; |
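With dev_queue_obj embedded in struct se_device rather than separately allocated, one failure path and its kfree() unwind disappear, which is why the out: path above now frees only dev itself. A minimal sketch of the embedded-member pattern under those assumptions (toy struct names):

#include <stdio.h>
#include <stdlib.h>

struct queue_obj { int cnt; };

struct device {
    struct queue_obj queue;   /* embedded: lives and dies with the device */
};

static void queue_init(struct queue_obj *q) { q->cnt = 0; }

static struct device *device_alloc(void)
{
    struct device *dev = calloc(1, sizeof(*dev));

    if (!dev)
        return NULL;          /* single failure path; nothing to unwind */
    queue_init(&dev->queue);  /* analog of transport_init_queue_obj(&dev->dev_queue_obj) */
    return dev;
}

int main(void)
{
    struct device *dev = device_alloc();

    if (dev) {
        printf("queue cnt %d\n", dev->queue.cnt);
        free(dev);            /* one free, no separate queue-object twin */
    }
    return 0;
}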
@@ -1794,12 +1560,11 @@ transport_generic_get_task(struct se_cmd *cmd, | |||
1794 | enum dma_data_direction data_direction) | 1560 | enum dma_data_direction data_direction) |
1795 | { | 1561 | { |
1796 | struct se_task *task; | 1562 | struct se_task *task; |
1797 | struct se_device *dev = SE_DEV(cmd); | 1563 | struct se_device *dev = cmd->se_dev; |
1798 | unsigned long flags; | ||
1799 | 1564 | ||
1800 | task = dev->transport->alloc_task(cmd); | 1565 | task = dev->transport->alloc_task(cmd->t_task_cdb); |
1801 | if (!task) { | 1566 | if (!task) { |
1802 | printk(KERN_ERR "Unable to allocate struct se_task\n"); | 1567 | pr_err("Unable to allocate struct se_task\n"); |
1803 | return NULL; | 1568 | return NULL; |
1804 | } | 1569 | } |
1805 | 1570 | ||
@@ -1807,26 +1572,15 @@ transport_generic_get_task(struct se_cmd *cmd, | |||
1807 | INIT_LIST_HEAD(&task->t_execute_list); | 1572 | INIT_LIST_HEAD(&task->t_execute_list); |
1808 | INIT_LIST_HEAD(&task->t_state_list); | 1573 | INIT_LIST_HEAD(&task->t_state_list); |
1809 | init_completion(&task->task_stop_comp); | 1574 | init_completion(&task->task_stop_comp); |
1810 | task->task_no = T_TASK(cmd)->t_tasks_no++; | ||
1811 | task->task_se_cmd = cmd; | 1575 | task->task_se_cmd = cmd; |
1812 | task->se_dev = dev; | 1576 | task->se_dev = dev; |
1813 | task->task_data_direction = data_direction; | 1577 | task->task_data_direction = data_direction; |
1814 | 1578 | ||
1815 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
1816 | list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list); | ||
1817 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
1818 | |||
1819 | return task; | 1579 | return task; |
1820 | } | 1580 | } |
1821 | 1581 | ||
1822 | static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); | 1582 | static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); |
1823 | 1583 | ||
1824 | void transport_device_setup_cmd(struct se_cmd *cmd) | ||
1825 | { | ||
1826 | cmd->se_dev = SE_LUN(cmd)->lun_se_dev; | ||
1827 | } | ||
1828 | EXPORT_SYMBOL(transport_device_setup_cmd); | ||
1829 | |||
1830 | /* | 1584 | /* |
1831 | * Used by fabric modules containing a local struct se_cmd within their | 1585 | * Used by fabric modules containing a local struct se_cmd within their |
1832 | * fabric dependent per I/O descriptor. | 1586 | * fabric dependent per I/O descriptor. |
@@ -1840,20 +1594,17 @@ void transport_init_se_cmd( | |||
1840 | int task_attr, | 1594 | int task_attr, |
1841 | unsigned char *sense_buffer) | 1595 | unsigned char *sense_buffer) |
1842 | { | 1596 | { |
1843 | INIT_LIST_HEAD(&cmd->se_lun_list); | 1597 | INIT_LIST_HEAD(&cmd->se_lun_node); |
1844 | INIT_LIST_HEAD(&cmd->se_delayed_list); | 1598 | INIT_LIST_HEAD(&cmd->se_delayed_node); |
1845 | INIT_LIST_HEAD(&cmd->se_ordered_list); | 1599 | INIT_LIST_HEAD(&cmd->se_ordered_node); |
1846 | /* | 1600 | INIT_LIST_HEAD(&cmd->se_qf_node); |
1847 | * Setup t_task pointer to t_task_backstore | ||
1848 | */ | ||
1849 | cmd->t_task = &cmd->t_task_backstore; | ||
1850 | 1601 | ||
1851 | INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list); | 1602 | INIT_LIST_HEAD(&cmd->t_task_list); |
1852 | init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp); | 1603 | init_completion(&cmd->transport_lun_fe_stop_comp); |
1853 | init_completion(&T_TASK(cmd)->transport_lun_stop_comp); | 1604 | init_completion(&cmd->transport_lun_stop_comp); |
1854 | init_completion(&T_TASK(cmd)->t_transport_stop_comp); | 1605 | init_completion(&cmd->t_transport_stop_comp); |
1855 | spin_lock_init(&T_TASK(cmd)->t_state_lock); | 1606 | spin_lock_init(&cmd->t_state_lock); |
1856 | atomic_set(&T_TASK(cmd)->transport_dev_active, 1); | 1607 | atomic_set(&cmd->transport_dev_active, 1); |
1857 | 1608 | ||
1858 | cmd->se_tfo = tfo; | 1609 | cmd->se_tfo = tfo; |
1859 | cmd->se_sess = se_sess; | 1610 | cmd->se_sess = se_sess; |
@@ -1870,23 +1621,23 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd) | |||
1870 | * Check if SAM Task Attribute emulation is enabled for this | 1621 | * Check if SAM Task Attribute emulation is enabled for this |
1871 | * struct se_device storage object | 1622 | * struct se_device storage object |
1872 | */ | 1623 | */ |
1873 | if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) | 1624 | if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) |
1874 | return 0; | 1625 | return 0; |
1875 | 1626 | ||
1876 | if (cmd->sam_task_attr == MSG_ACA_TAG) { | 1627 | if (cmd->sam_task_attr == MSG_ACA_TAG) { |
1877 | DEBUG_STA("SAM Task Attribute ACA" | 1628 | pr_debug("SAM Task Attribute ACA" |
1878 | " emulation is not supported\n"); | 1629 | " emulation is not supported\n"); |
1879 | return -1; | 1630 | return -EINVAL; |
1880 | } | 1631 | } |
1881 | /* | 1632 | /* |
1882 | * Used to determine when ORDERED commands should go from | 1633 | * Used to determine when ORDERED commands should go from |
1883 | * Dormant to Active status. | 1634 | * Dormant to Active status. |
1884 | */ | 1635 | */ |
1885 | cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id); | 1636 | cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id); |
1886 | smp_mb__after_atomic_inc(); | 1637 | smp_mb__after_atomic_inc(); |
1887 | DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", | 1638 | pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", |
1888 | cmd->se_ordered_id, cmd->sam_task_attr, | 1639 | cmd->se_ordered_id, cmd->sam_task_attr, |
1889 | TRANSPORT(cmd->se_dev)->name); | 1640 | cmd->se_dev->transport->name); |
1890 | return 0; | 1641 | return 0; |
1891 | } | 1642 | } |
1892 | 1643 | ||
@@ -1898,8 +1649,8 @@ void transport_free_se_cmd( | |||
1898 | /* | 1649 | /* |
1899 | * Check and free any extended CDB buffer that was allocated | 1650 | * Check and free any extended CDB buffer that was allocated |
1900 | */ | 1651 | */ |
1901 | if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb) | 1652 | if (se_cmd->t_task_cdb != se_cmd->__t_task_cdb) |
1902 | kfree(T_TASK(se_cmd)->t_task_cdb); | 1653 | kfree(se_cmd->t_task_cdb); |
1903 | } | 1654 | } |
1904 | EXPORT_SYMBOL(transport_free_se_cmd); | 1655 | EXPORT_SYMBOL(transport_free_se_cmd); |
1905 | 1656 | ||
@@ -1922,42 +1673,41 @@ int transport_generic_allocate_tasks( | |||
1922 | */ | 1673 | */ |
1923 | cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; | 1674 | cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; |
1924 | 1675 | ||
1925 | transport_device_setup_cmd(cmd); | ||
1926 | /* | 1676 | /* |
1927 | * Ensure that the received CDB is less than the max (252 + 8) bytes | 1677 | * Ensure that the received CDB is less than the max (252 + 8) bytes |
1928 | * for VARIABLE_LENGTH_CMD | 1678 | * for VARIABLE_LENGTH_CMD |
1929 | */ | 1679 | */ |
1930 | if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { | 1680 | if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { |
1931 | printk(KERN_ERR "Received SCSI CDB with command_size: %d that" | 1681 | pr_err("Received SCSI CDB with command_size: %d that" |
1932 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", | 1682 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", |
1933 | scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); | 1683 | scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); |
1934 | return -1; | 1684 | return -EINVAL; |
1935 | } | 1685 | } |
1936 | /* | 1686 | /* |
1937 | * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, | 1687 | * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, |
1938 | * allocate the additional extended CDB buffer now. Otherwise | 1688 | * allocate the additional extended CDB buffer now. Otherwise |
1939 | * setup the pointer from __t_task_cdb to t_task_cdb. | 1689 | * setup the pointer from __t_task_cdb to t_task_cdb. |
1940 | */ | 1690 | */ |
1941 | if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) { | 1691 | if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { |
1942 | T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb), | 1692 | cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), |
1943 | GFP_KERNEL); | 1693 | GFP_KERNEL); |
1944 | if (!(T_TASK(cmd)->t_task_cdb)) { | 1694 | if (!cmd->t_task_cdb) { |
1945 | printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb" | 1695 | pr_err("Unable to allocate cmd->t_task_cdb" |
1946 | " %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n", | 1696 | " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", |
1947 | scsi_command_size(cdb), | 1697 | scsi_command_size(cdb), |
1948 | (unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb)); | 1698 | (unsigned long)sizeof(cmd->__t_task_cdb)); |
1949 | return -1; | 1699 | return -ENOMEM; |
1950 | } | 1700 | } |
1951 | } else | 1701 | } else |
1952 | T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0]; | 1702 | cmd->t_task_cdb = &cmd->__t_task_cdb[0]; |
1953 | /* | 1703 | /* |
1954 | * Copy the original CDB into T_TASK(cmd). | 1704 | * Copy the original CDB into cmd->t_task_cdb. |
1955 | */ | 1705 | */ |
1956 | memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb)); | 1706 | memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); |
1957 | /* | 1707 | /* |
1958 | * Setup the received CDB based on SCSI defined opcodes and | 1708 | * Setup the received CDB based on SCSI defined opcodes and |
1959 | * perform unit attention, persistent reservations and ALUA | 1709 | * perform unit attention, persistent reservations and ALUA |
1960 | * checks for virtual device backends. The T_TASK(cmd)->t_task_cdb | 1710 | * checks for virtual device backends. The cmd->t_task_cdb |
1961 | * pointer is expected to be setup before we reach this point. | 1711 | * pointer is expected to be setup before we reach this point. |
1962 | */ | 1712 | */ |
1963 | ret = transport_generic_cmd_sequencer(cmd, cdb); | 1713 | ret = transport_generic_cmd_sequencer(cmd, cdb); |
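The hunk above keeps CDBs that fit in the embedded __t_task_cdb array inline and heap-allocates only for larger variable-length CDBs; transport_free_se_cmd() then frees the buffer only when t_task_cdb no longer points at the inline storage. A runnable user-space analog of that inline-buffer-with-overflow pattern (INLINE_CDB and the struct layout are illustrative):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define INLINE_CDB 32              /* stand-in for sizeof(cmd->__t_task_cdb) */

struct toy_cmd {
    unsigned char *cdb;            /* analog of cmd->t_task_cdb */
    unsigned char inline_cdb[INLINE_CDB];  /* analog of __t_task_cdb */
};

static int cmd_set_cdb(struct toy_cmd *c, const unsigned char *cdb, size_t len)
{
    if (len > sizeof(c->inline_cdb)) {
        c->cdb = calloc(1, len);   /* extended buffer, like the kzalloc() path */
        if (!c->cdb)
            return -ENOMEM;
    } else {
        c->cdb = c->inline_cdb;    /* common case: no allocation at all */
    }
    memcpy(c->cdb, cdb, len);
    return 0;
}

static void cmd_free(struct toy_cmd *c)
{
    if (c->cdb != c->inline_cdb)   /* mirrors the transport_free_se_cmd() test */
        free(c->cdb);
}

int main(void)
{
    struct toy_cmd c = { 0 };
    unsigned char big[64] = { 0x7f };  /* VARIABLE_LENGTH_CMD opcode */

    if (!cmd_set_cdb(&c, big, sizeof(big)))
        printf("heap CDB? %s\n", c.cdb != c.inline_cdb ? "yes" : "no");
    cmd_free(&c);
    return 0;
}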
@@ -1969,7 +1719,7 @@ int transport_generic_allocate_tasks( | |||
1969 | if (transport_check_alloc_task_attr(cmd) < 0) { | 1719 | if (transport_check_alloc_task_attr(cmd) < 0) { |
1970 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 1720 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
1971 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | 1721 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; |
1972 | return -2; | 1722 | return -EINVAL; |
1973 | } | 1723 | } |
1974 | spin_lock(&cmd->se_lun->lun_sep_lock); | 1724 | spin_lock(&cmd->se_lun->lun_sep_lock); |
1975 | if (cmd->se_lun->lun_sep) | 1725 | if (cmd->se_lun->lun_sep) |
@@ -1986,10 +1736,10 @@ EXPORT_SYMBOL(transport_generic_allocate_tasks); | |||
1986 | int transport_generic_handle_cdb( | 1736 | int transport_generic_handle_cdb( |
1987 | struct se_cmd *cmd) | 1737 | struct se_cmd *cmd) |
1988 | { | 1738 | { |
1989 | if (!SE_LUN(cmd)) { | 1739 | if (!cmd->se_lun) { |
1990 | dump_stack(); | 1740 | dump_stack(); |
1991 | printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); | 1741 | pr_err("cmd->se_lun is NULL\n"); |
1992 | return -1; | 1742 | return -EINVAL; |
1993 | } | 1743 | } |
1994 | 1744 | ||
1995 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD); | 1745 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD); |
@@ -1998,6 +1748,29 @@ int transport_generic_handle_cdb( | |||
1998 | EXPORT_SYMBOL(transport_generic_handle_cdb); | 1748 | EXPORT_SYMBOL(transport_generic_handle_cdb); |
1999 | 1749 | ||
2000 | /* | 1750 | /* |
1751 | * Used by fabric module frontends to queue tasks directly. | ||
1752 | * May only be used from process context | ||
1753 | */ | ||
1754 | int transport_handle_cdb_direct( | ||
1755 | struct se_cmd *cmd) | ||
1756 | { | ||
1757 | if (!cmd->se_lun) { | ||
1758 | dump_stack(); | ||
1759 | pr_err("cmd->se_lun is NULL\n"); | ||
1760 | return -EINVAL; | ||
1761 | } | ||
1762 | if (in_interrupt()) { | ||
1763 | dump_stack(); | ||
1764 | pr_err("transport_generic_handle_cdb cannot be called" | ||
1765 | " from interrupt context\n"); | ||
1766 | return -EINVAL; | ||
1767 | } | ||
1768 | |||
1769 | return transport_generic_new_cmd(cmd); | ||
1770 | } | ||
1771 | EXPORT_SYMBOL(transport_handle_cdb_direct); | ||
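transport_handle_cdb_direct() gives fabric modules a synchronous dispatch path into transport_generic_new_cmd(), with the in_interrupt() check enforcing the process-context-only rule stated in the comment. A toy sketch of that guard (in_interrupt_ctx merely stands in for the real in_interrupt()):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool in_interrupt_ctx;      /* stand-in for in_interrupt() */

static int submit_direct(int tag)
{
    if (in_interrupt_ctx) {
        fprintf(stderr, "direct submit from interrupt context rejected\n");
        return -EINVAL;            /* as in transport_handle_cdb_direct() */
    }
    printf("tag %d dispatched synchronously\n", tag);
    return 0;
}

int main(void)
{
    submit_direct(1);              /* allowed: process context */
    in_interrupt_ctx = true;
    submit_direct(2);              /* rejected with -EINVAL */
    return 0;
}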
1772 | |||
1773 | /* | ||
2001 | * Used by fabric module frontends defining a TFO->new_cmd_map() caller | 1774 | * Used by fabric module frontends defining a TFO->new_cmd_map() caller |
2002 | * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to | 1775 | * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to |
2003 | * complete setup in TCM process context w/ TFO->new_cmd_map(). | 1776 | * complete setup in TCM process context w/ TFO->new_cmd_map(). |
@@ -2005,10 +1778,10 @@ EXPORT_SYMBOL(transport_generic_handle_cdb); | |||
2005 | int transport_generic_handle_cdb_map( | 1778 | int transport_generic_handle_cdb_map( |
2006 | struct se_cmd *cmd) | 1779 | struct se_cmd *cmd) |
2007 | { | 1780 | { |
2008 | if (!SE_LUN(cmd)) { | 1781 | if (!cmd->se_lun) { |
2009 | dump_stack(); | 1782 | dump_stack(); |
2010 | printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); | 1783 | pr_err("cmd->se_lun is NULL\n"); |
2011 | return -1; | 1784 | return -EINVAL; |
2012 | } | 1785 | } |
2013 | 1786 | ||
2014 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP); | 1787 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP); |
@@ -2030,7 +1803,7 @@ int transport_generic_handle_data( | |||
2030 | * in interrupt code, the signal_pending() check is skipped. | 1803 | * in interrupt code, the signal_pending() check is skipped. |
2031 | */ | 1804 | */ |
2032 | if (!in_interrupt() && signal_pending(current)) | 1805 | if (!in_interrupt() && signal_pending(current)) |
2033 | return -1; | 1806 | return -EPERM; |
2034 | /* | 1807 | /* |
2035 | * If the received CDB has aleady been ABORTED by the generic | 1808 | * If the received CDB has aleady been ABORTED by the generic |
2036 | * target engine, we now call transport_check_aborted_status() | 1809 | * target engine, we now call transport_check_aborted_status() |
@@ -2057,7 +1830,6 @@ int transport_generic_handle_tmr( | |||
2057 | * This is needed for early exceptions. | 1830 | * This is needed for early exceptions. |
2058 | */ | 1831 | */ |
2059 | cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; | 1832 | cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; |
2060 | transport_device_setup_cmd(cmd); | ||
2061 | 1833 | ||
2062 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR); | 1834 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR); |
2063 | return 0; | 1835 | return 0; |
@@ -2077,16 +1849,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) | |||
2077 | unsigned long flags; | 1849 | unsigned long flags; |
2078 | int ret = 0; | 1850 | int ret = 0; |
2079 | 1851 | ||
2080 | DEBUG_TS("ITT[0x%08x] - Stopping tasks\n", | 1852 | pr_debug("ITT[0x%08x] - Stopping tasks\n", |
2081 | CMD_TFO(cmd)->get_task_tag(cmd)); | 1853 | cmd->se_tfo->get_task_tag(cmd)); |
2082 | 1854 | ||
2083 | /* | 1855 | /* |
2084 | * No tasks remain in the execution queue | 1856 | * No tasks remain in the execution queue |
2085 | */ | 1857 | */ |
2086 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 1858 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2087 | list_for_each_entry_safe(task, task_tmp, | 1859 | list_for_each_entry_safe(task, task_tmp, |
2088 | &T_TASK(cmd)->t_task_list, t_list) { | 1860 | &cmd->t_task_list, t_list) { |
2089 | DEBUG_TS("task_no[%d] - Processing task %p\n", | 1861 | pr_debug("task_no[%d] - Processing task %p\n", |
2090 | task->task_no, task); | 1862 | task->task_no, task); |
2091 | /* | 1863 | /* |
2092 | * If the struct se_task has not been sent and is not active, | 1864 | * If the struct se_task has not been sent and is not active, |
@@ -2094,14 +1866,14 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) | |||
2094 | */ | 1866 | */ |
2095 | if (!atomic_read(&task->task_sent) && | 1867 | if (!atomic_read(&task->task_sent) && |
2096 | !atomic_read(&task->task_active)) { | 1868 | !atomic_read(&task->task_active)) { |
2097 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | 1869 | spin_unlock_irqrestore(&cmd->t_state_lock, |
2098 | flags); | 1870 | flags); |
2099 | transport_remove_task_from_execute_queue(task, | 1871 | transport_remove_task_from_execute_queue(task, |
2100 | task->se_dev); | 1872 | task->se_dev); |
2101 | 1873 | ||
2102 | DEBUG_TS("task_no[%d] - Removed from execute queue\n", | 1874 | pr_debug("task_no[%d] - Removed from execute queue\n", |
2103 | task->task_no); | 1875 | task->task_no); |
2104 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 1876 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2105 | continue; | 1877 | continue; |
2106 | } | 1878 | } |
2107 | 1879 | ||
@@ -2111,42 +1883,32 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) | |||
2111 | */ | 1883 | */ |
2112 | if (atomic_read(&task->task_active)) { | 1884 | if (atomic_read(&task->task_active)) { |
2113 | atomic_set(&task->task_stop, 1); | 1885 | atomic_set(&task->task_stop, 1); |
2114 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | 1886 | spin_unlock_irqrestore(&cmd->t_state_lock, |
2115 | flags); | 1887 | flags); |
2116 | 1888 | ||
2117 | DEBUG_TS("task_no[%d] - Waiting to complete\n", | 1889 | pr_debug("task_no[%d] - Waiting to complete\n", |
2118 | task->task_no); | 1890 | task->task_no); |
2119 | wait_for_completion(&task->task_stop_comp); | 1891 | wait_for_completion(&task->task_stop_comp); |
2120 | DEBUG_TS("task_no[%d] - Stopped successfully\n", | 1892 | pr_debug("task_no[%d] - Stopped successfully\n", |
2121 | task->task_no); | 1893 | task->task_no); |
2122 | 1894 | ||
2123 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 1895 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2124 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); | 1896 | atomic_dec(&cmd->t_task_cdbs_left); |
2125 | 1897 | ||
2126 | atomic_set(&task->task_active, 0); | 1898 | atomic_set(&task->task_active, 0); |
2127 | atomic_set(&task->task_stop, 0); | 1899 | atomic_set(&task->task_stop, 0); |
2128 | } else { | 1900 | } else { |
2129 | DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no); | 1901 | pr_debug("task_no[%d] - Did nothing\n", task->task_no); |
2130 | ret++; | 1902 | ret++; |
2131 | } | 1903 | } |
2132 | 1904 | ||
2133 | __transport_stop_task_timer(task, &flags); | 1905 | __transport_stop_task_timer(task, &flags); |
2134 | } | 1906 | } |
2135 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 1907 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2136 | 1908 | ||
2137 | return ret; | 1909 | return ret; |
2138 | } | 1910 | } |
2139 | 1911 | ||
2140 | static void transport_failure_reset_queue_depth(struct se_device *dev) | ||
2141 | { | ||
2142 | unsigned long flags; | ||
2143 | |||
2144 | spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); | ||
2145 | atomic_inc(&dev->depth_left); | ||
2146 | atomic_inc(&SE_HBA(dev)->left_queue_depth); | ||
2147 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | ||
2148 | } | ||
2149 | |||
2150 | /* | 1912 | /* |
2151 | * Handle SAM-esque emulation for generic transport request failures. | 1913 | * Handle SAM-esque emulation for generic transport request failures. |
2152 | */ | 1914 | */ |
@@ -2156,29 +1918,31 @@ static void transport_generic_request_failure( | |||
2156 | int complete, | 1918 | int complete, |
2157 | int sc) | 1919 | int sc) |
2158 | { | 1920 | { |
2159 | DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" | 1921 | int ret = 0; |
2160 | " CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd), | 1922 | |
2161 | T_TASK(cmd)->t_task_cdb[0]); | 1923 | pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" |
2162 | DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:" | 1924 | " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), |
1925 | cmd->t_task_cdb[0]); | ||
1926 | pr_debug("-----[ i_state: %d t_state/def_t_state:" | ||
2163 | " %d/%d transport_error_status: %d\n", | 1927 | " %d/%d transport_error_status: %d\n", |
2164 | CMD_TFO(cmd)->get_cmd_state(cmd), | 1928 | cmd->se_tfo->get_cmd_state(cmd), |
2165 | cmd->t_state, cmd->deferred_t_state, | 1929 | cmd->t_state, cmd->deferred_t_state, |
2166 | cmd->transport_error_status); | 1930 | cmd->transport_error_status); |
2167 | DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d" | 1931 | pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" |
2168 | " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" | 1932 | " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" |
2169 | " t_transport_active: %d t_transport_stop: %d" | 1933 | " t_transport_active: %d t_transport_stop: %d" |
2170 | " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs, | 1934 | " t_transport_sent: %d\n", cmd->t_task_list_num, |
2171 | atomic_read(&T_TASK(cmd)->t_task_cdbs_left), | 1935 | atomic_read(&cmd->t_task_cdbs_left), |
2172 | atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), | 1936 | atomic_read(&cmd->t_task_cdbs_sent), |
2173 | atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left), | 1937 | atomic_read(&cmd->t_task_cdbs_ex_left), |
2174 | atomic_read(&T_TASK(cmd)->t_transport_active), | 1938 | atomic_read(&cmd->t_transport_active), |
2175 | atomic_read(&T_TASK(cmd)->t_transport_stop), | 1939 | atomic_read(&cmd->t_transport_stop), |
2176 | atomic_read(&T_TASK(cmd)->t_transport_sent)); | 1940 | atomic_read(&cmd->t_transport_sent)); |
2177 | 1941 | ||
2178 | transport_stop_all_task_timers(cmd); | 1942 | transport_stop_all_task_timers(cmd); |
2179 | 1943 | ||
2180 | if (dev) | 1944 | if (dev) |
2181 | transport_failure_reset_queue_depth(dev); | 1945 | atomic_inc(&dev->depth_left); |
2182 | /* | 1946 | /* |
2183 | * For SAM Task Attribute emulation for failed struct se_cmd | 1947 | * For SAM Task Attribute emulation for failed struct se_cmd |
2184 | */ | 1948 | */ |
@@ -2211,8 +1975,8 @@ static void transport_generic_request_failure( | |||
2211 | * we force this session to fall back to session | 1975 | * we force this session to fall back to session |
2212 | * recovery. | 1976 | * recovery. |
2213 | */ | 1977 | */ |
2214 | CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess); | 1978 | cmd->se_tfo->fall_back_to_erl0(cmd->se_sess); |
2215 | CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0); | 1979 | cmd->se_tfo->stop_session(cmd->se_sess, 0, 0); |
2216 | 1980 | ||
2217 | goto check_stop; | 1981 | goto check_stop; |
2218 | case PYX_TRANSPORT_LU_COMM_FAILURE: | 1982 | case PYX_TRANSPORT_LU_COMM_FAILURE: |
@@ -2240,13 +2004,15 @@ static void transport_generic_request_failure( | |||
2240 | * | 2004 | * |
2241 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | 2005 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 |
2242 | */ | 2006 | */ |
2243 | if (SE_SESS(cmd) && | 2007 | if (cmd->se_sess && |
2244 | DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2) | 2008 | cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) |
2245 | core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl, | 2009 | core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, |
2246 | cmd->orig_fe_lun, 0x2C, | 2010 | cmd->orig_fe_lun, 0x2C, |
2247 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | 2011 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); |
2248 | 2012 | ||
2249 | CMD_TFO(cmd)->queue_status(cmd); | 2013 | ret = cmd->se_tfo->queue_status(cmd); |
2014 | if (ret == -EAGAIN) | ||
2015 | goto queue_full; | ||
2250 | goto check_stop; | 2016 | goto check_stop; |
2251 | case PYX_TRANSPORT_USE_SENSE_REASON: | 2017 | case PYX_TRANSPORT_USE_SENSE_REASON: |
2252 | /* | 2018 | /* |
@@ -2254,8 +2020,8 @@ static void transport_generic_request_failure( | |||
2254 | */ | 2020 | */ |
2255 | break; | 2021 | break; |
2256 | default: | 2022 | default: |
2257 | printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n", | 2023 | pr_err("Unknown transport error for CDB 0x%02x: %d\n", |
2258 | T_TASK(cmd)->t_task_cdb[0], | 2024 | cmd->t_task_cdb[0], |
2259 | cmd->transport_error_status); | 2025 | cmd->transport_error_status); |
2260 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | 2026 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; |
2261 | break; | 2027 | break; |
@@ -2263,32 +2029,41 @@ static void transport_generic_request_failure( | |||
2263 | 2029 | ||
2264 | if (!sc) | 2030 | if (!sc) |
2265 | transport_new_cmd_failure(cmd); | 2031 | transport_new_cmd_failure(cmd); |
2266 | else | 2032 | else { |
2267 | transport_send_check_condition_and_sense(cmd, | 2033 | ret = transport_send_check_condition_and_sense(cmd, |
2268 | cmd->scsi_sense_reason, 0); | 2034 | cmd->scsi_sense_reason, 0); |
2035 | if (ret == -EAGAIN) | ||
2036 | goto queue_full; | ||
2037 | } | ||
2038 | |||
2269 | check_stop: | 2039 | check_stop: |
2270 | transport_lun_remove_cmd(cmd); | 2040 | transport_lun_remove_cmd(cmd); |
2271 | if (!(transport_cmd_check_stop_to_fabric(cmd))) | 2041 | if (!transport_cmd_check_stop_to_fabric(cmd)) |
2272 | ; | 2042 | ; |
2043 | return; | ||
2044 | |||
2045 | queue_full: | ||
2046 | cmd->t_state = TRANSPORT_COMPLETE_OK; | ||
2047 | transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); | ||
2273 | } | 2048 | } |
2274 | 2049 | ||
2275 | static void transport_direct_request_timeout(struct se_cmd *cmd) | 2050 | static void transport_direct_request_timeout(struct se_cmd *cmd) |
2276 | { | 2051 | { |
2277 | unsigned long flags; | 2052 | unsigned long flags; |
2278 | 2053 | ||
2279 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 2054 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2280 | if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) { | 2055 | if (!atomic_read(&cmd->t_transport_timeout)) { |
2281 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2056 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2282 | return; | 2057 | return; |
2283 | } | 2058 | } |
2284 | if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) { | 2059 | if (atomic_read(&cmd->t_task_cdbs_timeout_left)) { |
2285 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2060 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2286 | return; | 2061 | return; |
2287 | } | 2062 | } |
2288 | 2063 | ||
2289 | atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout), | 2064 | atomic_sub(atomic_read(&cmd->t_transport_timeout), |
2290 | &T_TASK(cmd)->t_se_count); | 2065 | &cmd->t_se_count); |
2291 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2066 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2292 | } | 2067 | } |
2293 | 2068 | ||
2294 | static void transport_generic_request_timeout(struct se_cmd *cmd) | 2069 | static void transport_generic_request_timeout(struct se_cmd *cmd) |
@@ -2296,35 +2071,18 @@ static void transport_generic_request_timeout(struct se_cmd *cmd) | |||
2296 | unsigned long flags; | 2071 | unsigned long flags; |
2297 | 2072 | ||
2298 | /* | 2073 | /* |
2299 | * Reset T_TASK(cmd)->t_se_count to allow transport_generic_remove() | 2074 | * Reset cmd->t_se_count to allow transport_generic_remove() |
2300 | * to allow last call to free memory resources. | 2075 | * to allow last call to free memory resources. |
2301 | */ | 2076 | */ |
2302 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 2077 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2303 | if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) { | 2078 | if (atomic_read(&cmd->t_transport_timeout) > 1) { |
2304 | int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1); | 2079 | int tmp = (atomic_read(&cmd->t_transport_timeout) - 1); |
2305 | |||
2306 | atomic_sub(tmp, &T_TASK(cmd)->t_se_count); | ||
2307 | } | ||
2308 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
2309 | |||
2310 | transport_generic_remove(cmd, 0, 0); | ||
2311 | } | ||
2312 | |||
2313 | static int | ||
2314 | transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length) | ||
2315 | { | ||
2316 | unsigned char *buf; | ||
2317 | 2080 | ||
2318 | buf = kzalloc(data_length, GFP_KERNEL); | 2081 | atomic_sub(tmp, &cmd->t_se_count); |
2319 | if (!(buf)) { | ||
2320 | printk(KERN_ERR "Unable to allocate memory for buffer\n"); | ||
2321 | return -1; | ||
2322 | } | 2082 | } |
2083 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
2323 | 2084 | ||
2324 | T_TASK(cmd)->t_tasks_se_num = 0; | 2085 | transport_generic_remove(cmd, 0); |
2325 | T_TASK(cmd)->t_task_buf = buf; | ||
2326 | |||
2327 | return 0; | ||
2328 | } | 2086 | } |
2329 | 2087 | ||
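
The deleted transport_generic_allocate_buf() above grabbed one contiguous kzalloc() buffer for control payloads; later hunks in this same patch drop SCF_SCSI_CONTROL_NONSG_IO_CDB in favor of SCF_SCSI_CONTROL_SG_IO_CDB, i.e. page-sized chunks a scatterlist can describe. A rough user-space sketch of the difference in allocation shape (invented names, error handling trimmed for brevity):

#include <stdlib.h>
#include <stdio.h>

#define PAGE_SZ 4096

struct chunk { void *page; unsigned int len; };

/* split a request into page-sized pieces instead of one contiguous buffer */
static struct chunk *alloc_chunked(size_t length, int *nents)
{
        int n = (int)((length + PAGE_SZ - 1) / PAGE_SZ);
        struct chunk *c = calloc(n, sizeof(*c));
        if (!c)
                return NULL;
        for (int i = 0; i < n; i++) {
                size_t l = length > PAGE_SZ ? PAGE_SZ : length;
                c[i].page = calloc(1, PAGE_SZ);  /* NULL checks elided */
                c[i].len = (unsigned int)l;
                length -= l;
        }
        *nents = n;
        return c;
}

int main(void)
{
        int nents;
        struct chunk *c = alloc_chunked(6000, &nents);
        printf("%d chunks\n", nents);            /* 2 chunks */
        free(c[0].page); free(c[1].page); free(c);
        return 0;
}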
2330 | static inline u32 transport_lba_21(unsigned char *cdb) | 2088 | static inline u32 transport_lba_21(unsigned char *cdb) |
@@ -2364,9 +2122,9 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) | |||
2364 | { | 2122 | { |
2365 | unsigned long flags; | 2123 | unsigned long flags; |
2366 | 2124 | ||
2367 | spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags); | 2125 | spin_lock_irqsave(&se_cmd->t_state_lock, flags); |
2368 | se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; | 2126 | se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; |
2369 | spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags); | 2127 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); |
2370 | } | 2128 | } |
2371 | 2129 | ||
2372 | /* | 2130 | /* |
@@ -2375,14 +2133,14 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) | |||
2375 | static void transport_task_timeout_handler(unsigned long data) | 2133 | static void transport_task_timeout_handler(unsigned long data) |
2376 | { | 2134 | { |
2377 | struct se_task *task = (struct se_task *)data; | 2135 | struct se_task *task = (struct se_task *)data; |
2378 | struct se_cmd *cmd = TASK_CMD(task); | 2136 | struct se_cmd *cmd = task->task_se_cmd; |
2379 | unsigned long flags; | 2137 | unsigned long flags; |
2380 | 2138 | ||
2381 | DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd); | 2139 | pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd); |
2382 | 2140 | ||
2383 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 2141 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2384 | if (task->task_flags & TF_STOP) { | 2142 | if (task->task_flags & TF_STOP) { |
2385 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2143 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2386 | return; | 2144 | return; |
2387 | } | 2145 | } |
2388 | task->task_flags &= ~TF_RUNNING; | 2146 | task->task_flags &= ~TF_RUNNING; |
@@ -2390,46 +2148,46 @@ static void transport_task_timeout_handler(unsigned long data) | |||
2390 | /* | 2148 | /* |
2391 | * Determine if transport_complete_task() has already been called. | 2149 | * Determine if transport_complete_task() has already been called. |
2392 | */ | 2150 | */ |
2393 | if (!(atomic_read(&task->task_active))) { | 2151 | if (!atomic_read(&task->task_active)) { |
2394 | DEBUG_TT("transport task: %p cmd: %p timeout task_active" | 2152 | pr_debug("transport task: %p cmd: %p timeout task_active" |
2395 | " == 0\n", task, cmd); | 2153 | " == 0\n", task, cmd); |
2396 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2154 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2397 | return; | 2155 | return; |
2398 | } | 2156 | } |
2399 | 2157 | ||
2400 | atomic_inc(&T_TASK(cmd)->t_se_count); | 2158 | atomic_inc(&cmd->t_se_count); |
2401 | atomic_inc(&T_TASK(cmd)->t_transport_timeout); | 2159 | atomic_inc(&cmd->t_transport_timeout); |
2402 | T_TASK(cmd)->t_tasks_failed = 1; | 2160 | cmd->t_tasks_failed = 1; |
2403 | 2161 | ||
2404 | atomic_set(&task->task_timeout, 1); | 2162 | atomic_set(&task->task_timeout, 1); |
2405 | task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; | 2163 | task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; |
2406 | task->task_scsi_status = 1; | 2164 | task->task_scsi_status = 1; |
2407 | 2165 | ||
2408 | if (atomic_read(&task->task_stop)) { | 2166 | if (atomic_read(&task->task_stop)) { |
2409 | DEBUG_TT("transport task: %p cmd: %p timeout task_stop" | 2167 | pr_debug("transport task: %p cmd: %p timeout task_stop" |
2410 | " == 1\n", task, cmd); | 2168 | " == 1\n", task, cmd); |
2411 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2169 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2412 | complete(&task->task_stop_comp); | 2170 | complete(&task->task_stop_comp); |
2413 | return; | 2171 | return; |
2414 | } | 2172 | } |
2415 | 2173 | ||
2416 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) { | 2174 | if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { |
2417 | DEBUG_TT("transport task: %p cmd: %p timeout non zero" | 2175 | pr_debug("transport task: %p cmd: %p timeout non zero" |
2418 | " t_task_cdbs_left\n", task, cmd); | 2176 | " t_task_cdbs_left\n", task, cmd); |
2419 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2177 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2420 | return; | 2178 | return; |
2421 | } | 2179 | } |
2422 | DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", | 2180 | pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", |
2423 | task, cmd); | 2181 | task, cmd); |
2424 | 2182 | ||
2425 | cmd->t_state = TRANSPORT_COMPLETE_FAILURE; | 2183 | cmd->t_state = TRANSPORT_COMPLETE_FAILURE; |
2426 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2184 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2427 | 2185 | ||
2428 | transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); | 2186 | transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); |
2429 | } | 2187 | } |
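
The timeout handler above relies on atomic_dec_and_test() on t_task_cdbs_left so that only the last task to time out requeues the command as TRANSPORT_COMPLETE_FAILURE. The same "last one out completes" idiom in portable C11, with invented names:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int t_task_cdbs_left = 3;

static void task_timed_out(int task_no)
{
        /* fetch_sub returns the old value; old == 1 means we hit zero */
        if (atomic_fetch_sub(&t_task_cdbs_left, 1) != 1) {
                printf("task %d: others still pending\n", task_no);
                return;
        }
        printf("task %d: last one, requeue command for failure\n", task_no);
}

int main(void)
{
        for (int i = 0; i < 3; i++)
                task_timed_out(i);
        return 0;
}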
2430 | 2188 | ||
2431 | /* | 2189 | /* |
2432 | * Called with T_TASK(cmd)->t_state_lock held. | 2190 | * Called with cmd->t_state_lock held. |
2433 | */ | 2191 | */ |
2434 | static void transport_start_task_timer(struct se_task *task) | 2192 | static void transport_start_task_timer(struct se_task *task) |
2435 | { | 2193 | { |
@@ -2441,8 +2199,8 @@ static void transport_start_task_timer(struct se_task *task) | |||
2441 | /* | 2199 | /* |
2442 | * If the task_timeout is disabled, exit now. | 2200 | * If the task_timeout is disabled, exit now. |
2443 | */ | 2201 | */ |
2444 | timeout = DEV_ATTRIB(dev)->task_timeout; | 2202 | timeout = dev->se_sub_dev->se_dev_attrib.task_timeout; |
2445 | if (!(timeout)) | 2203 | if (!timeout) |
2446 | return; | 2204 | return; |
2447 | 2205 | ||
2448 | init_timer(&task->task_timer); | 2206 | init_timer(&task->task_timer); |
@@ -2453,27 +2211,27 @@ static void transport_start_task_timer(struct se_task *task) | |||
2453 | task->task_flags |= TF_RUNNING; | 2211 | task->task_flags |= TF_RUNNING; |
2454 | add_timer(&task->task_timer); | 2212 | add_timer(&task->task_timer); |
2455 | #if 0 | 2213 | #if 0 |
2456 | printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:" | 2214 | pr_debug("Starting task timer for cmd: %p task: %p seconds:" |
2457 | " %d\n", task->task_se_cmd, task, timeout); | 2215 | " %d\n", task->task_se_cmd, task, timeout); |
2458 | #endif | 2216 | #endif |
2459 | } | 2217 | } |
2460 | 2218 | ||
2461 | /* | 2219 | /* |
2462 | * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held. | 2220 | * Called with spin_lock_irq(&cmd->t_state_lock) held. |
2463 | */ | 2221 | */ |
2464 | void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) | 2222 | void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) |
2465 | { | 2223 | { |
2466 | struct se_cmd *cmd = TASK_CMD(task); | 2224 | struct se_cmd *cmd = task->task_se_cmd; |
2467 | 2225 | ||
2468 | if (!(task->task_flags & TF_RUNNING)) | 2226 | if (!(task->task_flags & TF_RUNNING))

2469 | return; | 2227 | return; |
2470 | 2228 | ||
2471 | task->task_flags |= TF_STOP; | 2229 | task->task_flags |= TF_STOP; |
2472 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags); | 2230 | spin_unlock_irqrestore(&cmd->t_state_lock, *flags); |
2473 | 2231 | ||
2474 | del_timer_sync(&task->task_timer); | 2232 | del_timer_sync(&task->task_timer); |
2475 | 2233 | ||
2476 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags); | 2234 | spin_lock_irqsave(&cmd->t_state_lock, *flags); |
2477 | task->task_flags &= ~TF_RUNNING; | 2235 | task->task_flags &= ~TF_RUNNING; |
2478 | task->task_flags &= ~TF_STOP; | 2236 | task->task_flags &= ~TF_STOP; |
2479 | } | 2237 | } |
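
__transport_stop_task_timer() deliberately drops t_state_lock around del_timer_sync(): the timer handler takes the same lock, so waiting for a running handler to finish while holding it could deadlock, and the TF_STOP flag tells a handler that races in to back off. A pthreads sketch of the shape of that protocol (invented names; the synchronous wait itself is elided):

#include <pthread.h>

#define TF_RUNNING 0x01
#define TF_STOP    0x02

struct task { pthread_mutex_t lock; int flags; };

void timer_handler(struct task *t)
{
        pthread_mutex_lock(&t->lock);
        if (t->flags & TF_STOP) {       /* a stop is in progress: bail out */
                pthread_mutex_unlock(&t->lock);
                return;
        }
        /* ... timeout work under the lock ... */
        pthread_mutex_unlock(&t->lock);
}

void stop_task_timer(struct task *t)
{
        pthread_mutex_lock(&t->lock);
        if (!(t->flags & TF_RUNNING)) {
                pthread_mutex_unlock(&t->lock);
                return;
        }
        t->flags |= TF_STOP;
        pthread_mutex_unlock(&t->lock); /* drop before the synchronous wait */

        /* del_timer_sync() analogue: wait here for any running handler */

        pthread_mutex_lock(&t->lock);
        t->flags &= ~(TF_RUNNING | TF_STOP);
        pthread_mutex_unlock(&t->lock);
}

int main(void)
{
        struct task t = { PTHREAD_MUTEX_INITIALIZER, TF_RUNNING };
        stop_task_timer(&t);
        return 0;
}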
@@ -2483,11 +2241,11 @@ static void transport_stop_all_task_timers(struct se_cmd *cmd) | |||
2483 | struct se_task *task = NULL, *task_tmp; | 2241 | struct se_task *task = NULL, *task_tmp; |
2484 | unsigned long flags; | 2242 | unsigned long flags; |
2485 | 2243 | ||
2486 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 2244 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2487 | list_for_each_entry_safe(task, task_tmp, | 2245 | list_for_each_entry_safe(task, task_tmp, |
2488 | &T_TASK(cmd)->t_task_list, t_list) | 2246 | &cmd->t_task_list, t_list) |
2489 | __transport_stop_task_timer(task, &flags); | 2247 | __transport_stop_task_timer(task, &flags); |
2490 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2248 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2491 | } | 2249 | } |
2492 | 2250 | ||
2493 | static inline int transport_tcq_window_closed(struct se_device *dev) | 2251 | static inline int transport_tcq_window_closed(struct se_device *dev) |
@@ -2498,7 +2256,7 @@ static inline int transport_tcq_window_closed(struct se_device *dev) | |||
2498 | } else | 2256 | } else |
2499 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG); | 2257 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG); |
2500 | 2258 | ||
2501 | wake_up_interruptible(&dev->dev_queue_obj->thread_wq); | 2259 | wake_up_interruptible(&dev->dev_queue_obj.thread_wq); |
2502 | return 0; | 2260 | return 0; |
2503 | } | 2261 | } |
2504 | 2262 | ||
@@ -2511,45 +2269,45 @@ static inline int transport_tcq_window_closed(struct se_device *dev) | |||
2511 | */ | 2269 | */ |
2512 | static inline int transport_execute_task_attr(struct se_cmd *cmd) | 2270 | static inline int transport_execute_task_attr(struct se_cmd *cmd) |
2513 | { | 2271 | { |
2514 | if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) | 2272 | if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) |
2515 | return 1; | 2273 | return 1; |
2516 | /* | 2274 | /* |
2517 | * Check for the existence of HEAD_OF_QUEUE, and if true return 1 | 2275 | * Check for the existence of HEAD_OF_QUEUE, and if true return 1 |
2518 | * to allow the passed struct se_cmd list of tasks to be added to the front of the list. | 2276 | * to allow the passed struct se_cmd list of tasks to be added to the front of the list.
2519 | */ | 2277 | */ |
2520 | if (cmd->sam_task_attr == MSG_HEAD_TAG) { | 2278 | if (cmd->sam_task_attr == MSG_HEAD_TAG) { |
2521 | atomic_inc(&SE_DEV(cmd)->dev_hoq_count); | 2279 | atomic_inc(&cmd->se_dev->dev_hoq_count); |
2522 | smp_mb__after_atomic_inc(); | 2280 | smp_mb__after_atomic_inc(); |
2523 | DEBUG_STA("Added HEAD_OF_QUEUE for CDB:" | 2281 | pr_debug("Added HEAD_OF_QUEUE for CDB:" |
2524 | " 0x%02x, se_ordered_id: %u\n", | 2282 | " 0x%02x, se_ordered_id: %u\n", |
2525 | T_TASK(cmd)->t_task_cdb[0], | 2283 | cmd->t_task_cdb[0], |
2526 | cmd->se_ordered_id); | 2284 | cmd->se_ordered_id); |
2527 | return 1; | 2285 | return 1; |
2528 | } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { | 2286 | } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { |
2529 | spin_lock(&SE_DEV(cmd)->ordered_cmd_lock); | 2287 | spin_lock(&cmd->se_dev->ordered_cmd_lock); |
2530 | list_add_tail(&cmd->se_ordered_list, | 2288 | list_add_tail(&cmd->se_ordered_node, |
2531 | &SE_DEV(cmd)->ordered_cmd_list); | 2289 | &cmd->se_dev->ordered_cmd_list); |
2532 | spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock); | 2290 | spin_unlock(&cmd->se_dev->ordered_cmd_lock); |
2533 | 2291 | ||
2534 | atomic_inc(&SE_DEV(cmd)->dev_ordered_sync); | 2292 | atomic_inc(&cmd->se_dev->dev_ordered_sync); |
2535 | smp_mb__after_atomic_inc(); | 2293 | smp_mb__after_atomic_inc(); |
2536 | 2294 | ||
2537 | DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered" | 2295 | pr_debug("Added ORDERED for CDB: 0x%02x to ordered" |
2538 | " list, se_ordered_id: %u\n", | 2296 | " list, se_ordered_id: %u\n", |
2539 | T_TASK(cmd)->t_task_cdb[0], | 2297 | cmd->t_task_cdb[0], |
2540 | cmd->se_ordered_id); | 2298 | cmd->se_ordered_id); |
2541 | /* | 2299 | /* |
2542 | * Add ORDERED command to tail of execution queue if | 2300 | * Add ORDERED command to tail of execution queue if |
2543 | * no other older commands exist that need to be | 2301 | * no other older commands exist that need to be |
2544 | * completed first. | 2302 | * completed first. |
2545 | */ | 2303 | */ |
2546 | if (!(atomic_read(&SE_DEV(cmd)->simple_cmds))) | 2304 | if (!atomic_read(&cmd->se_dev->simple_cmds)) |
2547 | return 1; | 2305 | return 1; |
2548 | } else { | 2306 | } else { |
2549 | /* | 2307 | /* |
2550 | * For SIMPLE and UNTAGGED Task Attribute commands | 2308 | * For SIMPLE and UNTAGGED Task Attribute commands |
2551 | */ | 2309 | */ |
2552 | atomic_inc(&SE_DEV(cmd)->simple_cmds); | 2310 | atomic_inc(&cmd->se_dev->simple_cmds); |
2553 | smp_mb__after_atomic_inc(); | 2311 | smp_mb__after_atomic_inc(); |
2554 | } | 2312 | } |
2555 | /* | 2313 | /* |
@@ -2557,20 +2315,20 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd) | |||
2557 | * add the dormant task(s) built for the passed struct se_cmd to the | 2315 | * add the dormant task(s) built for the passed struct se_cmd to the |
2558 | * execution queue and become in Active state for this struct se_device. | 2316 | * execution queue and become in Active state for this struct se_device. |
2559 | */ | 2317 | */ |
2560 | if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) { | 2318 | if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) { |
2561 | /* | 2319 | /* |
2562 | * Otherwise, add cmd w/ tasks to delayed cmd queue that | 2320 | * Otherwise, add cmd w/ tasks to delayed cmd queue that |
2563 | * will be drained upon completion of HEAD_OF_QUEUE task. | 2321 | * will be drained upon completion of HEAD_OF_QUEUE task. |
2564 | */ | 2322 | */ |
2565 | spin_lock(&SE_DEV(cmd)->delayed_cmd_lock); | 2323 | spin_lock(&cmd->se_dev->delayed_cmd_lock); |
2566 | cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; | 2324 | cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; |
2567 | list_add_tail(&cmd->se_delayed_list, | 2325 | list_add_tail(&cmd->se_delayed_node, |
2568 | &SE_DEV(cmd)->delayed_cmd_list); | 2326 | &cmd->se_dev->delayed_cmd_list); |
2569 | spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock); | 2327 | spin_unlock(&cmd->se_dev->delayed_cmd_lock); |
2570 | 2328 | ||
2571 | DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to" | 2329 | pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" |
2572 | " delayed CMD list, se_ordered_id: %u\n", | 2330 | " delayed CMD list, se_ordered_id: %u\n", |
2573 | T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr, | 2331 | cmd->t_task_cdb[0], cmd->sam_task_attr, |
2574 | cmd->se_ordered_id); | 2332 | cmd->se_ordered_id); |
2575 | /* | 2333 | /* |
2576 | * Return zero to let transport_execute_tasks() know | 2334 | * Return zero to let transport_execute_tasks() know |
@@ -2592,25 +2350,23 @@ static int transport_execute_tasks(struct se_cmd *cmd) | |||
2592 | { | 2350 | { |
2593 | int add_tasks; | 2351 | int add_tasks; |
2594 | 2352 | ||
2595 | if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) { | 2353 | if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { |
2596 | if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { | 2354 | cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; |
2597 | cmd->transport_error_status = | 2355 | transport_generic_request_failure(cmd, NULL, 0, 1); |
2598 | PYX_TRANSPORT_LU_COMM_FAILURE; | 2356 | return 0; |
2599 | transport_generic_request_failure(cmd, NULL, 0, 1); | ||
2600 | return 0; | ||
2601 | } | ||
2602 | } | 2357 | } |
2358 | |||
2603 | /* | 2359 | /* |
2604 | * Call transport_cmd_check_stop() to see if a fabric exception | 2360 | * Call transport_cmd_check_stop() to see if a fabric exception |
2605 | * has occurred that prevents execution. | 2361 | * has occurred that prevents execution. |
2606 | */ | 2362 | */ |
2607 | if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) { | 2363 | if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) { |
2608 | /* | 2364 | /* |
2609 | * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE | 2365 | * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE |
2610 | * attribute for the tasks of the received struct se_cmd CDB | 2366 | * attribute for the tasks of the received struct se_cmd CDB |
2611 | */ | 2367 | */ |
2612 | add_tasks = transport_execute_task_attr(cmd); | 2368 | add_tasks = transport_execute_task_attr(cmd); |
2613 | if (add_tasks == 0) | 2369 | if (!add_tasks) |
2614 | goto execute_tasks; | 2370 | goto execute_tasks; |
2615 | /* | 2371 | /* |
2616 | * This calls transport_add_tasks_from_cmd() to handle | 2372 | * This calls transport_add_tasks_from_cmd() to handle |
@@ -2625,7 +2381,7 @@ static int transport_execute_tasks(struct se_cmd *cmd) | |||
2625 | * storage object. | 2381 | * storage object. |
2626 | */ | 2382 | */ |
2627 | execute_tasks: | 2383 | execute_tasks: |
2628 | __transport_execute_tasks(SE_DEV(cmd)); | 2384 | __transport_execute_tasks(cmd->se_dev); |
2629 | return 0; | 2385 | return 0; |
2630 | } | 2386 | } |
2631 | 2387 | ||
@@ -2639,51 +2395,49 @@ static int __transport_execute_tasks(struct se_device *dev) | |||
2639 | { | 2395 | { |
2640 | int error; | 2396 | int error; |
2641 | struct se_cmd *cmd = NULL; | 2397 | struct se_cmd *cmd = NULL; |
2642 | struct se_task *task; | 2398 | struct se_task *task = NULL; |
2643 | unsigned long flags; | 2399 | unsigned long flags; |
2644 | 2400 | ||
2645 | /* | 2401 | /* |
2646 | * Check if there is enough room in the device and HBA queue to send | 2402 | * Check if there is enough room in the device and HBA queue to send |
2647 | * struct se_transport_task's to the selected transport. | 2403 | * struct se_task entries to the selected transport.
2648 | */ | 2404 | */ |
2649 | check_depth: | 2405 | check_depth: |
2650 | spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); | 2406 | if (!atomic_read(&dev->depth_left)) |
2651 | if (!(atomic_read(&dev->depth_left)) || | ||
2652 | !(atomic_read(&SE_HBA(dev)->left_queue_depth))) { | ||
2653 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | ||
2654 | return transport_tcq_window_closed(dev); | 2407 | return transport_tcq_window_closed(dev); |
2655 | } | ||
2656 | dev->dev_tcq_window_closed = 0; | ||
2657 | 2408 | ||
2658 | spin_lock(&dev->execute_task_lock); | 2409 | dev->dev_tcq_window_closed = 0; |
2659 | task = transport_get_task_from_execute_queue(dev); | ||
2660 | spin_unlock(&dev->execute_task_lock); | ||
2661 | 2410 | ||
2662 | if (!task) { | 2411 | spin_lock_irq(&dev->execute_task_lock); |
2663 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | 2412 | if (list_empty(&dev->execute_task_list)) { |
2413 | spin_unlock_irq(&dev->execute_task_lock); | ||
2664 | return 0; | 2414 | return 0; |
2665 | } | 2415 | } |
2416 | task = list_first_entry(&dev->execute_task_list, | ||
2417 | struct se_task, t_execute_list); | ||
2418 | list_del(&task->t_execute_list); | ||
2419 | atomic_set(&task->task_execute_queue, 0); | ||
2420 | atomic_dec(&dev->execute_tasks); | ||
2421 | spin_unlock_irq(&dev->execute_task_lock); | ||
2666 | 2422 | ||
2667 | atomic_dec(&dev->depth_left); | 2423 | atomic_dec(&dev->depth_left); |
2668 | atomic_dec(&SE_HBA(dev)->left_queue_depth); | ||
2669 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | ||
2670 | 2424 | ||
2671 | cmd = TASK_CMD(task); | 2425 | cmd = task->task_se_cmd; |
2672 | 2426 | ||
2673 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 2427 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2674 | atomic_set(&task->task_active, 1); | 2428 | atomic_set(&task->task_active, 1); |
2675 | atomic_set(&task->task_sent, 1); | 2429 | atomic_set(&task->task_sent, 1); |
2676 | atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent); | 2430 | atomic_inc(&cmd->t_task_cdbs_sent); |
2677 | 2431 | ||
2678 | if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) == | 2432 | if (atomic_read(&cmd->t_task_cdbs_sent) == |
2679 | T_TASK(cmd)->t_task_cdbs) | 2433 | cmd->t_task_list_num) |
2680 | atomic_set(&cmd->transport_sent, 1); | 2434 | atomic_set(&cmd->transport_sent, 1); |
2681 | 2435 | ||
2682 | transport_start_task_timer(task); | 2436 | transport_start_task_timer(task); |
2683 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2437 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2684 | /* | 2438 | /* |
2685 | * The struct se_cmd->transport_emulate_cdb() function pointer is used | 2439 | * The struct se_cmd->transport_emulate_cdb() function pointer is used |
2686 | * to grab REPORT_LUNS CDBs before they hit the | 2440 | * to grab REPORT_LUNS and other CDBs we want to handle before they hit the |
2687 | * struct se_subsystem_api->do_task() caller below. | 2441 | * struct se_subsystem_api->do_task() caller below. |
2688 | */ | 2442 | */ |
2689 | if (cmd->transport_emulate_cdb) { | 2443 | if (cmd->transport_emulate_cdb) { |
@@ -2718,11 +2472,11 @@ check_depth: | |||
2718 | * call ->do_task() directly and let the underlying TCM subsystem plugin | 2472 | * call ->do_task() directly and let the underlying TCM subsystem plugin |
2719 | * code handle the CDB emulation. | 2473 | * code handle the CDB emulation. |
2720 | */ | 2474 | */ |
2721 | if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) && | 2475 | if ((dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) && |
2722 | (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) | 2476 | (!(task->task_se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) |
2723 | error = transport_emulate_control_cdb(task); | 2477 | error = transport_emulate_control_cdb(task); |
2724 | else | 2478 | else |
2725 | error = TRANSPORT(dev)->do_task(task); | 2479 | error = dev->transport->do_task(task); |
2726 | 2480 | ||
2727 | if (error != 0) { | 2481 | if (error != 0) { |
2728 | cmd->transport_error_status = error; | 2482 | cmd->transport_error_status = error; |
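
The rewritten dequeue above open-codes list_first_entry()/list_del() under execute_task_lock in place of the old transport_get_task_from_execute_queue() helper. A compact user-space version of the same intrusive-list pop, in the style of the kernel's list.h (invented names):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct se_task_sketch {
        int task_no;
        struct list_head t_execute_list;
};

static void list_del_entry(struct list_head *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
}

static struct se_task_sketch *pop_first(struct list_head *head)
{
        if (head->next == head)        /* list_empty() */
                return NULL;
        struct se_task_sketch *t = container_of(head->next,
                        struct se_task_sketch, t_execute_list);
        list_del_entry(&t->t_execute_list);
        return t;
}

int main(void)
{
        struct list_head head = { &head, &head };
        struct se_task_sketch t = { 7, { &head, &head } };
        head.next = head.prev = &t.t_execute_list;
        struct se_task_sketch *got = pop_first(&head);
        printf("%d\n", got ? got->task_no : -1);   /* 7 */
        return 0;
}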
@@ -2745,12 +2499,10 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd) | |||
2745 | * Any unsolicited data will get dumped for failed command inside of | 2499 | * Any unsolicited data will get dumped for failed command inside of |
2746 | * the fabric plugin | 2500 | * the fabric plugin |
2747 | */ | 2501 | */ |
2748 | spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags); | 2502 | spin_lock_irqsave(&se_cmd->t_state_lock, flags); |
2749 | se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; | 2503 | se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; |
2750 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 2504 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
2751 | spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags); | 2505 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); |
2752 | |||
2753 | CMD_TFO(se_cmd)->new_cmd_failure(se_cmd); | ||
2754 | } | 2506 | } |
2755 | 2507 | ||
2756 | static void transport_nop_wait_for_tasks(struct se_cmd *, int, int); | 2508 | static void transport_nop_wait_for_tasks(struct se_cmd *, int, int); |
@@ -2760,7 +2512,7 @@ static inline u32 transport_get_sectors_6( | |||
2760 | struct se_cmd *cmd, | 2512 | struct se_cmd *cmd, |
2761 | int *ret) | 2513 | int *ret) |
2762 | { | 2514 | { |
2763 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | 2515 | struct se_device *dev = cmd->se_dev; |
2764 | 2516 | ||
2765 | /* | 2517 | /* |
2766 | * Assume TYPE_DISK for non struct se_device objects. | 2518 | * Assume TYPE_DISK for non struct se_device objects. |
@@ -2772,7 +2524,7 @@ static inline u32 transport_get_sectors_6( | |||
2772 | /* | 2524 | /* |
2773 | * Use 24-bit allocation length for TYPE_TAPE. | 2525 | * Use 24-bit allocation length for TYPE_TAPE. |
2774 | */ | 2526 | */ |
2775 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) | 2527 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) |
2776 | return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4]; | 2528 | return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4]; |
2777 | 2529 | ||
2778 | /* | 2530 | /* |
@@ -2788,7 +2540,7 @@ static inline u32 transport_get_sectors_10( | |||
2788 | struct se_cmd *cmd, | 2540 | struct se_cmd *cmd, |
2789 | int *ret) | 2541 | int *ret) |
2790 | { | 2542 | { |
2791 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | 2543 | struct se_device *dev = cmd->se_dev; |
2792 | 2544 | ||
2793 | /* | 2545 | /* |
2794 | * Assume TYPE_DISK for non struct se_device objects. | 2546 | * Assume TYPE_DISK for non struct se_device objects. |
@@ -2800,8 +2552,8 @@ static inline u32 transport_get_sectors_10( | |||
2800 | /* | 2552 | /* |
2801 | * XXX_10 is not defined in SSC, throw an exception | 2553 | * XXX_10 is not defined in SSC, throw an exception |
2802 | */ | 2554 | */ |
2803 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { | 2555 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { |
2804 | *ret = -1; | 2556 | *ret = -EINVAL; |
2805 | return 0; | 2557 | return 0; |
2806 | } | 2558 | } |
2807 | 2559 | ||
@@ -2818,7 +2570,7 @@ static inline u32 transport_get_sectors_12( | |||
2818 | struct se_cmd *cmd, | 2570 | struct se_cmd *cmd, |
2819 | int *ret) | 2571 | int *ret) |
2820 | { | 2572 | { |
2821 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | 2573 | struct se_device *dev = cmd->se_dev; |
2822 | 2574 | ||
2823 | /* | 2575 | /* |
2824 | * Assume TYPE_DISK for non struct se_device objects. | 2576 | * Assume TYPE_DISK for non struct se_device objects. |
@@ -2830,8 +2582,8 @@ static inline u32 transport_get_sectors_12( | |||
2830 | /* | 2582 | /* |
2831 | * XXX_12 is not defined in SSC, throw an exception | 2583 | * XXX_12 is not defined in SSC, throw an exception |
2832 | */ | 2584 | */ |
2833 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { | 2585 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { |
2834 | *ret = -1; | 2586 | *ret = -EINVAL; |
2835 | return 0; | 2587 | return 0; |
2836 | } | 2588 | } |
2837 | 2589 | ||
@@ -2848,7 +2600,7 @@ static inline u32 transport_get_sectors_16( | |||
2848 | struct se_cmd *cmd, | 2600 | struct se_cmd *cmd, |
2849 | int *ret) | 2601 | int *ret) |
2850 | { | 2602 | { |
2851 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | 2603 | struct se_device *dev = cmd->se_dev; |
2852 | 2604 | ||
2853 | /* | 2605 | /* |
2854 | * Assume TYPE_DISK for non struct se_device objects. | 2606 | * Assume TYPE_DISK for non struct se_device objects. |
@@ -2860,7 +2612,7 @@ static inline u32 transport_get_sectors_16( | |||
2860 | /* | 2612 | /* |
2861 | * Use 24-bit allocation length for TYPE_TAPE. | 2613 | * Use 24-bit allocation length for TYPE_TAPE. |
2862 | */ | 2614 | */ |
2863 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) | 2615 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) |
2864 | return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14]; | 2616 | return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14]; |
2865 | 2617 | ||
2866 | type_disk: | 2618 | type_disk: |
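
The transport_get_sectors_*() helpers above just assemble big-endian CDB fields: a 24-bit allocation length from bytes 2..4 of a 6-byte tape CDB, a 16-bit transfer length from bytes 7..8 of a 10-byte disk CDB, and so on. For illustration:

#include <stdint.h>
#include <stdio.h>

/* 24-bit allocation length, TYPE_TAPE, 6-byte CDB */
static uint32_t get_sectors_6_tape(const uint8_t *cdb)
{
        return ((uint32_t)cdb[2] << 16) | (cdb[3] << 8) | cdb[4];
}

/* 16-bit transfer length, TYPE_DISK, 10-byte CDB */
static uint32_t get_sectors_10_disk(const uint8_t *cdb)
{
        return ((uint32_t)cdb[7] << 8) | cdb[8];
}

int main(void)
{
        uint8_t cdb10[10] = { 0x28, 0, 0, 0, 0, 0, 0, 0x01, 0x00, 0 };
        printf("sectors: %u\n", get_sectors_10_disk(cdb10)); /* 256 */
        return 0;
}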
@@ -2890,57 +2642,30 @@ static inline u32 transport_get_size( | |||
2890 | unsigned char *cdb, | 2642 | unsigned char *cdb, |
2891 | struct se_cmd *cmd) | 2643 | struct se_cmd *cmd) |
2892 | { | 2644 | { |
2893 | struct se_device *dev = SE_DEV(cmd); | 2645 | struct se_device *dev = cmd->se_dev; |
2894 | 2646 | ||
2895 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { | 2647 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { |
2896 | if (cdb[1] & 1) { /* sectors */ | 2648 | if (cdb[1] & 1) { /* sectors */ |
2897 | return DEV_ATTRIB(dev)->block_size * sectors; | 2649 | return dev->se_sub_dev->se_dev_attrib.block_size * sectors; |
2898 | } else /* bytes */ | 2650 | } else /* bytes */ |
2899 | return sectors; | 2651 | return sectors; |
2900 | } | 2652 | } |
2901 | #if 0 | 2653 | #if 0 |
2902 | printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for" | 2654 | pr_debug("Returning block_size: %u, sectors: %u == %u for" |
2903 | " %s object\n", DEV_ATTRIB(dev)->block_size, sectors, | 2655 | " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors, |
2904 | DEV_ATTRIB(dev)->block_size * sectors, | 2656 | dev->se_sub_dev->se_dev_attrib.block_size * sectors, |
2905 | TRANSPORT(dev)->name); | 2657 | dev->transport->name); |
2906 | #endif | 2658 | #endif |
2907 | return DEV_ATTRIB(dev)->block_size * sectors; | 2659 | return dev->se_sub_dev->se_dev_attrib.block_size * sectors; |
2908 | } | ||
2909 | |||
2910 | unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]) | ||
2911 | { | ||
2912 | unsigned char result = 0; | ||
2913 | /* | ||
2914 | * MSB | ||
2915 | */ | ||
2916 | if ((val[0] >= 'a') && (val[0] <= 'f')) | ||
2917 | result = ((val[0] - 'a' + 10) & 0xf) << 4; | ||
2918 | else | ||
2919 | if ((val[0] >= 'A') && (val[0] <= 'F')) | ||
2920 | result = ((val[0] - 'A' + 10) & 0xf) << 4; | ||
2921 | else /* digit */ | ||
2922 | result = ((val[0] - '0') & 0xf) << 4; | ||
2923 | /* | ||
2924 | * LSB | ||
2925 | */ | ||
2926 | if ((val[1] >= 'a') && (val[1] <= 'f')) | ||
2927 | result |= ((val[1] - 'a' + 10) & 0xf); | ||
2928 | else | ||
2929 | if ((val[1] >= 'A') && (val[1] <= 'F')) | ||
2930 | result |= ((val[1] - 'A' + 10) & 0xf); | ||
2931 | else /* digit */ | ||
2932 | result |= ((val[1] - '0') & 0xf); | ||
2933 | |||
2934 | return result; | ||
2935 | } | 2660 | } |
2936 | EXPORT_SYMBOL(transport_asciihex_to_binaryhex); | ||
2937 | 2661 | ||
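
The helper removed above packed two ASCII hex digits into one byte, duplicating generic hex parsing. The equivalent conversion in plain C, for reference (invented names):

#include <stdio.h>

static int hex_nibble(char c)
{
        if (c >= '0' && c <= '9') return c - '0';
        if (c >= 'a' && c <= 'f') return c - 'a' + 10;
        if (c >= 'A' && c <= 'F') return c - 'A' + 10;
        return -1;
}

static unsigned char hex_pair(const char v[2])
{
        /* MSB nibble shifted high, LSB nibble in the low bits */
        return (unsigned char)((hex_nibble(v[0]) << 4) | hex_nibble(v[1]));
}

int main(void)
{
        printf("0x%02x\n", hex_pair("4f"));    /* 0x4f */
        return 0;
}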
2938 | static void transport_xor_callback(struct se_cmd *cmd) | 2662 | static void transport_xor_callback(struct se_cmd *cmd) |
2939 | { | 2663 | { |
2940 | unsigned char *buf, *addr; | 2664 | unsigned char *buf, *addr; |
2941 | struct se_mem *se_mem; | 2665 | struct scatterlist *sg; |
2942 | unsigned int offset; | 2666 | unsigned int offset; |
2943 | int i; | 2667 | int i; |
2668 | int count; | ||
2944 | /* | 2669 | /* |
2945 | * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command | 2670 | * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command |
2946 | * | 2671 | * |
@@ -2953,32 +2678,37 @@ static void transport_xor_callback(struct se_cmd *cmd) | |||
2953 | * 5) transfer the resulting XOR data to the data-in buffer. | 2678 | * 5) transfer the resulting XOR data to the data-in buffer. |
2954 | */ | 2679 | */ |
2955 | buf = kmalloc(cmd->data_length, GFP_KERNEL); | 2680 | buf = kmalloc(cmd->data_length, GFP_KERNEL); |
2956 | if (!(buf)) { | 2681 | if (!buf) { |
2957 | printk(KERN_ERR "Unable to allocate xor_callback buf\n"); | 2682 | pr_err("Unable to allocate xor_callback buf\n"); |
2958 | return; | 2683 | return; |
2959 | } | 2684 | } |
2960 | /* | 2685 | /* |
2961 | * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list | 2686 | * Copy the scatterlist WRITE buffer located at cmd->t_data_sg |
2962 | * into the locally allocated *buf | 2687 | * into the locally allocated *buf |
2963 | */ | 2688 | */ |
2964 | transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list); | 2689 | sg_copy_to_buffer(cmd->t_data_sg, |
2690 | cmd->t_data_nents, | ||
2691 | buf, | ||
2692 | cmd->data_length); | ||
2693 | |||
2965 | /* | 2694 | /* |
2966 | * Now perform the XOR against the BIDI read memory located at | 2695 | * Now perform the XOR against the BIDI read memory located at |
2967 | * T_TASK(cmd)->t_mem_bidi_list | 2696 | * cmd->t_mem_bidi_list |
2968 | */ | 2697 | */ |
2969 | 2698 | ||
2970 | offset = 0; | 2699 | offset = 0; |
2971 | list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) { | 2700 | for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { |
2972 | addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0); | 2701 | addr = kmap_atomic(sg_page(sg), KM_USER0); |
2973 | if (!(addr)) | 2702 | if (!addr) |
2974 | goto out; | 2703 | goto out; |
2975 | 2704 | ||
2976 | for (i = 0; i < se_mem->se_len; i++) | 2705 | for (i = 0; i < sg->length; i++) |
2977 | *(addr + se_mem->se_off + i) ^= *(buf + offset + i); | 2706 | *(addr + sg->offset + i) ^= *(buf + offset + i); |
2978 | 2707 | ||
2979 | offset += se_mem->se_len; | 2708 | offset += sg->length; |
2980 | kunmap_atomic(addr, KM_USER0); | 2709 | kunmap_atomic(addr, KM_USER0); |
2981 | } | 2710 | } |
2711 | |||
2982 | out: | 2712 | out: |
2983 | kfree(buf); | 2713 | kfree(buf); |
2984 | } | 2714 | } |
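
transport_xor_callback() now walks the BIDI scatterlist with for_each_sg(), maps each segment, and XORs it against the matching slice of the staging buffer while advancing a running offset. The core loop in user-space form (invented names, flat buffers standing in for mapped pages):

#include <stdio.h>

struct seg { unsigned char *buf; unsigned int len; };

static void xor_segments(struct seg *sgs, int nents,
                         const unsigned char *staging)
{
        unsigned int offset = 0;
        for (int i = 0; i < nents; i++) {
                /* XOR this segment against its slice of the staging copy */
                for (unsigned int j = 0; j < sgs[i].len; j++)
                        sgs[i].buf[j] ^= staging[offset + j];
                offset += sgs[i].len;
        }
}

int main(void)
{
        unsigned char a[4] = { 1, 2, 3, 4 }, b[2] = { 5, 6 };
        unsigned char staging[6] = { 1, 2, 3, 4, 5, 6 };
        struct seg sgs[2] = { { a, 4 }, { b, 2 } };
        xor_segments(sgs, 2, staging);
        printf("%d %d\n", a[0], b[0]);         /* 0 0 */
        return 0;
}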
@@ -2994,75 +2724,60 @@ static int transport_get_sense_data(struct se_cmd *cmd) | |||
2994 | unsigned long flags; | 2724 | unsigned long flags; |
2995 | u32 offset = 0; | 2725 | u32 offset = 0; |
2996 | 2726 | ||
2997 | if (!SE_LUN(cmd)) { | 2727 | WARN_ON(!cmd->se_lun); |
2998 | printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); | 2728 | |
2999 | return -1; | 2729 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
3000 | } | ||
3001 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
3002 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { | 2730 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { |
3003 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2731 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
3004 | return 0; | 2732 | return 0; |
3005 | } | 2733 | } |
3006 | 2734 | ||
3007 | list_for_each_entry_safe(task, task_tmp, | 2735 | list_for_each_entry_safe(task, task_tmp, |
3008 | &T_TASK(cmd)->t_task_list, t_list) { | 2736 | &cmd->t_task_list, t_list) { |
3009 | 2737 | ||
3010 | if (!task->task_sense) | 2738 | if (!task->task_sense) |
3011 | continue; | 2739 | continue; |
3012 | 2740 | ||
3013 | dev = task->se_dev; | 2741 | dev = task->se_dev; |
3014 | if (!(dev)) | 2742 | if (!dev) |
3015 | continue; | 2743 | continue; |
3016 | 2744 | ||
3017 | if (!TRANSPORT(dev)->get_sense_buffer) { | 2745 | if (!dev->transport->get_sense_buffer) { |
3018 | printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer" | 2746 | pr_err("dev->transport->get_sense_buffer" |
3019 | " is NULL\n"); | 2747 | " is NULL\n"); |
3020 | continue; | 2748 | continue; |
3021 | } | 2749 | } |
3022 | 2750 | ||
3023 | sense_buffer = TRANSPORT(dev)->get_sense_buffer(task); | 2751 | sense_buffer = dev->transport->get_sense_buffer(task); |
3024 | if (!(sense_buffer)) { | 2752 | if (!sense_buffer) { |
3025 | printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate" | 2753 | pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate" |
3026 | " sense buffer for task with sense\n", | 2754 | " sense buffer for task with sense\n", |
3027 | CMD_TFO(cmd)->get_task_tag(cmd), task->task_no); | 2755 | cmd->se_tfo->get_task_tag(cmd), task->task_no); |
3028 | continue; | 2756 | continue; |
3029 | } | 2757 | } |
3030 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2758 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
3031 | 2759 | ||
3032 | offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd, | 2760 | offset = cmd->se_tfo->set_fabric_sense_len(cmd, |
3033 | TRANSPORT_SENSE_BUFFER); | 2761 | TRANSPORT_SENSE_BUFFER); |
3034 | 2762 | ||
3035 | memcpy((void *)&buffer[offset], (void *)sense_buffer, | 2763 | memcpy(&buffer[offset], sense_buffer, |
3036 | TRANSPORT_SENSE_BUFFER); | 2764 | TRANSPORT_SENSE_BUFFER); |
3037 | cmd->scsi_status = task->task_scsi_status; | 2765 | cmd->scsi_status = task->task_scsi_status; |
3038 | /* Automatically padded */ | 2766 | /* Automatically padded */ |
3039 | cmd->scsi_sense_length = | 2767 | cmd->scsi_sense_length = |
3040 | (TRANSPORT_SENSE_BUFFER + offset); | 2768 | (TRANSPORT_SENSE_BUFFER + offset); |
3041 | 2769 | ||
3042 | printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" | 2770 | pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" |
3043 | " and sense\n", | 2771 | " and sense\n", |
3044 | dev->se_hba->hba_id, TRANSPORT(dev)->name, | 2772 | dev->se_hba->hba_id, dev->transport->name, |
3045 | cmd->scsi_status); | 2773 | cmd->scsi_status); |
3046 | return 0; | 2774 | return 0; |
3047 | } | 2775 | } |
3048 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2776 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
3049 | 2777 | ||
3050 | return -1; | 2778 | return -1; |
3051 | } | 2779 | } |
3052 | 2780 | ||
3053 | static int transport_allocate_resources(struct se_cmd *cmd) | ||
3054 | { | ||
3055 | u32 length = cmd->data_length; | ||
3056 | |||
3057 | if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || | ||
3058 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) | ||
3059 | return transport_generic_get_mem(cmd, length, PAGE_SIZE); | ||
3060 | else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) | ||
3061 | return transport_generic_allocate_buf(cmd, length); | ||
3062 | else | ||
3063 | return 0; | ||
3064 | } | ||
3065 | |||
3066 | static int | 2781 | static int |
3067 | transport_handle_reservation_conflict(struct se_cmd *cmd) | 2782 | transport_handle_reservation_conflict(struct se_cmd *cmd) |
3068 | { | 2783 | { |
@@ -3077,12 +2792,40 @@ transport_handle_reservation_conflict(struct se_cmd *cmd) | |||
3077 | * | 2792 | * |
3078 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | 2793 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 |
3079 | */ | 2794 | */ |
3080 | if (SE_SESS(cmd) && | 2795 | if (cmd->se_sess && |
3081 | DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2) | 2796 | cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) |
3082 | core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl, | 2797 | core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, |
3083 | cmd->orig_fe_lun, 0x2C, | 2798 | cmd->orig_fe_lun, 0x2C, |
3084 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | 2799 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); |
3085 | return -2; | 2800 | return -EINVAL; |
2801 | } | ||
2802 | |||
2803 | static inline long long transport_dev_end_lba(struct se_device *dev) | ||
2804 | { | ||
2805 | return dev->transport->get_blocks(dev) + 1; | ||
2806 | } | ||
2807 | |||
2808 | static int transport_cmd_get_valid_sectors(struct se_cmd *cmd) | ||
2809 | { | ||
2810 | struct se_device *dev = cmd->se_dev; | ||
2811 | u32 sectors; | ||
2812 | |||
2813 | if (dev->transport->get_device_type(dev) != TYPE_DISK) | ||
2814 | return 0; | ||
2815 | |||
2816 | sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); | ||
2817 | |||
2818 | if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) { | ||
2819 | pr_err("LBA: %llu Sectors: %u exceeds" | ||
2820 | " transport_dev_end_lba(): %llu\n", | ||
2821 | cmd->t_task_lba, sectors, | ||
2822 | transport_dev_end_lba(dev)); | ||
2823 | pr_err(" We should return CHECK_CONDITION" | ||
2824 | " but we don't yet\n"); | ||
2825 | return 0; | ||
2826 | } | ||
2827 | |||
2828 | return sectors; | ||
3086 | } | 2829 | } |
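
transport_cmd_get_valid_sectors(), added above, converts data_length to sectors and rejects an I/O whose final block would land past transport_dev_end_lba(). The check reduced to its essentials (invented names):

#include <stdio.h>

static int lba_range_ok(unsigned long long lba, unsigned int sectors,
                        unsigned long long dev_blocks)
{
        /* end_lba is one past the last addressable block, as above */
        unsigned long long end_lba = dev_blocks + 1;
        return (lba + sectors) <= end_lba;
}

int main(void)
{
        printf("%d\n", lba_range_ok(100, 8, 99));   /* 0: out of range */
        printf("%d\n", lba_range_ok(0, 8, 1024));   /* 1: fits */
        return 0;
}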
3087 | 2830 | ||
3088 | /* transport_generic_cmd_sequencer(): | 2831 | /* transport_generic_cmd_sequencer(): |
@@ -3099,7 +2842,7 @@ static int transport_generic_cmd_sequencer( | |||
3099 | struct se_cmd *cmd, | 2842 | struct se_cmd *cmd, |
3100 | unsigned char *cdb) | 2843 | unsigned char *cdb) |
3101 | { | 2844 | { |
3102 | struct se_device *dev = SE_DEV(cmd); | 2845 | struct se_device *dev = cmd->se_dev; |
3103 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; | 2846 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
3104 | int ret = 0, sector_ret = 0, passthrough; | 2847 | int ret = 0, sector_ret = 0, passthrough; |
3105 | u32 sectors = 0, size = 0, pr_reg_type = 0; | 2848 | u32 sectors = 0, size = 0, pr_reg_type = 0; |
@@ -3113,12 +2856,12 @@ static int transport_generic_cmd_sequencer( | |||
3113 | &transport_nop_wait_for_tasks; | 2856 | &transport_nop_wait_for_tasks; |
3114 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 2857 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
3115 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; | 2858 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; |
3116 | return -2; | 2859 | return -EINVAL; |
3117 | } | 2860 | } |
3118 | /* | 2861 | /* |
3119 | * Check status of Asymmetric Logical Unit Assignment port | 2862 | * Check status of Asymmetric Logical Unit Assignment port |
3120 | */ | 2863 | */ |
3121 | ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq); | 2864 | ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); |
3122 | if (ret != 0) { | 2865 | if (ret != 0) { |
3123 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | 2866 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; |
3124 | /* | 2867 | /* |
@@ -3128,22 +2871,22 @@ static int transport_generic_cmd_sequencer( | |||
3128 | */ | 2871 | */ |
3129 | if (ret > 0) { | 2872 | if (ret > 0) { |
3130 | #if 0 | 2873 | #if 0 |
3131 | printk(KERN_INFO "[%s]: ALUA TG Port not available," | 2874 | pr_debug("[%s]: ALUA TG Port not available," |
3132 | " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", | 2875 | " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", |
3133 | CMD_TFO(cmd)->get_fabric_name(), alua_ascq); | 2876 | cmd->se_tfo->get_fabric_name(), alua_ascq); |
3134 | #endif | 2877 | #endif |
3135 | transport_set_sense_codes(cmd, 0x04, alua_ascq); | 2878 | transport_set_sense_codes(cmd, 0x04, alua_ascq); |
3136 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 2879 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
3137 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; | 2880 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; |
3138 | return -2; | 2881 | return -EINVAL; |
3139 | } | 2882 | } |
3140 | goto out_invalid_cdb_field; | 2883 | goto out_invalid_cdb_field; |
3141 | } | 2884 | } |
3142 | /* | 2885 | /* |
3143 | * Check status for SPC-3 Persistent Reservations | 2886 | * Check status for SPC-3 Persistent Reservations |
3144 | */ | 2887 | */ |
3145 | if (T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) { | 2888 | if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) { |
3146 | if (T10_PR_OPS(su_dev)->t10_seq_non_holder( | 2889 | if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( |
3147 | cmd, cdb, pr_reg_type) != 0) | 2890 | cmd, cdb, pr_reg_type) != 0) |
3148 | return transport_handle_reservation_conflict(cmd); | 2891 | return transport_handle_reservation_conflict(cmd); |
3149 | /* | 2892 | /* |
@@ -3160,7 +2903,7 @@ static int transport_generic_cmd_sequencer( | |||
3160 | goto out_unsupported_cdb; | 2903 | goto out_unsupported_cdb; |
3161 | size = transport_get_size(sectors, cdb, cmd); | 2904 | size = transport_get_size(sectors, cdb, cmd); |
3162 | cmd->transport_split_cdb = &split_cdb_XX_6; | 2905 | cmd->transport_split_cdb = &split_cdb_XX_6; |
3163 | T_TASK(cmd)->t_task_lba = transport_lba_21(cdb); | 2906 | cmd->t_task_lba = transport_lba_21(cdb); |
3164 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 2907 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3165 | break; | 2908 | break; |
3166 | case READ_10: | 2909 | case READ_10: |
@@ -3169,7 +2912,7 @@ static int transport_generic_cmd_sequencer( | |||
3169 | goto out_unsupported_cdb; | 2912 | goto out_unsupported_cdb; |
3170 | size = transport_get_size(sectors, cdb, cmd); | 2913 | size = transport_get_size(sectors, cdb, cmd); |
3171 | cmd->transport_split_cdb = &split_cdb_XX_10; | 2914 | cmd->transport_split_cdb = &split_cdb_XX_10; |
3172 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | 2915 | cmd->t_task_lba = transport_lba_32(cdb); |
3173 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 2916 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3174 | break; | 2917 | break; |
3175 | case READ_12: | 2918 | case READ_12: |
@@ -3178,7 +2921,7 @@ static int transport_generic_cmd_sequencer( | |||
3178 | goto out_unsupported_cdb; | 2921 | goto out_unsupported_cdb; |
3179 | size = transport_get_size(sectors, cdb, cmd); | 2922 | size = transport_get_size(sectors, cdb, cmd); |
3180 | cmd->transport_split_cdb = &split_cdb_XX_12; | 2923 | cmd->transport_split_cdb = &split_cdb_XX_12; |
3181 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | 2924 | cmd->t_task_lba = transport_lba_32(cdb); |
3182 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 2925 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3183 | break; | 2926 | break; |
3184 | case READ_16: | 2927 | case READ_16: |
@@ -3187,7 +2930,7 @@ static int transport_generic_cmd_sequencer( | |||
3187 | goto out_unsupported_cdb; | 2930 | goto out_unsupported_cdb; |
3188 | size = transport_get_size(sectors, cdb, cmd); | 2931 | size = transport_get_size(sectors, cdb, cmd); |
3189 | cmd->transport_split_cdb = &split_cdb_XX_16; | 2932 | cmd->transport_split_cdb = &split_cdb_XX_16; |
3190 | T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); | 2933 | cmd->t_task_lba = transport_lba_64(cdb); |
3191 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 2934 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3192 | break; | 2935 | break; |
3193 | case WRITE_6: | 2936 | case WRITE_6: |
@@ -3196,7 +2939,7 @@ static int transport_generic_cmd_sequencer( | |||
3196 | goto out_unsupported_cdb; | 2939 | goto out_unsupported_cdb; |
3197 | size = transport_get_size(sectors, cdb, cmd); | 2940 | size = transport_get_size(sectors, cdb, cmd); |
3198 | cmd->transport_split_cdb = &split_cdb_XX_6; | 2941 | cmd->transport_split_cdb = &split_cdb_XX_6; |
3199 | T_TASK(cmd)->t_task_lba = transport_lba_21(cdb); | 2942 | cmd->t_task_lba = transport_lba_21(cdb); |
3200 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 2943 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3201 | break; | 2944 | break; |
3202 | case WRITE_10: | 2945 | case WRITE_10: |
@@ -3205,8 +2948,8 @@ static int transport_generic_cmd_sequencer( | |||
3205 | goto out_unsupported_cdb; | 2948 | goto out_unsupported_cdb; |
3206 | size = transport_get_size(sectors, cdb, cmd); | 2949 | size = transport_get_size(sectors, cdb, cmd); |
3207 | cmd->transport_split_cdb = &split_cdb_XX_10; | 2950 | cmd->transport_split_cdb = &split_cdb_XX_10; |
3208 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | 2951 | cmd->t_task_lba = transport_lba_32(cdb); |
3209 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | 2952 | cmd->t_tasks_fua = (cdb[1] & 0x8); |
3210 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 2953 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3211 | break; | 2954 | break; |
3212 | case WRITE_12: | 2955 | case WRITE_12: |
@@ -3215,8 +2958,8 @@ static int transport_generic_cmd_sequencer( | |||
3215 | goto out_unsupported_cdb; | 2958 | goto out_unsupported_cdb; |
3216 | size = transport_get_size(sectors, cdb, cmd); | 2959 | size = transport_get_size(sectors, cdb, cmd); |
3217 | cmd->transport_split_cdb = &split_cdb_XX_12; | 2960 | cmd->transport_split_cdb = &split_cdb_XX_12; |
3218 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | 2961 | cmd->t_task_lba = transport_lba_32(cdb); |
3219 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | 2962 | cmd->t_tasks_fua = (cdb[1] & 0x8); |
3220 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 2963 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3221 | break; | 2964 | break; |
3222 | case WRITE_16: | 2965 | case WRITE_16: |
@@ -3225,22 +2968,22 @@ static int transport_generic_cmd_sequencer( | |||
3225 | goto out_unsupported_cdb; | 2968 | goto out_unsupported_cdb; |
3226 | size = transport_get_size(sectors, cdb, cmd); | 2969 | size = transport_get_size(sectors, cdb, cmd); |
3227 | cmd->transport_split_cdb = &split_cdb_XX_16; | 2970 | cmd->transport_split_cdb = &split_cdb_XX_16; |
3228 | T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); | 2971 | cmd->t_task_lba = transport_lba_64(cdb); |
3229 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | 2972 | cmd->t_tasks_fua = (cdb[1] & 0x8); |
3230 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 2973 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3231 | break; | 2974 | break; |
3232 | case XDWRITEREAD_10: | 2975 | case XDWRITEREAD_10: |
3233 | if ((cmd->data_direction != DMA_TO_DEVICE) || | 2976 | if ((cmd->data_direction != DMA_TO_DEVICE) || |
3234 | !(T_TASK(cmd)->t_tasks_bidi)) | 2977 | !(cmd->t_tasks_bidi)) |
3235 | goto out_invalid_cdb_field; | 2978 | goto out_invalid_cdb_field; |
3236 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); | 2979 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); |
3237 | if (sector_ret) | 2980 | if (sector_ret) |
3238 | goto out_unsupported_cdb; | 2981 | goto out_unsupported_cdb; |
3239 | size = transport_get_size(sectors, cdb, cmd); | 2982 | size = transport_get_size(sectors, cdb, cmd); |
3240 | cmd->transport_split_cdb = &split_cdb_XX_10; | 2983 | cmd->transport_split_cdb = &split_cdb_XX_10; |
3241 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | 2984 | cmd->t_task_lba = transport_lba_32(cdb); |
3242 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 2985 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3243 | passthrough = (TRANSPORT(dev)->transport_type == | 2986 | passthrough = (dev->transport->transport_type == |
3244 | TRANSPORT_PLUGIN_PHBA_PDEV); | 2987 | TRANSPORT_PLUGIN_PHBA_PDEV); |
3245 | /* | 2988 | /* |
3246 | * Skip the remaining assignments for TCM/PSCSI passthrough | 2989 | * Skip the remaining assignments for TCM/PSCSI passthrough |
@@ -3251,7 +2994,7 @@ static int transport_generic_cmd_sequencer( | |||
3251 | * Setup BIDI XOR callback to be run during transport_generic_complete_ok() | 2994 | * Setup BIDI XOR callback to be run during transport_generic_complete_ok() |
3252 | */ | 2995 | */ |
3253 | cmd->transport_complete_callback = &transport_xor_callback; | 2996 | cmd->transport_complete_callback = &transport_xor_callback; |
3254 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | 2997 | cmd->t_tasks_fua = (cdb[1] & 0x8); |
3255 | break; | 2998 | break; |
3256 | case VARIABLE_LENGTH_CMD: | 2999 | case VARIABLE_LENGTH_CMD: |
3257 | service_action = get_unaligned_be16(&cdb[8]); | 3000 | service_action = get_unaligned_be16(&cdb[8]); |
@@ -3259,7 +3002,7 @@ static int transport_generic_cmd_sequencer( | |||
3259 | * Determine if this is TCM/PSCSI device and we should disable | 3002 | * Determine if this is TCM/PSCSI device and we should disable |
3260 | * internal emulation for this CDB. | 3003 | * internal emulation for this CDB. |
3261 | */ | 3004 | */ |
3262 | passthrough = (TRANSPORT(dev)->transport_type == | 3005 | passthrough = (dev->transport->transport_type == |
3263 | TRANSPORT_PLUGIN_PHBA_PDEV); | 3006 | TRANSPORT_PLUGIN_PHBA_PDEV); |
3264 | 3007 | ||
3265 | switch (service_action) { | 3008 | switch (service_action) { |
@@ -3273,7 +3016,7 @@ static int transport_generic_cmd_sequencer( | |||
3273 | * XDWRITE_READ_32 logic. | 3016 | * XDWRITE_READ_32 logic. |
3274 | */ | 3017 | */ |
3275 | cmd->transport_split_cdb = &split_cdb_XX_32; | 3018 | cmd->transport_split_cdb = &split_cdb_XX_32; |
3276 | T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb); | 3019 | cmd->t_task_lba = transport_lba_64_ext(cdb); |
3277 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 3020 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3278 | 3021 | ||
3279 | /* | 3022 | /* |
@@ -3287,14 +3030,22 @@ static int transport_generic_cmd_sequencer( | |||
3287 | * transport_generic_complete_ok() | 3030 | * transport_generic_complete_ok() |
3288 | */ | 3031 | */ |
3289 | cmd->transport_complete_callback = &transport_xor_callback; | 3032 | cmd->transport_complete_callback = &transport_xor_callback; |
3290 | T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8); | 3033 | cmd->t_tasks_fua = (cdb[10] & 0x8); |
3291 | break; | 3034 | break; |
3292 | case WRITE_SAME_32: | 3035 | case WRITE_SAME_32: |
3293 | sectors = transport_get_sectors_32(cdb, cmd, §or_ret); | 3036 | sectors = transport_get_sectors_32(cdb, cmd, §or_ret); |
3294 | if (sector_ret) | 3037 | if (sector_ret) |
3295 | goto out_unsupported_cdb; | 3038 | goto out_unsupported_cdb; |
3296 | size = transport_get_size(sectors, cdb, cmd); | 3039 | |
3297 | T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]); | 3040 | if (sectors) |
3041 | size = transport_get_size(sectors, cdb, cmd); | ||
3042 | else { | ||
3043 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" | ||
3044 | " supported\n"); | ||
3045 | goto out_invalid_cdb_field; | ||
3046 | } | ||
3047 | |||
3048 | cmd->t_task_lba = get_unaligned_be64(&cdb[12]); | ||
3298 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | 3049 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3299 | 3050 | ||
3300 | /* | 3051 | /* |
@@ -3304,7 +3055,7 @@ static int transport_generic_cmd_sequencer( | |||
3304 | break; | 3055 | break; |
3305 | 3056 | ||
3306 | if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) { | 3057 | if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) { |
3307 | printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" | 3058 | pr_err("WRITE_SAME PBDATA and LBDATA" |
3308 | " bits not supported for Block Discard" | 3059 | " bits not supported for Block Discard" |
3309 | " Emulation\n"); | 3060 | " Emulation\n"); |
3310 | goto out_invalid_cdb_field; | 3061 | goto out_invalid_cdb_field; |
@@ -3314,28 +3065,28 @@ static int transport_generic_cmd_sequencer( | |||
3314 | * tpws with the UNMAP=1 bit set. | 3065 | * tpws with the UNMAP=1 bit set. |
3315 | */ | 3066 | */ |
3316 | if (!(cdb[10] & 0x08)) { | 3067 | if (!(cdb[10] & 0x08)) { |
3317 | printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not" | 3068 | pr_err("WRITE_SAME w/o UNMAP bit not" |
3318 | " supported for Block Discard Emulation\n"); | 3069 | " supported for Block Discard Emulation\n"); |
3319 | goto out_invalid_cdb_field; | 3070 | goto out_invalid_cdb_field; |
3320 | } | 3071 | } |
3321 | break; | 3072 | break; |
3322 | default: | 3073 | default: |
3323 | printk(KERN_ERR "VARIABLE_LENGTH_CMD service action" | 3074 | pr_err("VARIABLE_LENGTH_CMD service action" |
3324 | " 0x%04x not supported\n", service_action); | 3075 | " 0x%04x not supported\n", service_action); |
3325 | goto out_unsupported_cdb; | 3076 | goto out_unsupported_cdb; |
3326 | } | 3077 | } |
3327 | break; | 3078 | break; |
3328 | case 0xa3: | 3079 | case MAINTENANCE_IN: |
3329 | if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) { | 3080 | if (dev->transport->get_device_type(dev) != TYPE_ROM) { |
3330 | /* MAINTENANCE_IN from SCC-2 */ | 3081 | /* MAINTENANCE_IN from SCC-2 */ |
3331 | /* | 3082 | /* |
3332 | * Check for emulated MI_REPORT_TARGET_PGS. | 3083 | * Check for emulated MI_REPORT_TARGET_PGS. |
3333 | */ | 3084 | */ |
3334 | if (cdb[1] == MI_REPORT_TARGET_PGS) { | 3085 | if (cdb[1] == MI_REPORT_TARGET_PGS) { |
3335 | cmd->transport_emulate_cdb = | 3086 | cmd->transport_emulate_cdb = |
3336 | (T10_ALUA(su_dev)->alua_type == | 3087 | (su_dev->t10_alua.alua_type == |
3337 | SPC3_ALUA_EMULATED) ? | 3088 | SPC3_ALUA_EMULATED) ? |
3338 | &core_emulate_report_target_port_groups : | 3089 | core_emulate_report_target_port_groups : |
3339 | NULL; | 3090 | NULL; |
3340 | } | 3091 | } |
3341 | size = (cdb[6] << 24) | (cdb[7] << 16) | | 3092 | size = (cdb[6] << 24) | (cdb[7] << 16) | |
@@ -3344,7 +3095,7 @@ static int transport_generic_cmd_sequencer( | |||
3344 | /* GPCMD_SEND_KEY from multi media commands */ | 3095 | /* GPCMD_SEND_KEY from multi media commands */ |
3345 | size = (cdb[8] << 8) + cdb[9]; | 3096 | size = (cdb[8] << 8) + cdb[9]; |
3346 | } | 3097 | } |
3347 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3098 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3348 | break; | 3099 | break; |
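The open-coded shift chains used for allocation lengths in the cases above are plain big-endian loads; the kernel's unaligned helpers express the same thing. A sketch of the equivalence (mi_alloc_len is an illustrative name):

    #include <linux/types.h>
    #include <asm/unaligned.h>

    /* Returns the same value as
     * (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]
     * used for the MAINTENANCE_IN allocation length above. */
    static u32 mi_alloc_len(const unsigned char *cdb)
    {
        return get_unaligned_be32(&cdb[6]);
    }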
3349 | case MODE_SELECT: | 3100 | case MODE_SELECT: |
3350 | size = cdb[4]; | 3101 | size = cdb[4]; |
@@ -3356,7 +3107,7 @@ static int transport_generic_cmd_sequencer( | |||
3356 | break; | 3107 | break; |
3357 | case MODE_SENSE: | 3108 | case MODE_SENSE: |
3358 | size = cdb[4]; | 3109 | size = cdb[4]; |
3359 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3110 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3360 | break; | 3111 | break; |
3361 | case MODE_SENSE_10: | 3112 | case MODE_SENSE_10: |
3362 | case GPCMD_READ_BUFFER_CAPACITY: | 3113 | case GPCMD_READ_BUFFER_CAPACITY: |
@@ -3364,11 +3115,11 @@ static int transport_generic_cmd_sequencer( | |||
3364 | case LOG_SELECT: | 3115 | case LOG_SELECT: |
3365 | case LOG_SENSE: | 3116 | case LOG_SENSE: |
3366 | size = (cdb[7] << 8) + cdb[8]; | 3117 | size = (cdb[7] << 8) + cdb[8]; |
3367 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3118 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3368 | break; | 3119 | break; |
3369 | case READ_BLOCK_LIMITS: | 3120 | case READ_BLOCK_LIMITS: |
3370 | size = READ_BLOCK_LEN; | 3121 | size = READ_BLOCK_LEN; |
3371 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3122 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3372 | break; | 3123 | break; |
3373 | case GPCMD_GET_CONFIGURATION: | 3124 | case GPCMD_GET_CONFIGURATION: |
3374 | case GPCMD_READ_FORMAT_CAPACITIES: | 3125 | case GPCMD_READ_FORMAT_CAPACITIES: |
@@ -3380,11 +3131,11 @@ static int transport_generic_cmd_sequencer( | |||
3380 | case PERSISTENT_RESERVE_IN: | 3131 | case PERSISTENT_RESERVE_IN: |
3381 | case PERSISTENT_RESERVE_OUT: | 3132 | case PERSISTENT_RESERVE_OUT: |
3382 | cmd->transport_emulate_cdb = | 3133 | cmd->transport_emulate_cdb = |
3383 | (T10_RES(su_dev)->res_type == | 3134 | (su_dev->t10_pr.res_type == |
3384 | SPC3_PERSISTENT_RESERVATIONS) ? | 3135 | SPC3_PERSISTENT_RESERVATIONS) ? |
3385 | &core_scsi3_emulate_pr : NULL; | 3136 | core_scsi3_emulate_pr : NULL; |
3386 | size = (cdb[7] << 8) + cdb[8]; | 3137 | size = (cdb[7] << 8) + cdb[8]; |
3387 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3138 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3388 | break; | 3139 | break; |
3389 | case GPCMD_MECHANISM_STATUS: | 3140 | case GPCMD_MECHANISM_STATUS: |
3390 | case GPCMD_READ_DVD_STRUCTURE: | 3141 | case GPCMD_READ_DVD_STRUCTURE: |
@@ -3393,19 +3144,19 @@ static int transport_generic_cmd_sequencer( | |||
3393 | break; | 3144 | break; |
3394 | case READ_POSITION: | 3145 | case READ_POSITION: |
3395 | size = READ_POSITION_LEN; | 3146 | size = READ_POSITION_LEN; |
3396 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3147 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3397 | break; | 3148 | break; |
3398 | case 0xa4: | 3149 | case MAINTENANCE_OUT: |
3399 | if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) { | 3150 | if (dev->transport->get_device_type(dev) != TYPE_ROM) { |
3400 | /* MAINTENANCE_OUT from SCC-2 | 3151 | /* MAINTENANCE_OUT from SCC-2 |
3401 | * | 3152 | * |
3402 | * Check for emulated MO_SET_TARGET_PGS. | 3153 | * Check for emulated MO_SET_TARGET_PGS. |
3403 | */ | 3154 | */ |
3404 | if (cdb[1] == MO_SET_TARGET_PGS) { | 3155 | if (cdb[1] == MO_SET_TARGET_PGS) { |
3405 | cmd->transport_emulate_cdb = | 3156 | cmd->transport_emulate_cdb = |
3406 | (T10_ALUA(su_dev)->alua_type == | 3157 | (su_dev->t10_alua.alua_type == |
3407 | SPC3_ALUA_EMULATED) ? | 3158 | SPC3_ALUA_EMULATED) ? |
3408 | &core_emulate_set_target_port_groups : | 3159 | core_emulate_set_target_port_groups : |
3409 | NULL; | 3160 | NULL; |
3410 | } | 3161 | } |
3411 | 3162 | ||
@@ -3415,7 +3166,7 @@ static int transport_generic_cmd_sequencer( | |||
3415 | /* GPCMD_REPORT_KEY from multi media commands */ | 3166 | /* GPCMD_REPORT_KEY from multi media commands */ |
3416 | size = (cdb[8] << 8) + cdb[9]; | 3167 | size = (cdb[8] << 8) + cdb[9]; |
3417 | } | 3168 | } |
3418 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3169 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3419 | break; | 3170 | break; |
3420 | case INQUIRY: | 3171 | case INQUIRY: |
3421 | size = (cdb[3] << 8) + cdb[4]; | 3172 | size = (cdb[3] << 8) + cdb[4]; |
@@ -3423,23 +3174,23 @@ static int transport_generic_cmd_sequencer( | |||
3423 | * Do implicit HEAD_OF_QUEUE processing for INQUIRY. | 3174 | * Do implicit HEAD_OF_QUEUE processing for INQUIRY. |
3424 | * See spc4r17 section 5.3 | 3175 | * See spc4r17 section 5.3 |
3425 | */ | 3176 | */ |
3426 | if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | 3177 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
3427 | cmd->sam_task_attr = MSG_HEAD_TAG; | 3178 | cmd->sam_task_attr = MSG_HEAD_TAG; |
3428 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3179 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3429 | break; | 3180 | break; |
3430 | case READ_BUFFER: | 3181 | case READ_BUFFER: |
3431 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | 3182 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; |
3432 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3183 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3433 | break; | 3184 | break; |
3434 | case READ_CAPACITY: | 3185 | case READ_CAPACITY: |
3435 | size = READ_CAP_LEN; | 3186 | size = READ_CAP_LEN; |
3436 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3187 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3437 | break; | 3188 | break; |
3438 | case READ_MEDIA_SERIAL_NUMBER: | 3189 | case READ_MEDIA_SERIAL_NUMBER: |
3439 | case SECURITY_PROTOCOL_IN: | 3190 | case SECURITY_PROTOCOL_IN: |
3440 | case SECURITY_PROTOCOL_OUT: | 3191 | case SECURITY_PROTOCOL_OUT: |
3441 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | 3192 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; |
3442 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3193 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3443 | break; | 3194 | break; |
3444 | case SERVICE_ACTION_IN: | 3195 | case SERVICE_ACTION_IN: |
3445 | case ACCESS_CONTROL_IN: | 3196 | case ACCESS_CONTROL_IN: |
@@ -3450,36 +3201,36 @@ static int transport_generic_cmd_sequencer( | |||
3450 | case WRITE_ATTRIBUTE: | 3201 | case WRITE_ATTRIBUTE: |
3451 | size = (cdb[10] << 24) | (cdb[11] << 16) | | 3202 | size = (cdb[10] << 24) | (cdb[11] << 16) | |
3452 | (cdb[12] << 8) | cdb[13]; | 3203 | (cdb[12] << 8) | cdb[13]; |
3453 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3204 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3454 | break; | 3205 | break; |
3455 | case RECEIVE_DIAGNOSTIC: | 3206 | case RECEIVE_DIAGNOSTIC: |
3456 | case SEND_DIAGNOSTIC: | 3207 | case SEND_DIAGNOSTIC: |
3457 | size = (cdb[3] << 8) | cdb[4]; | 3208 | size = (cdb[3] << 8) | cdb[4]; |
3458 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3209 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3459 | break; | 3210 | break; |
3460 | /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */ | 3211 | /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */ |
3461 | #if 0 | 3212 | #if 0 |
3462 | case GPCMD_READ_CD: | 3213 | case GPCMD_READ_CD: |
3463 | sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | 3214 | sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; |
3464 | size = (2336 * sectors); | 3215 | size = (2336 * sectors); |
3465 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3216 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3466 | break; | 3217 | break; |
3467 | #endif | 3218 | #endif |
3468 | case READ_TOC: | 3219 | case READ_TOC: |
3469 | size = cdb[8]; | 3220 | size = cdb[8]; |
3470 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3221 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3471 | break; | 3222 | break; |
3472 | case REQUEST_SENSE: | 3223 | case REQUEST_SENSE: |
3473 | size = cdb[4]; | 3224 | size = cdb[4]; |
3474 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3225 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3475 | break; | 3226 | break; |
3476 | case READ_ELEMENT_STATUS: | 3227 | case READ_ELEMENT_STATUS: |
3477 | size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; | 3228 | size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; |
3478 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3229 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3479 | break; | 3230 | break; |
3480 | case WRITE_BUFFER: | 3231 | case WRITE_BUFFER: |
3481 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | 3232 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; |
3482 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3233 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3483 | break; | 3234 | break; |
3484 | case RESERVE: | 3235 | case RESERVE: |
3485 | case RESERVE_10: | 3236 | case RESERVE_10: |
@@ -3500,9 +3251,9 @@ static int transport_generic_cmd_sequencer( | |||
3500 | * emulation disabled. | 3251 | * emulation disabled. |
3501 | */ | 3252 | */ |
3502 | cmd->transport_emulate_cdb = | 3253 | cmd->transport_emulate_cdb = |
3503 | (T10_RES(su_dev)->res_type != | 3254 | (su_dev->t10_pr.res_type != |
3504 | SPC_PASSTHROUGH) ? | 3255 | SPC_PASSTHROUGH) ? |
3505 | &core_scsi2_emulate_crh : NULL; | 3256 | core_scsi2_emulate_crh : NULL; |
3506 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | 3257 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; |
3507 | break; | 3258 | break; |
3508 | case RELEASE: | 3259 | case RELEASE: |
@@ -3517,9 +3268,9 @@ static int transport_generic_cmd_sequencer( | |||
3517 | size = cmd->data_length; | 3268 | size = cmd->data_length; |
3518 | 3269 | ||
3519 | cmd->transport_emulate_cdb = | 3270 | cmd->transport_emulate_cdb = |
3520 | (T10_RES(su_dev)->res_type != | 3271 | (su_dev->t10_pr.res_type != |
3521 | SPC_PASSTHROUGH) ? | 3272 | SPC_PASSTHROUGH) ? |
3522 | &core_scsi2_emulate_crh : NULL; | 3273 | core_scsi2_emulate_crh : NULL; |
3523 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | 3274 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; |
3524 | break; | 3275 | break; |
3525 | case SYNCHRONIZE_CACHE: | 3276 | case SYNCHRONIZE_CACHE: |
@@ -3529,10 +3280,10 @@ static int transport_generic_cmd_sequencer( | |||
3529 | */ | 3280 | */ |
3530 | if (cdb[0] == SYNCHRONIZE_CACHE) { | 3281 | if (cdb[0] == SYNCHRONIZE_CACHE) { |
3531 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); | 3282 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); |
3532 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | 3283 | cmd->t_task_lba = transport_lba_32(cdb); |
3533 | } else { | 3284 | } else { |
3534 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); | 3285 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); |
3535 | T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); | 3286 | cmd->t_task_lba = transport_lba_64(cdb); |
3536 | } | 3287 | } |
3537 | if (sector_ret) | 3288 | if (sector_ret) |
3538 | goto out_unsupported_cdb; | 3289 | goto out_unsupported_cdb; |
@@ -3543,7 +3294,7 @@ static int transport_generic_cmd_sequencer( | |||
3543 | /* | 3294 | /* |
3544 | * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb() | 3295 | * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb() |
3545 | */ | 3296 | */ |
3546 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) | 3297 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) |
3547 | break; | 3298 | break; |
3548 | /* | 3299 | /* |
3549 | * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation | 3300 | * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation |
@@ -3554,32 +3305,27 @@ static int transport_generic_cmd_sequencer( | |||
3554 | * Check to ensure that LBA + Range does not extend past the end of | 3305 | * Check to ensure that LBA + Range does not extend past the end of |
3555 | * device. | 3306 | * device. |
3556 | */ | 3307 | */ |
3557 | if (transport_get_sectors(cmd) < 0) | 3308 | if (!transport_cmd_get_valid_sectors(cmd)) |
3558 | goto out_invalid_cdb_field; | 3309 | goto out_invalid_cdb_field; |
3559 | break; | 3310 | break; |
3560 | case UNMAP: | 3311 | case UNMAP: |
3561 | size = get_unaligned_be16(&cdb[7]); | 3312 | size = get_unaligned_be16(&cdb[7]); |
3562 | passthrough = (TRANSPORT(dev)->transport_type == | 3313 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3563 | TRANSPORT_PLUGIN_PHBA_PDEV); | ||
3564 | /* | ||
3565 | * Determine if the received UNMAP is used for direct passthrough | ||
3566 | * into Linux/SCSI with struct request via TCM/pSCSI or we are | ||
3567 | * signaling the use of internal transport_generic_unmap() emulation | ||
3568 | * for UNMAP -> Linux/BLOCK discard with TCM/IBLOCK and TCM/FILEIO | ||
3569 | * subsystem plugin backstores. | ||
3570 | */ | ||
3571 | if (!(passthrough)) | ||
3572 | cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP; | ||
3573 | |||
3574 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | ||
3575 | break; | 3314 | break; |
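For UNMAP, the size taken from cdb[7..8] is the parameter-list length; the payload itself is an 8-byte header followed by 16-byte block descriptors. A hedged sketch of walking that list, assuming the SBC-3 descriptor layout (walk_unmap_param_list is illustrative, not the core's emulation code):

    #include <linux/types.h>
    #include <asm/unaligned.h>

    /* Each descriptor: 8-byte LBA, 4-byte block count, 4 reserved bytes. */
    static void walk_unmap_param_list(const u8 *buf, u32 len)
    {
        u16 bd_len = get_unaligned_be16(&buf[2]); /* block descriptor data length */
        const u8 *p = buf + 8;

        while (bd_len >= 16 && p + 16 <= buf + len) {
            u64 lba = get_unaligned_be64(&p[0]);
            u32 blocks = get_unaligned_be32(&p[8]);
            /* a real backend would discard [lba, lba + blocks) here */
            (void)lba; (void)blocks;
            p += 16;
            bd_len -= 16;
        }
    }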
3576 | case WRITE_SAME_16: | 3315 | case WRITE_SAME_16: |
3577 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); | 3316 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); |
3578 | if (sector_ret) | 3317 | if (sector_ret) |
3579 | goto out_unsupported_cdb; | 3318 | goto out_unsupported_cdb; |
3580 | size = transport_get_size(sectors, cdb, cmd); | 3319 | |
3581 | T_TASK(cmd)->t_task_lba = get_unaligned_be16(&cdb[2]); | 3320 | if (sectors) |
3582 | passthrough = (TRANSPORT(dev)->transport_type == | 3321 | size = transport_get_size(sectors, cdb, cmd); |
3322 | else { | ||
3323 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); | ||
3324 | goto out_invalid_cdb_field; | ||
3325 | } | ||
3326 | |||
3327 | cmd->t_task_lba = get_unaligned_be16(&cdb[2]); | ||
3328 | passthrough = (dev->transport->transport_type == | ||
3583 | TRANSPORT_PLUGIN_PHBA_PDEV); | 3329 | TRANSPORT_PLUGIN_PHBA_PDEV); |
3584 | /* | 3330 | /* |
3585 | * Determine if the received WRITE_SAME_16 is used for direct | 3331 | * Determine if the received WRITE_SAME_16 is used for direct |
@@ -3588,9 +3334,9 @@ static int transport_generic_cmd_sequencer( | |||
3588 | * emulation for -> Linux/BLOCK discard with TCM/IBLOCK and | 3334 | * emulation for -> Linux/BLOCK discard with TCM/IBLOCK and |
3589 | * TCM/FILEIO subsystem plugin backstores. | 3335 | * TCM/FILEIO subsystem plugin backstores. |
3590 | */ | 3336 | */ |
3591 | if (!(passthrough)) { | 3337 | if (!passthrough) { |
3592 | if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { | 3338 | if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { |
3593 | printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" | 3339 | pr_err("WRITE_SAME PBDATA and LBDATA" |
3594 | " bits not supported for Block Discard" | 3340 | " bits not supported for Block Discard" |
3595 | " Emulation\n"); | 3341 | " Emulation\n"); |
3596 | goto out_invalid_cdb_field; | 3342 | goto out_invalid_cdb_field; |
@@ -3600,7 +3346,7 @@ static int transport_generic_cmd_sequencer( | |||
3600 | * tpws with the UNMAP=1 bit set. | 3346 | * tpws with the UNMAP=1 bit set. |
3601 | */ | 3347 | */ |
3602 | if (!(cdb[1] & 0x08)) { | 3348 | if (!(cdb[1] & 0x08)) { |
3603 | printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not " | 3349 | pr_err("WRITE_SAME w/o UNMAP bit not " |
3604 | " supported for Block Discard Emulation\n"); | 3350 | " supported for Block Discard Emulation\n"); |
3605 | goto out_invalid_cdb_field; | 3351 | goto out_invalid_cdb_field; |
3606 | } | 3352 | } |
@@ -3625,34 +3371,34 @@ static int transport_generic_cmd_sequencer( | |||
3625 | break; | 3371 | break; |
3626 | case REPORT_LUNS: | 3372 | case REPORT_LUNS: |
3627 | cmd->transport_emulate_cdb = | 3373 | cmd->transport_emulate_cdb = |
3628 | &transport_core_report_lun_response; | 3374 | transport_core_report_lun_response; |
3629 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | 3375 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; |
3630 | /* | 3376 | /* |
3631 | * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS | 3377 | * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS |
3632 | * See spc4r17 section 5.3 | 3378 | * See spc4r17 section 5.3 |
3633 | */ | 3379 | */ |
3634 | if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | 3380 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
3635 | cmd->sam_task_attr = MSG_HEAD_TAG; | 3381 | cmd->sam_task_attr = MSG_HEAD_TAG; |
3636 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3382 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3637 | break; | 3383 | break; |
3638 | default: | 3384 | default: |
3639 | printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode" | 3385 | pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" |
3640 | " 0x%02x, sending CHECK_CONDITION.\n", | 3386 | " 0x%02x, sending CHECK_CONDITION.\n", |
3641 | CMD_TFO(cmd)->get_fabric_name(), cdb[0]); | 3387 | cmd->se_tfo->get_fabric_name(), cdb[0]); |
3642 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | 3388 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; |
3643 | goto out_unsupported_cdb; | 3389 | goto out_unsupported_cdb; |
3644 | } | 3390 | } |
3645 | 3391 | ||
3646 | if (size != cmd->data_length) { | 3392 | if (size != cmd->data_length) { |
3647 | printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:" | 3393 | pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" |
3648 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" | 3394 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" |
3649 | " 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(), | 3395 | " 0x%02x\n", cmd->se_tfo->get_fabric_name(), |
3650 | cmd->data_length, size, cdb[0]); | 3396 | cmd->data_length, size, cdb[0]); |
3651 | 3397 | ||
3652 | cmd->cmd_spdtl = size; | 3398 | cmd->cmd_spdtl = size; |
3653 | 3399 | ||
3654 | if (cmd->data_direction == DMA_TO_DEVICE) { | 3400 | if (cmd->data_direction == DMA_TO_DEVICE) { |
3655 | printk(KERN_ERR "Rejecting underflow/overflow" | 3401 | pr_err("Rejecting underflow/overflow" |
3656 | " WRITE data\n"); | 3402 | " WRITE data\n"); |
3657 | goto out_invalid_cdb_field; | 3403 | goto out_invalid_cdb_field; |
3658 | } | 3404 | } |
@@ -3660,10 +3406,10 @@ static int transport_generic_cmd_sequencer( | |||
3660 | * Reject READ_* or WRITE_* with overflow/underflow for | 3406 | * Reject READ_* or WRITE_* with overflow/underflow for |
3661 | * type SCF_SCSI_DATA_SG_IO_CDB. | 3407 | * type SCF_SCSI_DATA_SG_IO_CDB. |
3662 | */ | 3408 | */ |
3663 | if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512)) { | 3409 | if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) { |
3664 | printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op" | 3410 | pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" |
3665 | " CDB on non 512-byte sector setup subsystem" | 3411 | " CDB on non 512-byte sector setup subsystem" |
3666 | " plugin: %s\n", TRANSPORT(dev)->name); | 3412 | " plugin: %s\n", dev->transport->name); |
3667 | /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ | 3413 | /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ |
3668 | goto out_invalid_cdb_field; | 3414 | goto out_invalid_cdb_field; |
3669 | } | 3415 | } |
@@ -3678,105 +3424,22 @@ static int transport_generic_cmd_sequencer( | |||
3678 | cmd->data_length = size; | 3424 | cmd->data_length = size; |
3679 | } | 3425 | } |
3680 | 3426 | ||
3427 | /* Let's limit control cdbs to a page, for simplicity's sake. */ | ||
3428 | if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) && | ||
3429 | size > PAGE_SIZE) | ||
3430 | goto out_invalid_cdb_field; | ||
3431 | |||
3681 | transport_set_supported_SAM_opcode(cmd); | 3432 | transport_set_supported_SAM_opcode(cmd); |
3682 | return ret; | 3433 | return ret; |
3683 | 3434 | ||
3684 | out_unsupported_cdb: | 3435 | out_unsupported_cdb: |
3685 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 3436 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
3686 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | 3437 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; |
3687 | return -2; | 3438 | return -EINVAL; |
3688 | out_invalid_cdb_field: | 3439 | out_invalid_cdb_field: |
3689 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 3440 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
3690 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | 3441 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; |
3691 | return -2; | 3442 | return -EINVAL; |
3692 | } | ||
3693 | |||
3694 | static inline void transport_release_tasks(struct se_cmd *); | ||
3695 | |||
3696 | /* | ||
3697 | * This function will copy a contiguous *src buffer into a destination | ||
3698 | * struct scatterlist array. | ||
3699 | */ | ||
3700 | static void transport_memcpy_write_contig( | ||
3701 | struct se_cmd *cmd, | ||
3702 | struct scatterlist *sg_d, | ||
3703 | unsigned char *src) | ||
3704 | { | ||
3705 | u32 i = 0, length = 0, total_length = cmd->data_length; | ||
3706 | void *dst; | ||
3707 | |||
3708 | while (total_length) { | ||
3709 | length = sg_d[i].length; | ||
3710 | |||
3711 | if (length > total_length) | ||
3712 | length = total_length; | ||
3713 | |||
3714 | dst = sg_virt(&sg_d[i]); | ||
3715 | |||
3716 | memcpy(dst, src, length); | ||
3717 | |||
3718 | if (!(total_length -= length)) | ||
3719 | return; | ||
3720 | |||
3721 | src += length; | ||
3722 | i++; | ||
3723 | } | ||
3724 | } | ||
3725 | |||
3726 | /* | ||
3727 | * This function will copy a struct scatterlist array *sg_s into a destination | ||
3728 | * contiguous *dst buffer. | ||
3729 | */ | ||
3730 | static void transport_memcpy_read_contig( | ||
3731 | struct se_cmd *cmd, | ||
3732 | unsigned char *dst, | ||
3733 | struct scatterlist *sg_s) | ||
3734 | { | ||
3735 | u32 i = 0, length = 0, total_length = cmd->data_length; | ||
3736 | void *src; | ||
3737 | |||
3738 | while (total_length) { | ||
3739 | length = sg_s[i].length; | ||
3740 | |||
3741 | if (length > total_length) | ||
3742 | length = total_length; | ||
3743 | |||
3744 | src = sg_virt(&sg_s[i]); | ||
3745 | |||
3746 | memcpy(dst, src, length); | ||
3747 | |||
3748 | if (!(total_length -= length)) | ||
3749 | return; | ||
3750 | |||
3751 | dst += length; | ||
3752 | i++; | ||
3753 | } | ||
3754 | } | ||
3755 | |||
3756 | static void transport_memcpy_se_mem_read_contig( | ||
3757 | struct se_cmd *cmd, | ||
3758 | unsigned char *dst, | ||
3759 | struct list_head *se_mem_list) | ||
3760 | { | ||
3761 | struct se_mem *se_mem; | ||
3762 | void *src; | ||
3763 | u32 length = 0, total_length = cmd->data_length; | ||
3764 | |||
3765 | list_for_each_entry(se_mem, se_mem_list, se_list) { | ||
3766 | length = se_mem->se_len; | ||
3767 | |||
3768 | if (length > total_length) | ||
3769 | length = total_length; | ||
3770 | |||
3771 | src = page_address(se_mem->se_page) + se_mem->se_off; | ||
3772 | |||
3773 | memcpy(dst, src, length); | ||
3774 | |||
3775 | if (!(total_length -= length)) | ||
3776 | return; | ||
3777 | |||
3778 | dst += length; | ||
3779 | } | ||
3780 | } | 3443 | } |
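The transport_memcpy_{write,read}_contig() helpers deleted above hand-rolled copies between a flat buffer and a scatterlist; lib/scatterlist.c already provides generic equivalents, which is presumably part of why the bespoke versions could go. A sketch of those library calls (copy_both_ways is illustrative; the new core code does not necessarily call these exact helpers):

    #include <linux/scatterlist.h>

    /* sg_copy_from_buffer(): flat buffer -> SGL (cf. transport_memcpy_write_contig)
     * sg_copy_to_buffer():   SGL -> flat buffer (cf. transport_memcpy_read_contig)
     * Both return the number of bytes actually copied. */
    static void copy_both_ways(struct scatterlist *sgl, unsigned int nents,
                               void *flat, size_t len)
    {
        size_t w = sg_copy_from_buffer(sgl, nents, flat, len);
        size_t r = sg_copy_to_buffer(sgl, nents, flat, len);
        (void)w; (void)r;
    }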
3781 | 3444 | ||
3782 | /* | 3445 | /* |
@@ -3786,7 +3449,7 @@ static void transport_memcpy_se_mem_read_contig( | |||
3786 | */ | 3449 | */ |
3787 | static void transport_complete_task_attr(struct se_cmd *cmd) | 3450 | static void transport_complete_task_attr(struct se_cmd *cmd) |
3788 | { | 3451 | { |
3789 | struct se_device *dev = SE_DEV(cmd); | 3452 | struct se_device *dev = cmd->se_dev; |
3790 | struct se_cmd *cmd_p, *cmd_tmp; | 3453 | struct se_cmd *cmd_p, *cmd_tmp; |
3791 | int new_active_tasks = 0; | 3454 | int new_active_tasks = 0; |
3792 | 3455 | ||
@@ -3794,25 +3457,25 @@ static void transport_complete_task_attr(struct se_cmd *cmd) | |||
3794 | atomic_dec(&dev->simple_cmds); | 3457 | atomic_dec(&dev->simple_cmds); |
3795 | smp_mb__after_atomic_dec(); | 3458 | smp_mb__after_atomic_dec(); |
3796 | dev->dev_cur_ordered_id++; | 3459 | dev->dev_cur_ordered_id++; |
3797 | DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for" | 3460 | pr_debug("Incremented dev->dev_cur_ordered_id: %u for" |
3798 | " SIMPLE: %u\n", dev->dev_cur_ordered_id, | 3461 | " SIMPLE: %u\n", dev->dev_cur_ordered_id, |
3799 | cmd->se_ordered_id); | 3462 | cmd->se_ordered_id); |
3800 | } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { | 3463 | } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { |
3801 | atomic_dec(&dev->dev_hoq_count); | 3464 | atomic_dec(&dev->dev_hoq_count); |
3802 | smp_mb__after_atomic_dec(); | 3465 | smp_mb__after_atomic_dec(); |
3803 | dev->dev_cur_ordered_id++; | 3466 | dev->dev_cur_ordered_id++; |
3804 | DEBUG_STA("Incremented dev_cur_ordered_id: %u for" | 3467 | pr_debug("Incremented dev_cur_ordered_id: %u for" |
3805 | " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, | 3468 | " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, |
3806 | cmd->se_ordered_id); | 3469 | cmd->se_ordered_id); |
3807 | } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { | 3470 | } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { |
3808 | spin_lock(&dev->ordered_cmd_lock); | 3471 | spin_lock(&dev->ordered_cmd_lock); |
3809 | list_del(&cmd->se_ordered_list); | 3472 | list_del(&cmd->se_ordered_node); |
3810 | atomic_dec(&dev->dev_ordered_sync); | 3473 | atomic_dec(&dev->dev_ordered_sync); |
3811 | smp_mb__after_atomic_dec(); | 3474 | smp_mb__after_atomic_dec(); |
3812 | spin_unlock(&dev->ordered_cmd_lock); | 3475 | spin_unlock(&dev->ordered_cmd_lock); |
3813 | 3476 | ||
3814 | dev->dev_cur_ordered_id++; | 3477 | dev->dev_cur_ordered_id++; |
3815 | DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:" | 3478 | pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" |
3816 | " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); | 3479 | " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); |
3817 | } | 3480 | } |
3818 | /* | 3481 | /* |
@@ -3822,15 +3485,15 @@ static void transport_complete_task_attr(struct se_cmd *cmd) | |||
3822 | */ | 3485 | */ |
3823 | spin_lock(&dev->delayed_cmd_lock); | 3486 | spin_lock(&dev->delayed_cmd_lock); |
3824 | list_for_each_entry_safe(cmd_p, cmd_tmp, | 3487 | list_for_each_entry_safe(cmd_p, cmd_tmp, |
3825 | &dev->delayed_cmd_list, se_delayed_list) { | 3488 | &dev->delayed_cmd_list, se_delayed_node) { |
3826 | 3489 | ||
3827 | list_del(&cmd_p->se_delayed_list); | 3490 | list_del(&cmd_p->se_delayed_node); |
3828 | spin_unlock(&dev->delayed_cmd_lock); | 3491 | spin_unlock(&dev->delayed_cmd_lock); |
3829 | 3492 | ||
3830 | DEBUG_STA("Calling add_tasks() for" | 3493 | pr_debug("Calling add_tasks() for" |
3831 | " cmd_p: 0x%02x Task Attr: 0x%02x" | 3494 | " cmd_p: 0x%02x Task Attr: 0x%02x" |
3832 | " Dormant -> Active, se_ordered_id: %u\n", | 3495 | " Dormant -> Active, se_ordered_id: %u\n", |
3833 | T_TASK(cmd_p)->t_task_cdb[0], | 3496 | cmd_p->t_task_cdb[0], |
3834 | cmd_p->sam_task_attr, cmd_p->se_ordered_id); | 3497 | cmd_p->sam_task_attr, cmd_p->se_ordered_id); |
3835 | 3498 | ||
3836 | transport_add_tasks_from_cmd(cmd_p); | 3499 | transport_add_tasks_from_cmd(cmd_p); |
@@ -3846,20 +3509,79 @@ static void transport_complete_task_attr(struct se_cmd *cmd) | |||
3846 | * to do the processing of the Active tasks. | 3509 | * to do the processing of the Active tasks. |
3847 | */ | 3510 | */ |
3848 | if (new_active_tasks != 0) | 3511 | if (new_active_tasks != 0) |
3849 | wake_up_interruptible(&dev->dev_queue_obj->thread_wq); | 3512 | wake_up_interruptible(&dev->dev_queue_obj.thread_wq); |
3513 | } | ||
3514 | |||
3515 | static int transport_complete_qf(struct se_cmd *cmd) | ||
3516 | { | ||
3517 | int ret = 0; | ||
3518 | |||
3519 | if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) | ||
3520 | return cmd->se_tfo->queue_status(cmd); | ||
3521 | |||
3522 | switch (cmd->data_direction) { | ||
3523 | case DMA_FROM_DEVICE: | ||
3524 | ret = cmd->se_tfo->queue_data_in(cmd); | ||
3525 | break; | ||
3526 | case DMA_TO_DEVICE: | ||
3527 | if (cmd->t_bidi_data_sg) { | ||
3528 | ret = cmd->se_tfo->queue_data_in(cmd); | ||
3529 | if (ret < 0) | ||
3530 | return ret; | ||
3531 | } | ||
3532 | /* Fall through for DMA_TO_DEVICE */ | ||
3533 | case DMA_NONE: | ||
3534 | ret = cmd->se_tfo->queue_status(cmd); | ||
3535 | break; | ||
3536 | default: | ||
3537 | break; | ||
3538 | } | ||
3539 | |||
3540 | return ret; | ||
3541 | } | ||
3542 | |||
3543 | static void transport_handle_queue_full( | ||
3544 | struct se_cmd *cmd, | ||
3545 | struct se_device *dev, | ||
3546 | int (*qf_callback)(struct se_cmd *)) | ||
3547 | { | ||
3548 | spin_lock_irq(&dev->qf_cmd_lock); | ||
3549 | cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL; | ||
3550 | cmd->transport_qf_callback = qf_callback; | ||
3551 | list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); | ||
3552 | atomic_inc(&dev->dev_qf_count); | ||
3553 | smp_mb__after_atomic_inc(); | ||
3554 | spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); | ||
3555 | |||
3556 | schedule_work(&cmd->se_dev->qf_work_queue); | ||
3850 | } | 3557 | } |
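transport_handle_queue_full() parks the command on the device's qf_cmd_list and schedules qf_work_queue; the worker later replays each parked command's saved callback. A simplified sketch of such a drain loop, assuming the surrounding driver context for types and locking (example_qf_drain is illustrative and elides the memory barriers and bookkeeping of the real handler):

    /* Pop each parked command and retry its callback; on another failure,
     * park it again via transport_handle_queue_full(). */
    static void example_qf_drain(struct se_device *dev)
    {
        struct se_cmd *cmd, *tmp;

        spin_lock_irq(&dev->qf_cmd_lock);
        list_for_each_entry_safe(cmd, tmp, &dev->qf_cmd_list, se_qf_node) {
            list_del(&cmd->se_qf_node);
            atomic_dec(&dev->dev_qf_count);
            spin_unlock_irq(&dev->qf_cmd_lock);

            if (cmd->transport_qf_callback(cmd) < 0)
                transport_handle_queue_full(cmd, dev,
                                            cmd->transport_qf_callback);

            spin_lock_irq(&dev->qf_cmd_lock);
        }
        spin_unlock_irq(&dev->qf_cmd_lock);
    }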
3851 | 3558 | ||
3852 | static void transport_generic_complete_ok(struct se_cmd *cmd) | 3559 | static void transport_generic_complete_ok(struct se_cmd *cmd) |
3853 | { | 3560 | { |
3854 | int reason = 0; | 3561 | int reason = 0, ret; |
3855 | /* | 3562 | /* |
3856 | * Check if we need to move delayed/dormant tasks from cmds on the | 3563 | * Check if we need to move delayed/dormant tasks from cmds on the |
3857 | * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task | 3564 | * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task |
3858 | * Attribute. | 3565 | * Attribute. |
3859 | */ | 3566 | */ |
3860 | if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | 3567 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
3861 | transport_complete_task_attr(cmd); | 3568 | transport_complete_task_attr(cmd); |
3862 | /* | 3569 | /* |
3570 | * Check to schedule QUEUE_FULL work, or execute an existing | ||
3571 | * cmd->transport_qf_callback() | ||
3572 | */ | ||
3573 | if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) | ||
3574 | schedule_work(&cmd->se_dev->qf_work_queue); | ||
3575 | |||
3576 | if (cmd->transport_qf_callback) { | ||
3577 | ret = cmd->transport_qf_callback(cmd); | ||
3578 | if (ret < 0) | ||
3579 | goto queue_full; | ||
3580 | |||
3581 | cmd->transport_qf_callback = NULL; | ||
3582 | goto done; | ||
3583 | } | ||
3584 | /* | ||
3863 | * Check if we need to retrieve a sense buffer from | 3585 | * Check if we need to retrieve a sense buffer from |
3864 | * the struct se_cmd in question. | 3586 | * the struct se_cmd in question. |
3865 | */ | 3587 | */ |
@@ -3872,8 +3594,11 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) | |||
3872 | * a non GOOD status. | 3594 | * a non GOOD status. |
3873 | */ | 3595 | */ |
3874 | if (cmd->scsi_status) { | 3596 | if (cmd->scsi_status) { |
3875 | transport_send_check_condition_and_sense( | 3597 | ret = transport_send_check_condition_and_sense( |
3876 | cmd, reason, 1); | 3598 | cmd, reason, 1); |
3599 | if (ret == -EAGAIN) | ||
3600 | goto queue_full; | ||
3601 | |||
3877 | transport_lun_remove_cmd(cmd); | 3602 | transport_lun_remove_cmd(cmd); |
3878 | transport_cmd_check_stop_to_fabric(cmd); | 3603 | transport_cmd_check_stop_to_fabric(cmd); |
3879 | return; | 3604 | return; |
@@ -3889,53 +3614,57 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) | |||
3889 | switch (cmd->data_direction) { | 3614 | switch (cmd->data_direction) { |
3890 | case DMA_FROM_DEVICE: | 3615 | case DMA_FROM_DEVICE: |
3891 | spin_lock(&cmd->se_lun->lun_sep_lock); | 3616 | spin_lock(&cmd->se_lun->lun_sep_lock); |
3892 | if (SE_LUN(cmd)->lun_sep) { | 3617 | if (cmd->se_lun->lun_sep) { |
3893 | SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets += | 3618 | cmd->se_lun->lun_sep->sep_stats.tx_data_octets += |
3894 | cmd->data_length; | 3619 | cmd->data_length; |
3895 | } | 3620 | } |
3896 | spin_unlock(&cmd->se_lun->lun_sep_lock); | 3621 | spin_unlock(&cmd->se_lun->lun_sep_lock); |
3897 | /* | ||
3898 | * If enabled by TCM fabric module pre-registered SGL | ||
3899 | * memory, perform the memcpy() from the TCM internal | ||
3900 | * contiguous buffer back to the original SGL. | ||
3901 | */ | ||
3902 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) | ||
3903 | transport_memcpy_write_contig(cmd, | ||
3904 | T_TASK(cmd)->t_task_pt_sgl, | ||
3905 | T_TASK(cmd)->t_task_buf); | ||
3906 | 3622 | ||
3907 | CMD_TFO(cmd)->queue_data_in(cmd); | 3623 | ret = cmd->se_tfo->queue_data_in(cmd); |
3624 | if (ret == -EAGAIN) | ||
3625 | goto queue_full; | ||
3908 | break; | 3626 | break; |
3909 | case DMA_TO_DEVICE: | 3627 | case DMA_TO_DEVICE: |
3910 | spin_lock(&cmd->se_lun->lun_sep_lock); | 3628 | spin_lock(&cmd->se_lun->lun_sep_lock); |
3911 | if (SE_LUN(cmd)->lun_sep) { | 3629 | if (cmd->se_lun->lun_sep) { |
3912 | SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets += | 3630 | cmd->se_lun->lun_sep->sep_stats.rx_data_octets += |
3913 | cmd->data_length; | 3631 | cmd->data_length; |
3914 | } | 3632 | } |
3915 | spin_unlock(&cmd->se_lun->lun_sep_lock); | 3633 | spin_unlock(&cmd->se_lun->lun_sep_lock); |
3916 | /* | 3634 | /* |
3917 | * Check if we need to send READ payload for BIDI-COMMAND | 3635 | * Check if we need to send READ payload for BIDI-COMMAND |
3918 | */ | 3636 | */ |
3919 | if (T_TASK(cmd)->t_mem_bidi_list != NULL) { | 3637 | if (cmd->t_bidi_data_sg) { |
3920 | spin_lock(&cmd->se_lun->lun_sep_lock); | 3638 | spin_lock(&cmd->se_lun->lun_sep_lock); |
3921 | if (SE_LUN(cmd)->lun_sep) { | 3639 | if (cmd->se_lun->lun_sep) { |
3922 | SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets += | 3640 | cmd->se_lun->lun_sep->sep_stats.tx_data_octets += |
3923 | cmd->data_length; | 3641 | cmd->data_length; |
3924 | } | 3642 | } |
3925 | spin_unlock(&cmd->se_lun->lun_sep_lock); | 3643 | spin_unlock(&cmd->se_lun->lun_sep_lock); |
3926 | CMD_TFO(cmd)->queue_data_in(cmd); | 3644 | ret = cmd->se_tfo->queue_data_in(cmd); |
3645 | if (ret == -EAGAIN) | ||
3646 | goto queue_full; | ||
3927 | break; | 3647 | break; |
3928 | } | 3648 | } |
3929 | /* Fall through for DMA_TO_DEVICE */ | 3649 | /* Fall through for DMA_TO_DEVICE */ |
3930 | case DMA_NONE: | 3650 | case DMA_NONE: |
3931 | CMD_TFO(cmd)->queue_status(cmd); | 3651 | ret = cmd->se_tfo->queue_status(cmd); |
3652 | if (ret == -EAGAIN) | ||
3653 | goto queue_full; | ||
3932 | break; | 3654 | break; |
3933 | default: | 3655 | default: |
3934 | break; | 3656 | break; |
3935 | } | 3657 | } |
3936 | 3658 | ||
3659 | done: | ||
3937 | transport_lun_remove_cmd(cmd); | 3660 | transport_lun_remove_cmd(cmd); |
3938 | transport_cmd_check_stop_to_fabric(cmd); | 3661 | transport_cmd_check_stop_to_fabric(cmd); |
3662 | return; | ||
3663 | |||
3664 | queue_full: | ||
3665 | pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," | ||
3666 | " data_direction: %d\n", cmd, cmd->data_direction); | ||
3667 | transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); | ||
3939 | } | 3668 | } |
3940 | 3669 | ||
3941 | static void transport_free_dev_tasks(struct se_cmd *cmd) | 3670 | static void transport_free_dev_tasks(struct se_cmd *cmd) |
@@ -3943,9 +3672,9 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) | |||
3943 | struct se_task *task, *task_tmp; | 3672 | struct se_task *task, *task_tmp; |
3944 | unsigned long flags; | 3673 | unsigned long flags; |
3945 | 3674 | ||
3946 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 3675 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
3947 | list_for_each_entry_safe(task, task_tmp, | 3676 | list_for_each_entry_safe(task, task_tmp, |
3948 | &T_TASK(cmd)->t_task_list, t_list) { | 3677 | &cmd->t_task_list, t_list) { |
3949 | if (atomic_read(&task->task_active)) | 3678 | if (atomic_read(&task->task_active)) |
3950 | continue; | 3679 | continue; |
3951 | 3680 | ||
@@ -3954,75 +3683,40 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) | |||
3954 | 3683 | ||
3955 | list_del(&task->t_list); | 3684 | list_del(&task->t_list); |
3956 | 3685 | ||
3957 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 3686 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
3958 | if (task->se_dev) | 3687 | if (task->se_dev) |
3959 | TRANSPORT(task->se_dev)->free_task(task); | 3688 | task->se_dev->transport->free_task(task); |
3960 | else | 3689 | else |
3961 | printk(KERN_ERR "task[%u] - task->se_dev is NULL\n", | 3690 | pr_err("task[%u] - task->se_dev is NULL\n", |
3962 | task->task_no); | 3691 | task->task_no); |
3963 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 3692 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
3964 | } | 3693 | } |
3965 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 3694 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
3966 | } | 3695 | } |
3967 | 3696 | ||
3968 | static inline void transport_free_pages(struct se_cmd *cmd) | 3697 | static inline void transport_free_sgl(struct scatterlist *sgl, int nents) |
3969 | { | 3698 | { |
3970 | struct se_mem *se_mem, *se_mem_tmp; | 3699 | struct scatterlist *sg; |
3971 | int free_page = 1; | 3700 | int count; |
3972 | |||
3973 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) | ||
3974 | free_page = 0; | ||
3975 | if (cmd->se_dev->transport->do_se_mem_map) | ||
3976 | free_page = 0; | ||
3977 | 3701 | ||
3978 | if (T_TASK(cmd)->t_task_buf) { | 3702 | for_each_sg(sgl, sg, nents, count) |
3979 | kfree(T_TASK(cmd)->t_task_buf); | 3703 | __free_page(sg_page(sg)); |
3980 | T_TASK(cmd)->t_task_buf = NULL; | ||
3981 | return; | ||
3982 | } | ||
3983 | 3704 | ||
3984 | /* | 3705 | kfree(sgl); |
3985 | * Caller will handle releasing of struct se_mem. | 3706 | } |
3986 | */ | ||
3987 | if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC) | ||
3988 | return; | ||
3989 | 3707 | ||
3990 | if (!(T_TASK(cmd)->t_tasks_se_num)) | 3708 | static inline void transport_free_pages(struct se_cmd *cmd) |
3709 | { | ||
3710 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) | ||
3991 | return; | 3711 | return; |
3992 | 3712 | ||
3993 | list_for_each_entry_safe(se_mem, se_mem_tmp, | 3713 | transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); |
3994 | T_TASK(cmd)->t_mem_list, se_list) { | 3714 | cmd->t_data_sg = NULL; |
3995 | /* | 3715 | cmd->t_data_nents = 0; |
3996 | * We only call __free_page(struct se_mem->se_page) when | ||
3997 | * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, | ||
3998 | */ | ||
3999 | if (free_page) | ||
4000 | __free_page(se_mem->se_page); | ||
4001 | |||
4002 | list_del(&se_mem->se_list); | ||
4003 | kmem_cache_free(se_mem_cache, se_mem); | ||
4004 | } | ||
4005 | |||
4006 | if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) { | ||
4007 | list_for_each_entry_safe(se_mem, se_mem_tmp, | ||
4008 | T_TASK(cmd)->t_mem_bidi_list, se_list) { | ||
4009 | /* | ||
4010 | * We only call __free_page(struct se_mem->se_page) when | ||
4011 | * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, | ||
4012 | */ | ||
4013 | if (free_page) | ||
4014 | __free_page(se_mem->se_page); | ||
4015 | |||
4016 | list_del(&se_mem->se_list); | ||
4017 | kmem_cache_free(se_mem_cache, se_mem); | ||
4018 | } | ||
4019 | } | ||
4020 | 3716 | ||
4021 | kfree(T_TASK(cmd)->t_mem_bidi_list); | 3717 | transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); |
4022 | T_TASK(cmd)->t_mem_bidi_list = NULL; | 3718 | cmd->t_bidi_data_sg = NULL; |
4023 | kfree(T_TASK(cmd)->t_mem_list); | 3719 | cmd->t_bidi_data_nents = 0; |
4024 | T_TASK(cmd)->t_mem_list = NULL; | ||
4025 | T_TASK(cmd)->t_tasks_se_num = 0; | ||
4026 | } | 3720 | } |
4027 | 3721 | ||
4028 | static inline void transport_release_tasks(struct se_cmd *cmd) | 3722 | static inline void transport_release_tasks(struct se_cmd *cmd) |
@@ -4034,23 +3728,23 @@ static inline int transport_dec_and_check(struct se_cmd *cmd) | |||
4034 | { | 3728 | { |
4035 | unsigned long flags; | 3729 | unsigned long flags; |
4036 | 3730 | ||
4037 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 3731 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4038 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { | 3732 | if (atomic_read(&cmd->t_fe_count)) { |
4039 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) { | 3733 | if (!atomic_dec_and_test(&cmd->t_fe_count)) { |
4040 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | 3734 | spin_unlock_irqrestore(&cmd->t_state_lock, |
4041 | flags); | 3735 | flags); |
4042 | return 1; | 3736 | return 1; |
4043 | } | 3737 | } |
4044 | } | 3738 | } |
4045 | 3739 | ||
4046 | if (atomic_read(&T_TASK(cmd)->t_se_count)) { | 3740 | if (atomic_read(&cmd->t_se_count)) { |
4047 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) { | 3741 | if (!atomic_dec_and_test(&cmd->t_se_count)) { |
4048 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | 3742 | spin_unlock_irqrestore(&cmd->t_state_lock, |
4049 | flags); | 3743 | flags); |
4050 | return 1; | 3744 | return 1; |
4051 | } | 3745 | } |
4052 | } | 3746 | } |
4053 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 3747 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
4054 | 3748 | ||
4055 | return 0; | 3749 | return 0; |
4056 | } | 3750 | } |
@@ -4062,68 +3756,57 @@ static void transport_release_fe_cmd(struct se_cmd *cmd) | |||
4062 | if (transport_dec_and_check(cmd)) | 3756 | if (transport_dec_and_check(cmd)) |
4063 | return; | 3757 | return; |
4064 | 3758 | ||
4065 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 3759 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4066 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { | 3760 | if (!atomic_read(&cmd->transport_dev_active)) { |
4067 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 3761 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
4068 | goto free_pages; | 3762 | goto free_pages; |
4069 | } | 3763 | } |
4070 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); | 3764 | atomic_set(&cmd->transport_dev_active, 0); |
4071 | transport_all_task_dev_remove_state(cmd); | 3765 | transport_all_task_dev_remove_state(cmd); |
4072 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 3766 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
4073 | 3767 | ||
4074 | transport_release_tasks(cmd); | 3768 | transport_release_tasks(cmd); |
4075 | free_pages: | 3769 | free_pages: |
4076 | transport_free_pages(cmd); | 3770 | transport_free_pages(cmd); |
4077 | transport_free_se_cmd(cmd); | 3771 | transport_free_se_cmd(cmd); |
4078 | CMD_TFO(cmd)->release_cmd_direct(cmd); | 3772 | cmd->se_tfo->release_cmd(cmd); |
4079 | } | 3773 | } |
4080 | 3774 | ||
4081 | static int transport_generic_remove( | 3775 | static int |
4082 | struct se_cmd *cmd, | 3776 | transport_generic_remove(struct se_cmd *cmd, int session_reinstatement) |
4083 | int release_to_pool, | ||
4084 | int session_reinstatement) | ||
4085 | { | 3777 | { |
4086 | unsigned long flags; | 3778 | unsigned long flags; |
4087 | 3779 | ||
4088 | if (!(T_TASK(cmd))) | ||
4089 | goto release_cmd; | ||
4090 | |||
4091 | if (transport_dec_and_check(cmd)) { | 3780 | if (transport_dec_and_check(cmd)) { |
4092 | if (session_reinstatement) { | 3781 | if (session_reinstatement) { |
4093 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 3782 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4094 | transport_all_task_dev_remove_state(cmd); | 3783 | transport_all_task_dev_remove_state(cmd); |
4095 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | 3784 | spin_unlock_irqrestore(&cmd->t_state_lock, |
4096 | flags); | 3785 | flags); |
4097 | } | 3786 | } |
4098 | return 1; | 3787 | return 1; |
4099 | } | 3788 | } |
4100 | 3789 | ||
4101 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 3790 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4102 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { | 3791 | if (!atomic_read(&cmd->transport_dev_active)) { |
4103 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 3792 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
4104 | goto free_pages; | 3793 | goto free_pages; |
4105 | } | 3794 | } |
4106 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); | 3795 | atomic_set(&cmd->transport_dev_active, 0); |
4107 | transport_all_task_dev_remove_state(cmd); | 3796 | transport_all_task_dev_remove_state(cmd); |
4108 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 3797 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
4109 | 3798 | ||
4110 | transport_release_tasks(cmd); | 3799 | transport_release_tasks(cmd); |
3800 | |||
4111 | free_pages: | 3801 | free_pages: |
4112 | transport_free_pages(cmd); | 3802 | transport_free_pages(cmd); |
4113 | 3803 | transport_release_cmd(cmd); | |
4114 | release_cmd: | ||
4115 | if (release_to_pool) { | ||
4116 | transport_release_cmd_to_pool(cmd); | ||
4117 | } else { | ||
4118 | transport_free_se_cmd(cmd); | ||
4119 | CMD_TFO(cmd)->release_cmd_direct(cmd); | ||
4120 | } | ||
4121 | |||
4122 | return 0; | 3804 | return 0; |
4123 | } | 3805 | } |
4124 | 3806 | ||
4125 | /* | 3807 | /* |
4126 | * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map | 3808 | * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of |
3809 | * allocating in the core. | ||
4127 | * @cmd: Associated se_cmd descriptor | 3810 | * @cmd: Associated se_cmd descriptor |
4128 | * @mem: SGL style memory for TCM WRITE / READ | 3811 | * @mem: SGL style memory for TCM WRITE / READ |
4129 | * @sg_mem_num: Number of SGL elements | 3812 | * @sg_mem_num: Number of SGL elements |
@@ -4135,614 +3818,163 @@ release_cmd: | |||
4135 | */ | 3818 | */ |
4136 | int transport_generic_map_mem_to_cmd( | 3819 | int transport_generic_map_mem_to_cmd( |
4137 | struct se_cmd *cmd, | 3820 | struct se_cmd *cmd, |
4138 | struct scatterlist *mem, | 3821 | struct scatterlist *sgl, |
4139 | u32 sg_mem_num, | 3822 | u32 sgl_count, |
4140 | struct scatterlist *mem_bidi_in, | 3823 | struct scatterlist *sgl_bidi, |
4141 | u32 sg_mem_bidi_num) | 3824 | u32 sgl_bidi_count) |
4142 | { | 3825 | { |
4143 | u32 se_mem_cnt_out = 0; | 3826 | if (!sgl || !sgl_count) |
4144 | int ret; | ||
4145 | |||
4146 | if (!(mem) || !(sg_mem_num)) | ||
4147 | return 0; | 3827 | return 0; |
4148 | /* | ||
4149 | * Passed *mem will contain a list_head containing preformatted | ||
4150 | * struct se_mem elements... | ||
4151 | */ | ||
4152 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) { | ||
4153 | if ((mem_bidi_in) || (sg_mem_bidi_num)) { | ||
4154 | printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported" | ||
4155 | " with BIDI-COMMAND\n"); | ||
4156 | return -ENOSYS; | ||
4157 | } | ||
4158 | 3828 | ||
4159 | T_TASK(cmd)->t_mem_list = (struct list_head *)mem; | ||
4160 | T_TASK(cmd)->t_tasks_se_num = sg_mem_num; | ||
4161 | cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC; | ||
4162 | return 0; | ||
4163 | } | ||
4164 | /* | ||
4165 | * Otherwise, assume the caller is passing a struct scatterlist | ||
4166 | * array from include/linux/scatterlist.h | ||
4167 | */ | ||
4168 | if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || | 3829 | if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || |
4169 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { | 3830 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { |
4170 | /* | ||
4171 | * For CDB using TCM struct se_mem linked list scatterlist memory | ||
4172 | * processed into a TCM struct se_subsystem_dev, we do the mapping | ||
4173 | * from the passed physical memory to struct se_mem->se_page here. | ||
4174 | */ | ||
4175 | T_TASK(cmd)->t_mem_list = transport_init_se_mem_list(); | ||
4176 | if (!(T_TASK(cmd)->t_mem_list)) | ||
4177 | return -ENOMEM; | ||
4178 | 3831 | ||
4179 | ret = transport_map_sg_to_mem(cmd, | 3832 | cmd->t_data_sg = sgl; |
4180 | T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out); | 3833 | cmd->t_data_nents = sgl_count; |
4181 | if (ret < 0) | ||
4182 | return -ENOMEM; | ||
4183 | 3834 | ||
4184 | T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out; | 3835 | if (sgl_bidi && sgl_bidi_count) { |
4185 | /* | 3836 | cmd->t_bidi_data_sg = sgl_bidi; |
4186 | * Setup BIDI READ list of struct se_mem elements | 3837 | cmd->t_bidi_data_nents = sgl_bidi_count; |
4187 | */ | ||
4188 | if ((mem_bidi_in) && (sg_mem_bidi_num)) { | ||
4189 | T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list(); | ||
4190 | if (!(T_TASK(cmd)->t_mem_bidi_list)) { | ||
4191 | kfree(T_TASK(cmd)->t_mem_list); | ||
4192 | return -ENOMEM; | ||
4193 | } | ||
4194 | se_mem_cnt_out = 0; | ||
4195 | |||
4196 | ret = transport_map_sg_to_mem(cmd, | ||
4197 | T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in, | ||
4198 | &se_mem_cnt_out); | ||
4199 | if (ret < 0) { | ||
4200 | kfree(T_TASK(cmd)->t_mem_list); | ||
4201 | return -ENOMEM; | ||
4202 | } | ||
4203 | |||
4204 | T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out; | ||
4205 | } | 3838 | } |
4206 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; | 3839 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; |
4207 | |||
4208 | } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) { | ||
4209 | if (mem_bidi_in || sg_mem_bidi_num) { | ||
4210 | printk(KERN_ERR "BIDI-Commands not supported using " | ||
4211 | "SCF_SCSI_CONTROL_NONSG_IO_CDB\n"); | ||
4212 | return -ENOSYS; | ||
4213 | } | ||
4214 | /* | ||
4215 | * For incoming CDBs using a contiguous buffer internally with TCM, | ||
4216 | * save the passed struct scatterlist memory. After TCM storage object | ||
4217 | * processing has completed for this struct se_cmd, TCM core will call | ||
4218 | * transport_memcpy_[write,read]_contig() as necessary from | ||
4219 | * transport_generic_complete_ok() and transport_write_pending() in order | ||
4220 | * to copy the TCM buffer to/from the original passed *mem in SGL -> | ||
4221 | * struct scatterlist format. | ||
4222 | */ | ||
4223 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG; | ||
4224 | T_TASK(cmd)->t_task_pt_sgl = mem; | ||
4225 | } | 3840 | } |
4226 | 3841 | ||
4227 | return 0; | 3842 | return 0; |
4228 | } | 3843 | } |
4229 | EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); | 3844 | EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); |
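With struct se_mem gone, transport_generic_map_mem_to_cmd() simply adopts the fabric's scatterlists. A usage sketch in the style of the loopback fabric, which passes the Linux/SCSI SGL straight through (example_map_fabric_sgl is illustrative; see tcm_loop for the real call site):

    #include <scsi/scsi_cmnd.h>
    #include <target/target_core_base.h>

    static int example_map_fabric_sgl(struct se_cmd *se_cmd, struct scsi_cmnd *sc)
    {
        /* Hand the pre-built SGL to the core instead of letting it
         * allocate pages; no BIDI SGL in this example. */
        return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
                                                scsi_sg_count(sc), NULL, 0);
    }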
4230 | 3845 | ||
4231 | |||
4232 | static inline long long transport_dev_end_lba(struct se_device *dev) | ||
4233 | { | ||
4234 | return dev->transport->get_blocks(dev) + 1; | ||
4235 | } | ||
4236 | |||
4237 | static int transport_get_sectors(struct se_cmd *cmd) | ||
4238 | { | ||
4239 | struct se_device *dev = SE_DEV(cmd); | ||
4240 | |||
4241 | T_TASK(cmd)->t_tasks_sectors = | ||
4242 | (cmd->data_length / DEV_ATTRIB(dev)->block_size); | ||
4243 | if (!(T_TASK(cmd)->t_tasks_sectors)) | ||
4244 | T_TASK(cmd)->t_tasks_sectors = 1; | ||
4245 | |||
4246 | if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK) | ||
4247 | return 0; | ||
4248 | |||
4249 | if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) > | ||
4250 | transport_dev_end_lba(dev)) { | ||
4251 | printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" | ||
4252 | " transport_dev_end_lba(): %llu\n", | ||
4253 | T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors, | ||
4254 | transport_dev_end_lba(dev)); | ||
4255 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
4256 | cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; | ||
4257 | return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS; | ||
4258 | } | ||
4259 | |||
4260 | return 0; | ||
4261 | } | ||
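The transport_get_sectors() helper removed above enforced that LBA plus sector count stays inside the device; its replacement, transport_cmd_get_valid_sectors(), keeps the same bound. The check reduces to a one-liner (lba_range_valid is an illustrative name; get_blocks() reports the last valid LBA, so the device holds get_blocks() + 1 blocks):

    #include <linux/types.h>

    /* The range [lba, lba + sectors) must fit within last_lba + 1 blocks. */
    static bool lba_range_valid(u64 lba, u32 sectors, u64 last_lba)
    {
        return sectors && (lba + sectors) <= last_lba + 1;
    }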
4262 | |||
4263 | static int transport_new_cmd_obj(struct se_cmd *cmd) | 3846 | static int transport_new_cmd_obj(struct se_cmd *cmd) |
4264 | { | 3847 | { |
4265 | struct se_device *dev = SE_DEV(cmd); | 3848 | struct se_device *dev = cmd->se_dev; |
4266 | u32 task_cdbs = 0, rc; | 3849 | u32 task_cdbs; |
4267 | 3850 | u32 rc; | |
4268 | if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { | 3851 | int set_counts = 1; |
4269 | task_cdbs++; | ||
4270 | T_TASK(cmd)->t_task_cdbs++; | ||
4271 | } else { | ||
4272 | int set_counts = 1; | ||
4273 | 3852 | ||
4274 | /* | 3853 | /* |
4275 | * Setup any BIDI READ tasks and memory from | 3854 | * Setup any BIDI READ tasks and memory from |
4276 | * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks | 3855 | * cmd->t_mem_bidi_list so the READ struct se_tasks |
4277 | * are queued first for the non pSCSI passthrough case. | 3856 | * are queued first for the non pSCSI passthrough case. |
4278 | */ | 3857 | */ |
4279 | if ((T_TASK(cmd)->t_mem_bidi_list != NULL) && | 3858 | if (cmd->t_bidi_data_sg && |
4280 | (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { | 3859 | (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { |
4281 | rc = transport_generic_get_cdb_count(cmd, | 3860 | rc = transport_allocate_tasks(cmd, |
4282 | T_TASK(cmd)->t_task_lba, | 3861 | cmd->t_task_lba, |
4283 | T_TASK(cmd)->t_tasks_sectors, | 3862 | DMA_FROM_DEVICE, |
4284 | DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list, | 3863 | cmd->t_bidi_data_sg, |
4285 | set_counts); | 3864 | cmd->t_bidi_data_nents); |
4286 | if (!(rc)) { | 3865 | if (rc <= 0) { |
4287 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
4288 | cmd->scsi_sense_reason = | ||
4289 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
4290 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
4291 | } | ||
4292 | set_counts = 0; | ||
4293 | } | ||
4294 | /* | ||
4295 | * Setup the tasks and memory from T_TASK(cmd)->t_mem_list | ||
4296 | * Note for BIDI transfers this will contain the WRITE payload | ||
4297 | */ | ||
4298 | task_cdbs = transport_generic_get_cdb_count(cmd, | ||
4299 | T_TASK(cmd)->t_task_lba, | ||
4300 | T_TASK(cmd)->t_tasks_sectors, | ||
4301 | cmd->data_direction, T_TASK(cmd)->t_mem_list, | ||
4302 | set_counts); | ||
4303 | if (!(task_cdbs)) { | ||
4304 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 3866 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
4305 | cmd->scsi_sense_reason = | 3867 | cmd->scsi_sense_reason = |
4306 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 3868 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
4307 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 3869 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
4308 | } | 3870 | } |
4309 | T_TASK(cmd)->t_task_cdbs += task_cdbs; | 3871 | atomic_inc(&cmd->t_fe_count); |
4310 | 3872 | atomic_inc(&cmd->t_se_count); | |
4311 | #if 0 | 3873 | set_counts = 0; |
4312 | printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:" | ||
4313 | " %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length, | ||
4314 | T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors, | ||
4315 | T_TASK(cmd)->t_task_cdbs); | ||
4316 | #endif | ||
4317 | } | ||
4318 | |||
4319 | atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs); | ||
4320 | atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs); | ||
4321 | atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs); | ||
4322 | return 0; | ||
4323 | } | ||
4324 | |||
4325 | static struct list_head *transport_init_se_mem_list(void) | ||
4326 | { | ||
4327 | struct list_head *se_mem_list; | ||
4328 | |||
4329 | se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL); | ||
4330 | if (!(se_mem_list)) { | ||
4331 | printk(KERN_ERR "Unable to allocate memory for se_mem_list\n"); | ||
4332 | return NULL; | ||
4333 | } | 3874 | } |
4334 | INIT_LIST_HEAD(se_mem_list); | ||
4335 | |||
4336 | return se_mem_list; | ||
4337 | } | ||
4338 | |||
4339 | static int | ||
4340 | transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) | ||
4341 | { | ||
4342 | unsigned char *buf; | ||
4343 | struct se_mem *se_mem; | ||
4344 | |||
4345 | T_TASK(cmd)->t_mem_list = transport_init_se_mem_list(); | ||
4346 | if (!(T_TASK(cmd)->t_mem_list)) | ||
4347 | return -ENOMEM; | ||
4348 | |||
4349 | /* | 3875 | /* |
4350 | * If the device uses memory mapping this is enough. | 3876 | * Setup the tasks and memory from cmd->t_mem_list |
3877 | * Note for BIDI transfers this will contain the WRITE payload | ||
4351 | */ | 3878 | */ |
4352 | if (cmd->se_dev->transport->do_se_mem_map) | 3879 | task_cdbs = transport_allocate_tasks(cmd, |
4353 | return 0; | 3880 | cmd->t_task_lba, |
4354 | 3881 | cmd->data_direction, | |
4355 | /* | 3882 | cmd->t_data_sg, |
4356 | * Setup BIDI-COMMAND READ list of struct se_mem elements | 3883 | cmd->t_data_nents); |
4357 | */ | 3884 | if (task_cdbs <= 0) { |
4358 | if (T_TASK(cmd)->t_tasks_bidi) { | 3885 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
4359 | T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list(); | 3886 | cmd->scsi_sense_reason = |
4360 | if (!(T_TASK(cmd)->t_mem_bidi_list)) { | 3887 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
4361 | kfree(T_TASK(cmd)->t_mem_list); | 3888 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
4362 | return -ENOMEM; | ||
4363 | } | ||
4364 | } | 3889 | } |
4365 | 3890 | ||
4366 | while (length) { | 3891 | if (set_counts) { |
4367 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); | 3892 | atomic_inc(&cmd->t_fe_count); |
4368 | if (!(se_mem)) { | 3893 | atomic_inc(&cmd->t_se_count); |
4369 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); | ||
4370 | goto out; | ||
4371 | } | ||
4372 | |||
4373 | /* #warning FIXME Allocate contiguous pages for struct se_mem elements */ | ||
4374 | se_mem->se_page = alloc_pages(GFP_KERNEL, 0); | ||
4375 | if (!(se_mem->se_page)) { | ||
4376 | printk(KERN_ERR "alloc_pages() failed\n"); | ||
4377 | goto out; | ||
4378 | } | ||
4379 | |||
4380 | buf = kmap_atomic(se_mem->se_page, KM_IRQ0); | ||
4381 | if (!(buf)) { | ||
4382 | printk(KERN_ERR "kmap_atomic() failed\n"); | ||
4383 | goto out; | ||
4384 | } | ||
4385 | INIT_LIST_HEAD(&se_mem->se_list); | ||
4386 | se_mem->se_len = (length > dma_size) ? dma_size : length; | ||
4387 | memset(buf, 0, se_mem->se_len); | ||
4388 | kunmap_atomic(buf, KM_IRQ0); | ||
4389 | |||
4390 | list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list); | ||
4391 | T_TASK(cmd)->t_tasks_se_num++; | ||
4392 | |||
4393 | DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)" | ||
4394 | " Offset(%u)\n", se_mem->se_page, se_mem->se_len, | ||
4395 | se_mem->se_off); | ||
4396 | |||
4397 | length -= se_mem->se_len; | ||
4398 | } | 3894 | } |
4399 | 3895 | ||
4400 | DEBUG_MEM("Allocated total struct se_mem elements(%u)\n", | 3896 | cmd->t_task_list_num = task_cdbs; |
4401 | T_TASK(cmd)->t_tasks_se_num); | ||
4402 | 3897 | ||
3898 | atomic_set(&cmd->t_task_cdbs_left, task_cdbs); | ||
3899 | atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs); | ||
3900 | atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs); | ||
4403 | return 0; | 3901 | return 0; |
4404 | out: | ||
4405 | if (se_mem) | ||
4406 | __free_pages(se_mem->se_page, 0); | ||
4407 | kmem_cache_free(se_mem_cache, se_mem); | ||
4408 | return -1; | ||
4409 | } | 3902 | } |
4410 | 3903 | ||
4411 | u32 transport_calc_sg_num( | 3904 | void *transport_kmap_first_data_page(struct se_cmd *cmd) |
4412 | struct se_task *task, | ||
4413 | struct se_mem *in_se_mem, | ||
4414 | u32 task_offset) | ||
4415 | { | 3905 | { |
4416 | struct se_cmd *se_cmd = task->task_se_cmd; | 3906 | struct scatterlist *sg = cmd->t_data_sg; |
4417 | struct se_device *se_dev = SE_DEV(se_cmd); | ||
4418 | struct se_mem *se_mem = in_se_mem; | ||
4419 | struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd); | ||
4420 | u32 sg_length, task_size = task->task_size, task_sg_num_padded; | ||
4421 | |||
4422 | while (task_size != 0) { | ||
4423 | DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)" | ||
4424 | " se_mem->se_off(%u) task_offset(%u)\n", | ||
4425 | se_mem->se_page, se_mem->se_len, | ||
4426 | se_mem->se_off, task_offset); | ||
4427 | |||
4428 | if (task_offset == 0) { | ||
4429 | if (task_size >= se_mem->se_len) { | ||
4430 | sg_length = se_mem->se_len; | ||
4431 | |||
4432 | if (!(list_is_last(&se_mem->se_list, | ||
4433 | T_TASK(se_cmd)->t_mem_list))) | ||
4434 | se_mem = list_entry(se_mem->se_list.next, | ||
4435 | struct se_mem, se_list); | ||
4436 | } else { | ||
4437 | sg_length = task_size; | ||
4438 | task_size -= sg_length; | ||
4439 | goto next; | ||
4440 | } | ||
4441 | 3907 | ||
4442 | DEBUG_SC("sg_length(%u) task_size(%u)\n", | 3908 | BUG_ON(!sg); |
4443 | sg_length, task_size); | ||
4444 | } else { | ||
4445 | if ((se_mem->se_len - task_offset) > task_size) { | ||
4446 | sg_length = task_size; | ||
4447 | task_size -= sg_length; | ||
4448 | goto next; | ||
4449 | } else { | ||
4450 | sg_length = (se_mem->se_len - task_offset); | ||
4451 | |||
4452 | if (!(list_is_last(&se_mem->se_list, | ||
4453 | T_TASK(se_cmd)->t_mem_list))) | ||
4454 | se_mem = list_entry(se_mem->se_list.next, | ||
4455 | struct se_mem, se_list); | ||
4456 | } | ||
4457 | |||
4458 | DEBUG_SC("sg_length(%u) task_size(%u)\n", | ||
4459 | sg_length, task_size); | ||
4460 | |||
4461 | task_offset = 0; | ||
4462 | } | ||
4463 | task_size -= sg_length; | ||
4464 | next: | ||
4465 | DEBUG_SC("task[%u] - Reducing task_size to(%u)\n", | ||
4466 | task->task_no, task_size); | ||
4467 | |||
4468 | task->task_sg_num++; | ||
4469 | } | ||
4470 | /* | ||
4471 | * Check if the fabric module driver is requesting that all | ||
4472 | * struct se_task->task_sg[] be chained together.. If so, | ||
4473 | * then allocate an extra padding SG entry for linking and | ||
4474 | * marking the end of the chained SGL. | ||
4475 | */ | ||
4476 | if (tfo->task_sg_chaining) { | ||
4477 | task_sg_num_padded = (task->task_sg_num + 1); | ||
4478 | task->task_padded_sg = 1; | ||
4479 | } else | ||
4480 | task_sg_num_padded = task->task_sg_num; | ||
4481 | |||
4482 | task->task_sg = kzalloc(task_sg_num_padded * | ||
4483 | sizeof(struct scatterlist), GFP_KERNEL); | ||
4484 | if (!(task->task_sg)) { | ||
4485 | printk(KERN_ERR "Unable to allocate memory for" | ||
4486 | " task->task_sg\n"); | ||
4487 | return 0; | ||
4488 | } | ||
4489 | sg_init_table(&task->task_sg[0], task_sg_num_padded); | ||
4490 | /* | 3909 | /* |
4491 | * Setup task->task_sg_bidi for SCSI READ payload for | 3910 | * We need to take into account a possible offset here for fabrics like |
4492 | * TCM/pSCSI passthrough if present for BIDI-COMMAND | 3911 | * tcm_loop who may be using a contig buffer from the SCSI midlayer for |
3912 | * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() | ||
4493 | */ | 3913 | */ |
4494 | if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) && | 3914 | return kmap(sg_page(sg)) + sg->offset; |
4495 | (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) { | ||
4496 | task->task_sg_bidi = kzalloc(task_sg_num_padded * | ||
4497 | sizeof(struct scatterlist), GFP_KERNEL); | ||
4498 | if (!(task->task_sg_bidi)) { | ||
4499 | printk(KERN_ERR "Unable to allocate memory for" | ||
4500 | " task->task_sg_bidi\n"); | ||
4501 | return 0; | ||
4502 | } | ||
4503 | sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded); | ||
4504 | } | ||
4505 | /* | ||
4506 | * For the chaining case, setup the proper end of SGL for the | ||
4507 | * initial submission struct task into struct se_subsystem_api. | ||
4508 | * This will be cleared later by transport_do_task_sg_chain() | ||
4509 | */ | ||
4510 | if (task->task_padded_sg) { | ||
4511 | sg_mark_end(&task->task_sg[task->task_sg_num - 1]); | ||
4512 | /* | ||
4513 | * Added the 'if' check before marking end of bi-directional | ||
4514 | * scatterlist, which gets created only for bidirectional | ||
4515 | * (RD + WR) requests. | ||
4516 | */ | ||
4517 | if (task->task_sg_bidi) | ||
4518 | sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]); | ||
4519 | } | ||
4520 | |||
4521 | DEBUG_SC("Successfully allocated task->task_sg_num(%u)," | ||
4522 | " task_sg_num_padded(%u)\n", task->task_sg_num, | ||
4523 | task_sg_num_padded); | ||
4524 | |||
4525 | return task->task_sg_num; | ||
4526 | } | 3915 | } |
3916 | EXPORT_SYMBOL(transport_kmap_first_data_page); | ||
4527 | 3917 | ||
4528 | static inline int transport_set_tasks_sectors_disk( | 3918 | void transport_kunmap_first_data_page(struct se_cmd *cmd) |
4529 | struct se_task *task, | ||
4530 | struct se_device *dev, | ||
4531 | unsigned long long lba, | ||
4532 | u32 sectors, | ||
4533 | int *max_sectors_set) | ||
4534 | { | 3919 | { |
4535 | if ((lba + sectors) > transport_dev_end_lba(dev)) { | 3920 | kunmap(sg_page(cmd->t_data_sg)); |
4536 | task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1); | ||
4537 | |||
4538 | if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) { | ||
4539 | task->task_sectors = DEV_ATTRIB(dev)->max_sectors; | ||
4540 | *max_sectors_set = 1; | ||
4541 | } | ||
4542 | } else { | ||
4543 | if (sectors > DEV_ATTRIB(dev)->max_sectors) { | ||
4544 | task->task_sectors = DEV_ATTRIB(dev)->max_sectors; | ||
4545 | *max_sectors_set = 1; | ||
4546 | } else | ||
4547 | task->task_sectors = sectors; | ||
4548 | } | ||
4549 | |||
4550 | return 0; | ||
4551 | } | 3921 | } |
3922 | EXPORT_SYMBOL(transport_kunmap_first_data_page); | ||
4552 | 3923 | ||
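A hedged usage sketch for the new pair of exports above: a backend emulating a control CDB can address the first data page as a flat buffer, as long as it always pairs the map with the unmap. The caller name and the single write into buf are illustrative assumptions, not code from this patch:

/* Hypothetical caller; only the two transport_*_first_data_page()
 * helpers are real. */
static int emulate_inquiry_sketch(struct se_cmd *cmd)
{
	unsigned char *buf;

	buf = transport_kmap_first_data_page(cmd); /* kmap + sg->offset */
	buf[0] = 0x00;		/* e.g. peripheral qualifier/device type */
	transport_kunmap_first_data_page(cmd);	   /* always pair the unmap */
	return 0;
}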
4553 | static inline int transport_set_tasks_sectors_non_disk( | 3924 | static int |
4554 | struct se_task *task, | 3925 | transport_generic_get_mem(struct se_cmd *cmd) |
4555 | struct se_device *dev, | ||
4556 | unsigned long long lba, | ||
4557 | u32 sectors, | ||
4558 | int *max_sectors_set) | ||
4559 | { | 3926 | { |
4560 | if (sectors > DEV_ATTRIB(dev)->max_sectors) { | 3927 | u32 length = cmd->data_length; |
4561 | task->task_sectors = DEV_ATTRIB(dev)->max_sectors; | 3928 | unsigned int nents; |
4562 | *max_sectors_set = 1; | 3929 | struct page *page; |
4563 | } else | 3930 | int i = 0; |
4564 | task->task_sectors = sectors; | ||
4565 | 3931 | ||
4566 | return 0; | 3932 | nents = DIV_ROUND_UP(length, PAGE_SIZE); |
4567 | } | 3933 | cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL); |
3934 | if (!cmd->t_data_sg) | ||
3935 | return -ENOMEM; | ||
4568 | 3936 | ||
4569 | static inline int transport_set_tasks_sectors( | 3937 | cmd->t_data_nents = nents; |
4570 | struct se_task *task, | 3938 | sg_init_table(cmd->t_data_sg, nents); |
4571 | struct se_device *dev, | ||
4572 | unsigned long long lba, | ||
4573 | u32 sectors, | ||
4574 | int *max_sectors_set) | ||
4575 | { | ||
4576 | return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ? | ||
4577 | transport_set_tasks_sectors_disk(task, dev, lba, sectors, | ||
4578 | max_sectors_set) : | ||
4579 | transport_set_tasks_sectors_non_disk(task, dev, lba, sectors, | ||
4580 | max_sectors_set); | ||
4581 | } | ||
4582 | 3939 | ||
4583 | static int transport_map_sg_to_mem( | 3940 | while (length) { |
4584 | struct se_cmd *cmd, | 3941 | u32 page_len = min_t(u32, length, PAGE_SIZE); |
4585 | struct list_head *se_mem_list, | 3942 | page = alloc_page(GFP_KERNEL | __GFP_ZERO); |
4586 | void *in_mem, | 3943 | if (!page) |
4587 | u32 *se_mem_cnt) | 3944 | goto out; |
4588 | { | ||
4589 | struct se_mem *se_mem; | ||
4590 | struct scatterlist *sg; | ||
4591 | u32 sg_count = 1, cmd_size = cmd->data_length; | ||
4592 | 3945 | ||
4593 | if (!in_mem) { | 3946 | sg_set_page(&cmd->t_data_sg[i], page, page_len, 0); |
4594 | printk(KERN_ERR "No source scatterlist\n"); | 3947 | length -= page_len; |
4595 | return -1; | 3948 | i++; |
4596 | } | 3949 | } |
4597 | sg = (struct scatterlist *)in_mem; | 3950 | return 0; |
4598 | |||
4599 | while (cmd_size) { | ||
4600 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); | ||
4601 | if (!(se_mem)) { | ||
4602 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); | ||
4603 | return -1; | ||
4604 | } | ||
4605 | INIT_LIST_HEAD(&se_mem->se_list); | ||
4606 | DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u" | ||
4607 | " sg_page: %p offset: %d length: %d\n", cmd_size, | ||
4608 | sg_page(sg), sg->offset, sg->length); | ||
4609 | |||
4610 | se_mem->se_page = sg_page(sg); | ||
4611 | se_mem->se_off = sg->offset; | ||
4612 | |||
4613 | if (cmd_size > sg->length) { | ||
4614 | se_mem->se_len = sg->length; | ||
4615 | sg = sg_next(sg); | ||
4616 | sg_count++; | ||
4617 | } else | ||
4618 | se_mem->se_len = cmd_size; | ||
4619 | |||
4620 | cmd_size -= se_mem->se_len; | ||
4621 | |||
4622 | DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n", | ||
4623 | *se_mem_cnt, cmd_size); | ||
4624 | DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n", | ||
4625 | se_mem->se_page, se_mem->se_off, se_mem->se_len); | ||
4626 | 3951 | ||
4627 | list_add_tail(&se_mem->se_list, se_mem_list); | 3952 | out: |
4628 | (*se_mem_cnt)++; | 3953 | while (i >= 0) { |
3954 | __free_page(sg_page(&cmd->t_data_sg[i])); | ||
3955 | i--; | ||
4629 | } | 3956 | } |
4630 | 3957 | kfree(cmd->t_data_sg); | |
4631 | DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)" | 3958 | cmd->t_data_sg = NULL; |
4632 | " struct se_mem\n", sg_count, *se_mem_cnt); | 3959 | return -ENOMEM; |
4633 | |||
4634 | if (sg_count != *se_mem_cnt) | ||
4635 | BUG(); | ||
4636 | |||
4637 | return 0; | ||
4638 | } | 3960 | } |
4639 | 3961 | ||
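The page math in the new transport_generic_get_mem() is worth making concrete. A runnable userspace model, assuming a 4 KiB PAGE_SIZE and a made-up 10000-byte data_length:

#include <stdio.h>

#define PAGE_SIZE 4096u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int length = 10000;	/* stands in for cmd->data_length */
	unsigned int nents = DIV_ROUND_UP(length, PAGE_SIZE);
	unsigned int i;

	printf("t_data_nents = %u\n", nents);		/* 3 */
	for (i = 0; i < nents; i++) {
		unsigned int page_len = length < PAGE_SIZE ? length : PAGE_SIZE;

		printf("sg[%u].length = %u\n", i, page_len); /* 4096, 4096, 1808 */
		length -= page_len;
	}
	return 0;
}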
4640 | /* transport_map_mem_to_sg(): | 3962 | /* Reduce sectors if they are too long for the device */ |
4641 | * | 3963 | static inline sector_t transport_limit_task_sectors( |
4642 | * | 3964 | struct se_device *dev, |
4643 | */ | 3965 | unsigned long long lba, |
4644 | int transport_map_mem_to_sg( | 3966 | sector_t sectors) |
4645 | struct se_task *task, | ||
4646 | struct list_head *se_mem_list, | ||
4647 | void *in_mem, | ||
4648 | struct se_mem *in_se_mem, | ||
4649 | struct se_mem **out_se_mem, | ||
4650 | u32 *se_mem_cnt, | ||
4651 | u32 *task_offset) | ||
4652 | { | 3967 | { |
4653 | struct se_cmd *se_cmd = task->task_se_cmd; | 3968 | sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); |
4654 | struct se_mem *se_mem = in_se_mem; | ||
4655 | struct scatterlist *sg = (struct scatterlist *)in_mem; | ||
4656 | u32 task_size = task->task_size, sg_no = 0; | ||
4657 | 3969 | ||
4658 | if (!sg) { | 3970 | if (dev->transport->get_device_type(dev) == TYPE_DISK) |
4659 | printk(KERN_ERR "Unable to locate valid struct" | 3971 | if ((lba + sectors) > transport_dev_end_lba(dev)) |
4660 | " scatterlist pointer\n"); | 3972 | sectors = ((transport_dev_end_lba(dev) - lba) + 1); |
4661 | return -1; | ||
4662 | } | ||
4663 | |||
4664 | while (task_size != 0) { | ||
4665 | /* | ||
4666 | * Setup the contiguous array of scatterlists for | ||
4667 | * this struct se_task. | ||
4668 | */ | ||
4669 | sg_assign_page(sg, se_mem->se_page); | ||
4670 | |||
4671 | if (*task_offset == 0) { | ||
4672 | sg->offset = se_mem->se_off; | ||
4673 | |||
4674 | if (task_size >= se_mem->se_len) { | ||
4675 | sg->length = se_mem->se_len; | ||
4676 | 3973 | ||
4677 | if (!(list_is_last(&se_mem->se_list, | 3974 | return sectors; |
4678 | T_TASK(se_cmd)->t_mem_list))) { | ||
4679 | se_mem = list_entry(se_mem->se_list.next, | ||
4680 | struct se_mem, se_list); | ||
4681 | (*se_mem_cnt)++; | ||
4682 | } | ||
4683 | } else { | ||
4684 | sg->length = task_size; | ||
4685 | /* | ||
4686 | * Determine if we need to calculate an offset | ||
4687 | * into the struct se_mem on the next go around.. | ||
4688 | */ | ||
4689 | task_size -= sg->length; | ||
4690 | if (!(task_size)) | ||
4691 | *task_offset = sg->length; | ||
4692 | |||
4693 | goto next; | ||
4694 | } | ||
4695 | |||
4696 | } else { | ||
4697 | sg->offset = (*task_offset + se_mem->se_off); | ||
4698 | |||
4699 | if ((se_mem->se_len - *task_offset) > task_size) { | ||
4700 | sg->length = task_size; | ||
4701 | /* | ||
4702 | * Determine if we need to calculate an offset | ||
4703 | * into the struct se_mem on the next go around.. | ||
4704 | */ | ||
4705 | task_size -= sg->length; | ||
4706 | if (!(task_size)) | ||
4707 | *task_offset += sg->length; | ||
4708 | |||
4709 | goto next; | ||
4710 | } else { | ||
4711 | sg->length = (se_mem->se_len - *task_offset); | ||
4712 | |||
4713 | if (!(list_is_last(&se_mem->se_list, | ||
4714 | T_TASK(se_cmd)->t_mem_list))) { | ||
4715 | se_mem = list_entry(se_mem->se_list.next, | ||
4716 | struct se_mem, se_list); | ||
4717 | (*se_mem_cnt)++; | ||
4718 | } | ||
4719 | } | ||
4720 | |||
4721 | *task_offset = 0; | ||
4722 | } | ||
4723 | task_size -= sg->length; | ||
4724 | next: | ||
4725 | DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing" | ||
4726 | " task_size to(%u), task_offset: %u\n", task->task_no, sg_no, | ||
4727 | sg_page(sg), sg->length, sg->offset, task_size, *task_offset); | ||
4728 | |||
4729 | sg_no++; | ||
4730 | if (!(task_size)) | ||
4731 | break; | ||
4732 | |||
4733 | sg = sg_next(sg); | ||
4734 | |||
4735 | if (task_size > se_cmd->data_length) | ||
4736 | BUG(); | ||
4737 | } | ||
4738 | *out_se_mem = se_mem; | ||
4739 | |||
4740 | DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)" | ||
4741 | " SGs\n", task->task_no, *se_mem_cnt, sg_no); | ||
4742 | |||
4743 | return 0; | ||
4744 | } | 3975 | } |
4745 | 3976 | ||
3977 | |||
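transport_limit_task_sectors() above combines two clamps: the per-device max_sectors attribute, and (for TYPE_DISK only) the end of the device. A runnable userspace model with made-up device geometry:

#include <stdio.h>

typedef unsigned long long sector_t;

/* Mirrors the two clamps above; max_sectors/end_lba values are examples. */
static sector_t limit_task_sectors(sector_t lba, sector_t sectors,
				   sector_t max_sectors, sector_t end_lba)
{
	if (sectors > max_sectors)		/* attribute clamp */
		sectors = max_sectors;
	if (lba + sectors > end_lba)		/* TYPE_DISK end-of-device clamp */
		sectors = (end_lba - lba) + 1;
	return sectors;
}

int main(void)
{
	/* device whose last addressable LBA is 1023, max_sectors 256 */
	printf("%llu\n", limit_task_sectors(0, 1024, 256, 1023));	/* 256 */
	printf("%llu\n", limit_task_sectors(1000, 256, 256, 1023));	/* 24 */
	return 0;
}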
4746 | /* | 3978 | /* |
4747 | * This function can be used by HW target mode drivers to create a linked | 3979 | * This function can be used by HW target mode drivers to create a linked |
4748 | * scatterlist from all contiguously allocated struct se_task->task_sg[]. | 3980 | * scatterlist from all contiguously allocated struct se_task->task_sg[]. |
@@ -4751,334 +3983,236 @@ next: | |||
4751 | */ | 3983 | */ |
4752 | void transport_do_task_sg_chain(struct se_cmd *cmd) | 3984 | void transport_do_task_sg_chain(struct se_cmd *cmd) |
4753 | { | 3985 | { |
4754 | struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL; | 3986 | struct scatterlist *sg_first = NULL; |
4755 | struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL; | 3987 | struct scatterlist *sg_prev = NULL; |
4756 | struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL; | 3988 | int sg_prev_nents = 0; |
3989 | struct scatterlist *sg; | ||
4757 | struct se_task *task; | 3990 | struct se_task *task; |
4758 | struct target_core_fabric_ops *tfo = CMD_TFO(cmd); | 3991 | u32 chained_nents = 0; |
4759 | u32 task_sg_num = 0, sg_count = 0; | ||
4760 | int i; | 3992 | int i; |
4761 | 3993 | ||
4762 | if (tfo->task_sg_chaining == 0) { | 3994 | BUG_ON(!cmd->se_tfo->task_sg_chaining); |
4763 | printk(KERN_ERR "task_sg_chaining is diabled for fabric module:" | 3995 | |
4764 | " %s\n", tfo->get_fabric_name()); | ||
4765 | dump_stack(); | ||
4766 | return; | ||
4767 | } | ||
4768 | /* | 3996 | /* |
4769 | * Walk the struct se_task list and setup scatterlist chains | 3997 | * Walk the struct se_task list and setup scatterlist chains |
4770 | * for each contiguosly allocated struct se_task->task_sg[]. | 3998 | * for each contiguously allocated struct se_task->task_sg[]. |
4771 | */ | 3999 | */ |
4772 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { | 4000 | list_for_each_entry(task, &cmd->t_task_list, t_list) { |
4773 | if (!(task->task_sg) || !(task->task_padded_sg)) | 4001 | if (!task->task_sg) |
4774 | continue; | 4002 | continue; |
4775 | 4003 | ||
4776 | if (sg_head && sg_link) { | 4004 | BUG_ON(!task->task_padded_sg); |
4777 | sg_head_cur = &task->task_sg[0]; | ||
4778 | sg_link_cur = &task->task_sg[task->task_sg_num]; | ||
4779 | /* | ||
4780 | * Either add chain or mark end of scatterlist | ||
4781 | */ | ||
4782 | if (!(list_is_last(&task->t_list, | ||
4783 | &T_TASK(cmd)->t_task_list))) { | ||
4784 | /* | ||
4785 | * Clear existing SGL termination bit set in | ||
4786 | * transport_calc_sg_num(), see sg_mark_end() | ||
4787 | */ | ||
4788 | sg_end_cur = &task->task_sg[task->task_sg_num - 1]; | ||
4789 | sg_end_cur->page_link &= ~0x02; | ||
4790 | |||
4791 | sg_chain(sg_head, task_sg_num, sg_head_cur); | ||
4792 | sg_count += task->task_sg_num; | ||
4793 | task_sg_num = (task->task_sg_num + 1); | ||
4794 | } else { | ||
4795 | sg_chain(sg_head, task_sg_num, sg_head_cur); | ||
4796 | sg_count += task->task_sg_num; | ||
4797 | task_sg_num = task->task_sg_num; | ||
4798 | } | ||
4799 | 4005 | ||
4800 | sg_head = sg_head_cur; | 4006 | if (!sg_first) { |
4801 | sg_link = sg_link_cur; | 4007 | sg_first = task->task_sg; |
4802 | continue; | 4008 | chained_nents = task->task_sg_nents; |
4803 | } | ||
4804 | sg_head = sg_first = &task->task_sg[0]; | ||
4805 | sg_link = &task->task_sg[task->task_sg_num]; | ||
4806 | /* | ||
4807 | * Check for single task.. | ||
4808 | */ | ||
4809 | if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) { | ||
4810 | /* | ||
4811 | * Clear existing SGL termination bit set in | ||
4812 | * transport_calc_sg_num(), see sg_mark_end() | ||
4813 | */ | ||
4814 | sg_end = &task->task_sg[task->task_sg_num - 1]; | ||
4815 | sg_end->page_link &= ~0x02; | ||
4816 | sg_count += task->task_sg_num; | ||
4817 | task_sg_num = (task->task_sg_num + 1); | ||
4818 | } else { | 4009 | } else { |
4819 | sg_count += task->task_sg_num; | 4010 | sg_chain(sg_prev, sg_prev_nents, task->task_sg); |
4820 | task_sg_num = task->task_sg_num; | 4011 | chained_nents += task->task_sg_nents; |
4821 | } | 4012 | } |
4013 | |||
4014 | sg_prev = task->task_sg; | ||
4015 | sg_prev_nents = task->task_sg_nents; | ||
4822 | } | 4016 | } |
4823 | /* | 4017 | /* |
4824 | * Setup the starting pointer and total t_tasks_sg_linked_no including | 4018 | * Setup the starting pointer and total t_tasks_sg_linked_no including |
4825 | * padding SGs for linking and to mark the end. | 4019 | * padding SGs for linking and to mark the end. |
4826 | */ | 4020 | */ |
4827 | T_TASK(cmd)->t_tasks_sg_chained = sg_first; | 4021 | cmd->t_tasks_sg_chained = sg_first; |
4828 | T_TASK(cmd)->t_tasks_sg_chained_no = sg_count; | 4022 | cmd->t_tasks_sg_chained_no = chained_nents; |
4829 | 4023 | ||
4830 | DEBUG_CMD_M("Setup cmd: %p T_TASK(cmd)->t_tasks_sg_chained: %p and" | 4024 | pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and" |
4831 | " t_tasks_sg_chained_no: %u\n", cmd, T_TASK(cmd)->t_tasks_sg_chained, | 4025 | " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained, |
4832 | T_TASK(cmd)->t_tasks_sg_chained_no); | 4026 | cmd->t_tasks_sg_chained_no); |
4833 | 4027 | ||
4834 | for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg, | 4028 | for_each_sg(cmd->t_tasks_sg_chained, sg, |
4835 | T_TASK(cmd)->t_tasks_sg_chained_no, i) { | 4029 | cmd->t_tasks_sg_chained_no, i) { |
4836 | 4030 | ||
4837 | DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n", | 4031 | pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n", |
4838 | i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic); | 4032 | i, sg, sg_page(sg), sg->length, sg->offset); |
4839 | if (sg_is_chain(sg)) | 4033 | if (sg_is_chain(sg)) |
4840 | DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg); | 4034 | pr_debug("SG: %p sg_is_chain=1\n", sg); |
4841 | if (sg_is_last(sg)) | 4035 | if (sg_is_last(sg)) |
4842 | DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg); | 4036 | pr_debug("SG: %p sg_is_last=1\n", sg); |
4843 | } | 4037 | } |
4844 | } | 4038 | } |
4845 | EXPORT_SYMBOL(transport_do_task_sg_chain); | 4039 | EXPORT_SYMBOL(transport_do_task_sg_chain); |
4846 | 4040 | ||
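The chaining itself hangs off the extra padding entry allocated per task: sg_chain() turns the last slot of the previous table into a link, and for_each_sg() then walks the chained tables as one logical SGL. A hedged fragment showing the primitive in isolation; the array sizes and the demo function are arbitrary, only the scatterlist API is real:

#include <linux/scatterlist.h>

static void sg_chain_demo(void)
{
	struct scatterlist a[4], b[2];	/* a[3] is the padding/link slot */
	struct scatterlist *sg;
	int i;

	sg_init_table(a, 4);
	sg_init_table(b, 2);
	/* ... sg_set_page() on a[0..2] and b[0..1] ... */
	sg_chain(a, 4, b);		/* a[3] now links to b[0] */

	for_each_sg(a, sg, 3 + 2, i) {
		/* visits the five real entries: a[0..2], then b[0..1] */
	}
}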
4847 | static int transport_do_se_mem_map( | 4041 | /* |
4848 | struct se_device *dev, | 4042 | * Break up cmd into chunks transport can handle |
4849 | struct se_task *task, | 4043 | */ |
4850 | struct list_head *se_mem_list, | 4044 | static int transport_allocate_data_tasks( |
4851 | void *in_mem, | ||
4852 | struct se_mem *in_se_mem, | ||
4853 | struct se_mem **out_se_mem, | ||
4854 | u32 *se_mem_cnt, | ||
4855 | u32 *task_offset_in) | ||
4856 | { | ||
4857 | u32 task_offset = *task_offset_in; | ||
4858 | int ret = 0; | ||
4859 | /* | ||
4860 | * se_subsystem_api_t->do_se_mem_map is used when internal allocation | ||
4861 | * has been done by the transport plugin. | ||
4862 | */ | ||
4863 | if (TRANSPORT(dev)->do_se_mem_map) { | ||
4864 | ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list, | ||
4865 | in_mem, in_se_mem, out_se_mem, se_mem_cnt, | ||
4866 | task_offset_in); | ||
4867 | if (ret == 0) | ||
4868 | T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt; | ||
4869 | |||
4870 | return ret; | ||
4871 | } | ||
4872 | |||
4873 | BUG_ON(list_empty(se_mem_list)); | ||
4874 | /* | ||
4875 | * This is the normal path for all normal non BIDI and BIDI-COMMAND | ||
4876 | * WRITE payloads.. If we need to do BIDI READ passthrough for | ||
4877 | * TCM/pSCSI the first call to transport_do_se_mem_map -> | ||
4878 | * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the | ||
4879 | * allocation for task->task_sg_bidi, and the subsequent call to | ||
4880 | * transport_do_se_mem_map() from transport_generic_get_cdb_count() | ||
4881 | */ | ||
4882 | if (!(task->task_sg_bidi)) { | ||
4883 | /* | ||
4884 | * Assume default that transport plugin speaks preallocated | ||
4885 | * scatterlists. | ||
4886 | */ | ||
4887 | if (!(transport_calc_sg_num(task, in_se_mem, task_offset))) | ||
4888 | return -1; | ||
4889 | /* | ||
4890 | * struct se_task->task_sg now contains the struct scatterlist array. | ||
4891 | */ | ||
4892 | return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, | ||
4893 | in_se_mem, out_se_mem, se_mem_cnt, | ||
4894 | task_offset_in); | ||
4895 | } | ||
4896 | /* | ||
4897 | * Handle the se_mem_list -> struct task->task_sg_bidi | ||
4898 | * memory map for the extra BIDI READ payload | ||
4899 | */ | ||
4900 | return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi, | ||
4901 | in_se_mem, out_se_mem, se_mem_cnt, | ||
4902 | task_offset_in); | ||
4903 | } | ||
4904 | |||
4905 | static u32 transport_generic_get_cdb_count( | ||
4906 | struct se_cmd *cmd, | 4045 | struct se_cmd *cmd, |
4907 | unsigned long long lba, | 4046 | unsigned long long lba, |
4908 | u32 sectors, | ||
4909 | enum dma_data_direction data_direction, | 4047 | enum dma_data_direction data_direction, |
4910 | struct list_head *mem_list, | 4048 | struct scatterlist *sgl, |
4911 | int set_counts) | 4049 | unsigned int sgl_nents) |
4912 | { | 4050 | { |
4913 | unsigned char *cdb = NULL; | 4051 | unsigned char *cdb = NULL; |
4914 | struct se_task *task; | 4052 | struct se_task *task; |
4915 | struct se_mem *se_mem = NULL, *se_mem_lout = NULL; | 4053 | struct se_device *dev = cmd->se_dev; |
4916 | struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL; | 4054 | unsigned long flags; |
4917 | struct se_device *dev = SE_DEV(cmd); | 4055 | sector_t sectors; |
4918 | int max_sectors_set = 0, ret; | 4056 | int task_count, i, ret; |
4919 | u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0; | 4057 | sector_t dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; |
4920 | 4058 | u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size; | |
4921 | if (!mem_list) { | 4059 | struct scatterlist *sg; |
4922 | printk(KERN_ERR "mem_list is NULL in transport_generic_get" | 4060 | struct scatterlist *cmd_sg; |
4923 | "_cdb_count()\n"); | ||
4924 | return 0; | ||
4925 | } | ||
4926 | /* | ||
4927 | * Using RAMDISK_DR backstores is the only case where | ||
4928 | * mem_list will ever be empty at this point. | ||
4929 | */ | ||
4930 | if (!(list_empty(mem_list))) | ||
4931 | se_mem = list_entry(mem_list->next, struct se_mem, se_list); | ||
4932 | /* | ||
4933 | * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to | ||
4934 | * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation | ||
4935 | */ | ||
4936 | if ((T_TASK(cmd)->t_mem_bidi_list != NULL) && | ||
4937 | !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) && | ||
4938 | (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) | ||
4939 | se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next, | ||
4940 | struct se_mem, se_list); | ||
4941 | |||
4942 | while (sectors) { | ||
4943 | DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n", | ||
4944 | CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors, | ||
4945 | transport_dev_end_lba(dev)); | ||
4946 | 4061 | ||
4947 | task = transport_generic_get_task(cmd, data_direction); | 4062 | WARN_ON(cmd->data_length % sector_size); |
4948 | if (!(task)) | 4063 | sectors = DIV_ROUND_UP(cmd->data_length, sector_size); |
4949 | goto out; | 4064 | task_count = DIV_ROUND_UP(sectors, dev_max_sectors); |
4950 | 4065 | ||
4951 | transport_set_tasks_sectors(task, dev, lba, sectors, | 4066 | cmd_sg = sgl; |
4952 | &max_sectors_set); | 4067 | for (i = 0; i < task_count; i++) { |
4068 | unsigned int task_size; | ||
4069 | int count; | ||
4070 | |||
4071 | task = transport_generic_get_task(cmd, data_direction); | ||
4072 | if (!task) | ||
4073 | return -ENOMEM; | ||
4953 | 4074 | ||
4954 | task->task_lba = lba; | 4075 | task->task_lba = lba; |
4955 | lba += task->task_sectors; | 4076 | task->task_sectors = min(sectors, dev_max_sectors); |
4956 | sectors -= task->task_sectors; | 4077 | task->task_size = task->task_sectors * sector_size; |
4957 | task->task_size = (task->task_sectors * | ||
4958 | DEV_ATTRIB(dev)->block_size); | ||
4959 | |||
4960 | cdb = TRANSPORT(dev)->get_cdb(task); | ||
4961 | if ((cdb)) { | ||
4962 | memcpy(cdb, T_TASK(cmd)->t_task_cdb, | ||
4963 | scsi_command_size(T_TASK(cmd)->t_task_cdb)); | ||
4964 | cmd->transport_split_cdb(task->task_lba, | ||
4965 | &task->task_sectors, cdb); | ||
4966 | } | ||
4967 | 4078 | ||
4968 | /* | 4079 | cdb = dev->transport->get_cdb(task); |
4969 | * Perform the SE OBJ plugin and/or Transport plugin specific | 4080 | BUG_ON(!cdb); |
4970 | * mapping for T_TASK(cmd)->t_mem_list. And setup the | 4081 | |
4971 | * task->task_sg and if necessary task->task_sg_bidi | 4082 | memcpy(cdb, cmd->t_task_cdb, |
4972 | */ | 4083 | scsi_command_size(cmd->t_task_cdb)); |
4973 | ret = transport_do_se_mem_map(dev, task, mem_list, | 4084 | |
4974 | NULL, se_mem, &se_mem_lout, &se_mem_cnt, | 4085 | /* Update new cdb with updated lba/sectors */ |
4975 | &task_offset_in); | 4086 | cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); |
4976 | if (ret < 0) | ||
4977 | goto out; | ||
4978 | 4087 | ||
4979 | se_mem = se_mem_lout; | ||
4980 | /* | 4088 | /* |
4981 | * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi | 4089 | * Check if the fabric module driver is requesting that all |
4982 | * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI | 4090 | * struct se_task->task_sg[] be chained together.. If so, |
4983 | * | 4091 | * then allocate an extra padding SG entry for linking and |
4984 | * Note that the first call to transport_do_se_mem_map() above will | 4092 | * marking the end of the chained SGL. |
4985 | * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map() | 4093 | * Possibly over-allocate task sgl size by using cmd sgl size. |
4986 | * -> transport_calc_sg_num(), and the second here will do the | 4094 | * It's so much easier and only a waste when task_count > 1. |
4987 | * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI. | 4095 | * That is extremely rare. |
4988 | */ | 4096 | */ |
4989 | if (task->task_sg_bidi != NULL) { | 4097 | task->task_sg_nents = sgl_nents; |
4990 | ret = transport_do_se_mem_map(dev, task, | 4098 | if (cmd->se_tfo->task_sg_chaining) { |
4991 | T_TASK(cmd)->t_mem_bidi_list, NULL, | 4099 | task->task_sg_nents++; |
4992 | se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt, | 4100 | task->task_padded_sg = 1; |
4993 | &task_offset_in); | 4101 | } |
4994 | if (ret < 0) | ||
4995 | goto out; | ||
4996 | 4102 | ||
4997 | se_mem_bidi = se_mem_bidi_lout; | 4103 | task->task_sg = kmalloc(sizeof(struct scatterlist) * |
4104 | task->task_sg_nents, GFP_KERNEL); | ||
4105 | if (!task->task_sg) { | ||
4106 | cmd->se_dev->transport->free_task(task); | ||
4107 | return -ENOMEM; | ||
4998 | } | 4108 | } |
4999 | task_cdbs++; | ||
5000 | 4109 | ||
5001 | DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n", | 4110 | sg_init_table(task->task_sg, task->task_sg_nents); |
5002 | task_cdbs, task->task_sg_num); | ||
5003 | 4111 | ||
5004 | if (max_sectors_set) { | 4112 | task_size = task->task_size; |
5005 | max_sectors_set = 0; | 4113 | |
5006 | continue; | 4114 | /* Build new sgl, only up to task_size */ |
4115 | for_each_sg(task->task_sg, sg, task->task_sg_nents, count) { | ||
4116 | if (cmd_sg->length > task_size) | ||
4117 | break; | ||
4118 | |||
4119 | *sg = *cmd_sg; | ||
4120 | task_size -= cmd_sg->length; | ||
4121 | cmd_sg = sg_next(cmd_sg); | ||
5007 | } | 4122 | } |
5008 | 4123 | ||
5009 | if (!sectors) | 4124 | lba += task->task_sectors; |
5010 | break; | 4125 | sectors -= task->task_sectors; |
5011 | } | ||
5012 | 4126 | ||
5013 | if (set_counts) { | 4127 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
5014 | atomic_inc(&T_TASK(cmd)->t_fe_count); | 4128 | list_add_tail(&task->t_list, &cmd->t_task_list); |
5015 | atomic_inc(&T_TASK(cmd)->t_se_count); | 4129 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
5016 | } | 4130 | } |
4131 | /* | ||
4132 | * Now perform the memory map of task->task_sg[] into backend | ||
4133 | * subsystem memory.. | ||
4134 | */ | ||
4135 | list_for_each_entry(task, &cmd->t_task_list, t_list) { | ||
4136 | if (atomic_read(&task->task_sent)) | ||
4137 | continue; | ||
4138 | if (!dev->transport->map_data_SG) | ||
4139 | continue; | ||
5017 | 4140 | ||
5018 | DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n", | 4141 | ret = dev->transport->map_data_SG(task); |
5019 | CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE) | 4142 | if (ret < 0) |
5020 | ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs); | 4143 | return 0; |
4144 | } | ||
5021 | 4145 | ||
5022 | return task_cdbs; | 4146 | return task_count; |
5023 | out: | ||
5024 | return 0; | ||
5025 | } | 4147 | } |
5026 | 4148 | ||
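The split arithmetic at the top of transport_allocate_data_tasks() is easy to sanity-check in userspace. With example attributes (512-byte blocks, max_sectors of 1024), a 1 MiB WRITE becomes two se_tasks:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int data_length = 1024 * 1024;	/* cmd->data_length */
	unsigned int sector_size = 512;		/* block_size attribute */
	unsigned int max_sectors = 1024;	/* device attribute */

	unsigned int sectors = DIV_ROUND_UP(data_length, sector_size);
	unsigned int task_count = DIV_ROUND_UP(sectors, max_sectors);

	printf("sectors=%u task_count=%u\n", sectors, task_count);
	/* sectors=2048 task_count=2: two 512 KiB se_tasks */
	return 0;
}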
5027 | static int | 4149 | static int |
5028 | transport_map_control_cmd_to_task(struct se_cmd *cmd) | 4150 | transport_allocate_control_task(struct se_cmd *cmd) |
5029 | { | 4151 | { |
5030 | struct se_device *dev = SE_DEV(cmd); | 4152 | struct se_device *dev = cmd->se_dev; |
5031 | unsigned char *cdb; | 4153 | unsigned char *cdb; |
5032 | struct se_task *task; | 4154 | struct se_task *task; |
5033 | int ret; | 4155 | unsigned long flags; |
4156 | int ret = 0; | ||
5034 | 4157 | ||
5035 | task = transport_generic_get_task(cmd, cmd->data_direction); | 4158 | task = transport_generic_get_task(cmd, cmd->data_direction); |
5036 | if (!task) | 4159 | if (!task) |
5037 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | 4160 | return -ENOMEM; |
5038 | 4161 | ||
5039 | cdb = TRANSPORT(dev)->get_cdb(task); | 4162 | cdb = dev->transport->get_cdb(task); |
5040 | if (cdb) | 4163 | BUG_ON(!cdb); |
5041 | memcpy(cdb, cmd->t_task->t_task_cdb, | 4164 | memcpy(cdb, cmd->t_task_cdb, |
5042 | scsi_command_size(cmd->t_task->t_task_cdb)); | 4165 | scsi_command_size(cmd->t_task_cdb)); |
5043 | 4166 | ||
4167 | task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents, | ||
4168 | GFP_KERNEL); | ||
4169 | if (!task->task_sg) { | ||
4170 | cmd->se_dev->transport->free_task(task); | ||
4171 | return -ENOMEM; | ||
4172 | } | ||
4173 | |||
4174 | memcpy(task->task_sg, cmd->t_data_sg, | ||
4175 | sizeof(struct scatterlist) * cmd->t_data_nents); | ||
5044 | task->task_size = cmd->data_length; | 4176 | task->task_size = cmd->data_length; |
5045 | task->task_sg_num = | 4177 | task->task_sg_nents = cmd->t_data_nents; |
5046 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0; | ||
5047 | 4178 | ||
5048 | atomic_inc(&cmd->t_task->t_fe_count); | 4179 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
5049 | atomic_inc(&cmd->t_task->t_se_count); | 4180 | list_add_tail(&task->t_list, &cmd->t_task_list); |
4181 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
5050 | 4182 | ||
5051 | if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { | 4183 | if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { |
5052 | struct se_mem *se_mem = NULL, *se_mem_lout = NULL; | 4184 | if (dev->transport->map_control_SG) |
5053 | u32 se_mem_cnt = 0, task_offset = 0; | 4185 | ret = dev->transport->map_control_SG(task); |
5054 | |||
5055 | if (!list_empty(T_TASK(cmd)->t_mem_list)) | ||
5056 | se_mem = list_entry(T_TASK(cmd)->t_mem_list->next, | ||
5057 | struct se_mem, se_list); | ||
5058 | |||
5059 | ret = transport_do_se_mem_map(dev, task, | ||
5060 | cmd->t_task->t_mem_list, NULL, se_mem, | ||
5061 | &se_mem_lout, &se_mem_cnt, &task_offset); | ||
5062 | if (ret < 0) | ||
5063 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | ||
5064 | |||
5065 | if (dev->transport->map_task_SG) | ||
5066 | return dev->transport->map_task_SG(task); | ||
5067 | return 0; | ||
5068 | } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) { | ||
5069 | if (dev->transport->map_task_non_SG) | ||
5070 | return dev->transport->map_task_non_SG(task); | ||
5071 | return 0; | ||
5072 | } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { | 4186 | } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { |
5073 | if (dev->transport->cdb_none) | 4187 | if (dev->transport->cdb_none) |
5074 | return dev->transport->cdb_none(task); | 4188 | ret = dev->transport->cdb_none(task); |
5075 | return 0; | ||
5076 | } else { | 4189 | } else { |
4190 | pr_err("target: Unknown control cmd type!\n"); | ||
5077 | BUG(); | 4191 | BUG(); |
5078 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | ||
5079 | } | 4192 | } |
4193 | |||
4194 | /* Success! Return number of tasks allocated */ | ||
4195 | if (ret == 0) | ||
4196 | return 1; | ||
4197 | return ret; | ||
4198 | } | ||
4199 | |||
4200 | static u32 transport_allocate_tasks( | ||
4201 | struct se_cmd *cmd, | ||
4202 | unsigned long long lba, | ||
4203 | enum dma_data_direction data_direction, | ||
4204 | struct scatterlist *sgl, | ||
4205 | unsigned int sgl_nents) | ||
4206 | { | ||
4207 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) | ||
4208 | return transport_allocate_data_tasks(cmd, lba, data_direction, | ||
4209 | sgl, sgl_nents); | ||
4210 | else | ||
4211 | return transport_allocate_control_task(cmd); | ||
4212 | |||
5080 | } | 4213 | } |
5081 | 4214 | ||
4215 | |||
5082 | /* transport_generic_new_cmd(): Called from transport_processing_thread() | 4216 | /* transport_generic_new_cmd(): Called from transport_processing_thread() |
5083 | * | 4217 | * |
5084 | * Allocate storage transport resources from a set of values predefined | 4218 | * Allocate storage transport resources from a set of values predefined |
@@ -5088,64 +4222,33 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd) | |||
5088 | /* | 4222 | /* |
5089 | * Generate struct se_task(s) and/or their payloads for this CDB. | 4223 | * Generate struct se_task(s) and/or their payloads for this CDB. |
5090 | */ | 4224 | */ |
5091 | static int transport_generic_new_cmd(struct se_cmd *cmd) | 4225 | int transport_generic_new_cmd(struct se_cmd *cmd) |
5092 | { | 4226 | { |
5093 | struct se_portal_group *se_tpg; | ||
5094 | struct se_task *task; | ||
5095 | struct se_device *dev = SE_DEV(cmd); | ||
5096 | int ret = 0; | 4227 | int ret = 0; |
5097 | 4228 | ||
5098 | /* | 4229 | /* |
5099 | * Determine if the TCM fabric module has already allocated physical | 4230 | * Determine if the TCM fabric module has already allocated physical |
5100 | * memory, and is directly calling transport_generic_map_mem_to_cmd() | 4231 | * memory, and is directly calling transport_generic_map_mem_to_cmd() |
5101 | * to setup beforehand the linked list of physical memory at | 4232 | * beforehand. |
5102 | * T_TASK(cmd)->t_mem_list of struct se_mem->se_page | ||
5103 | */ | 4233 | */ |
5104 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { | 4234 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && |
5105 | ret = transport_allocate_resources(cmd); | 4235 | cmd->data_length) { |
4236 | ret = transport_generic_get_mem(cmd); | ||
5106 | if (ret < 0) | 4237 | if (ret < 0) |
5107 | return ret; | 4238 | return ret; |
5108 | } | 4239 | } |
5109 | 4240 | /* | |
5110 | ret = transport_get_sectors(cmd); | 4241 | * Call transport_new_cmd_obj() to invoke transport_allocate_tasks() for |
5111 | if (ret < 0) | 4242 | * control or data CDB types, and perform the map to backend subsystem |
5112 | return ret; | 4243 | * code from SGL memory allocated here by transport_generic_get_mem(), or |
5113 | 4244 | * via pre-existing SGL memory setup explictly by fabric module code with | |
4245 | * transport_generic_map_mem_to_cmd(). | ||
4246 | */ | ||
5114 | ret = transport_new_cmd_obj(cmd); | 4247 | ret = transport_new_cmd_obj(cmd); |
5115 | if (ret < 0) | 4248 | if (ret < 0) |
5116 | return ret; | 4249 | return ret; |
5117 | |||
5118 | /* | 4250 | /* |
5119 | * Determine if the calling TCM fabric module is talking to | 4251 | * For WRITEs, let the fabric know its buffer is ready.. |
5120 | * Linux/NET via kernel sockets and needs to allocate a | ||
5121 | * struct iovec array to complete the struct se_cmd | ||
5122 | */ | ||
5123 | se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg; | ||
5124 | if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) { | ||
5125 | ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd); | ||
5126 | if (ret < 0) | ||
5127 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | ||
5128 | } | ||
5129 | |||
5130 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { | ||
5131 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { | ||
5132 | if (atomic_read(&task->task_sent)) | ||
5133 | continue; | ||
5134 | if (!dev->transport->map_task_SG) | ||
5135 | continue; | ||
5136 | |||
5137 | ret = dev->transport->map_task_SG(task); | ||
5138 | if (ret < 0) | ||
5139 | return ret; | ||
5140 | } | ||
5141 | } else { | ||
5142 | ret = transport_map_control_cmd_to_task(cmd); | ||
5143 | if (ret < 0) | ||
5144 | return ret; | ||
5145 | } | ||
5146 | |||
5147 | /* | ||
5148 | * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready.. | ||
5149 | * This WRITE struct se_cmd (and all of its associated struct se_task's) | 4252 | * This WRITE struct se_cmd (and all of its associated struct se_task's) |
5150 | * will be added to the struct se_device execution queue after its WRITE | 4253 | * will be added to the struct se_device execution queue after its WRITE |
5151 | * data has arrived. (ie: It gets handled by the transport processing | 4254 | * data has arrived. (ie: It gets handled by the transport processing |
@@ -5162,6 +4265,7 @@ static int transport_generic_new_cmd(struct se_cmd *cmd) | |||
5162 | transport_execute_tasks(cmd); | 4265 | transport_execute_tasks(cmd); |
5163 | return 0; | 4266 | return 0; |
5164 | } | 4267 | } |
4268 | EXPORT_SYMBOL(transport_generic_new_cmd); | ||
5165 | 4269 | ||
5166 | /* transport_generic_process_write(): | 4270 | /* transport_generic_process_write(): |
5167 | * | 4271 | * |
@@ -5169,68 +4273,15 @@ static int transport_generic_new_cmd(struct se_cmd *cmd) | |||
5169 | */ | 4273 | */ |
5170 | void transport_generic_process_write(struct se_cmd *cmd) | 4274 | void transport_generic_process_write(struct se_cmd *cmd) |
5171 | { | 4275 | { |
5172 | #if 0 | ||
5173 | /* | ||
5174 | * Copy SCSI Presented DTL sector(s) from received buffers allocated to | ||
5175 | * original EDTL | ||
5176 | */ | ||
5177 | if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { | ||
5178 | if (!T_TASK(cmd)->t_tasks_se_num) { | ||
5179 | unsigned char *dst, *buf = | ||
5180 | (unsigned char *)T_TASK(cmd)->t_task_buf; | ||
5181 | |||
5182 | dst = kzalloc(cmd->cmd_spdtl, GFP_KERNEL); | ||
5183 | if (!(dst)) { | ||
5184 | printk(KERN_ERR "Unable to allocate memory for" | ||
5185 | " WRITE underflow\n"); | ||
5186 | transport_generic_request_failure(cmd, NULL, | ||
5187 | PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); | ||
5188 | return; | ||
5189 | } | ||
5190 | memcpy(dst, buf, cmd->cmd_spdtl); | ||
5191 | |||
5192 | kfree(T_TASK(cmd)->t_task_buf); | ||
5193 | T_TASK(cmd)->t_task_buf = dst; | ||
5194 | } else { | ||
5195 | struct scatterlist *sg = | ||
5196 | (struct scatterlist *)T_TASK(cmd)->t_task_buf; | ||
5197 | struct scatterlist *orig_sg; | ||
5198 | |||
5199 | orig_sg = kzalloc(sizeof(struct scatterlist) * | ||
5200 | T_TASK(cmd)->t_tasks_se_num, | ||
5201 | GFP_KERNEL); | ||
5202 | if (!(orig_sg)) { | ||
5203 | printk(KERN_ERR "Unable to allocate memory" | ||
5204 | " for WRITE underflow\n"); | ||
5205 | transport_generic_request_failure(cmd, NULL, | ||
5206 | PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); | ||
5207 | return; | ||
5208 | } | ||
5209 | |||
5210 | memcpy(orig_sg, T_TASK(cmd)->t_task_buf, | ||
5211 | sizeof(struct scatterlist) * | ||
5212 | T_TASK(cmd)->t_tasks_se_num); | ||
5213 | |||
5214 | cmd->data_length = cmd->cmd_spdtl; | ||
5215 | /* | ||
5216 | * FIXME, clear out original struct se_task and state | ||
5217 | * information. | ||
5218 | */ | ||
5219 | if (transport_generic_new_cmd(cmd) < 0) { | ||
5220 | transport_generic_request_failure(cmd, NULL, | ||
5221 | PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); | ||
5222 | kfree(orig_sg); | ||
5223 | return; | ||
5224 | } | ||
5225 | |||
5226 | transport_memcpy_write_sg(cmd, orig_sg); | ||
5227 | } | ||
5228 | } | ||
5229 | #endif | ||
5230 | transport_execute_tasks(cmd); | 4276 | transport_execute_tasks(cmd); |
5231 | } | 4277 | } |
5232 | EXPORT_SYMBOL(transport_generic_process_write); | 4278 | EXPORT_SYMBOL(transport_generic_process_write); |
5233 | 4279 | ||
4280 | static int transport_write_pending_qf(struct se_cmd *cmd) | ||
4281 | { | ||
4282 | return cmd->se_tfo->write_pending(cmd); | ||
4283 | } | ||
4284 | |||
5234 | /* transport_generic_write_pending(): | 4285 | /* transport_generic_write_pending(): |
5235 | * | 4286 | * |
5236 | * | 4287 | * |
@@ -5240,24 +4291,26 @@ static int transport_generic_write_pending(struct se_cmd *cmd) | |||
5240 | unsigned long flags; | 4291 | unsigned long flags; |
5241 | int ret; | 4292 | int ret; |
5242 | 4293 | ||
5243 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 4294 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
5244 | cmd->t_state = TRANSPORT_WRITE_PENDING; | 4295 | cmd->t_state = TRANSPORT_WRITE_PENDING; |
5245 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 4296 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
5246 | /* | 4297 | |
5247 | * For the TCM control CDBs using a contiguous buffer, do the memcpy | 4298 | if (cmd->transport_qf_callback) { |
5248 | * from the passed Linux/SCSI struct scatterlist located at | 4299 | ret = cmd->transport_qf_callback(cmd); |
5249 | * T_TASK(se_cmd)->t_task_pt_buf to the contiguous buffer at | 4300 | if (ret == -EAGAIN) |
5250 | * T_TASK(se_cmd)->t_task_buf. | 4301 | goto queue_full; |
5251 | */ | 4302 | else if (ret < 0) |
5252 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) | 4303 | return ret; |
5253 | transport_memcpy_read_contig(cmd, | 4304 | |
5254 | T_TASK(cmd)->t_task_buf, | 4305 | cmd->transport_qf_callback = NULL; |
5255 | T_TASK(cmd)->t_task_pt_sgl); | 4306 | return 0; |
4307 | } | ||
4308 | |||
5256 | /* | 4309 | /* |
5257 | * Clear the se_cmd for WRITE_PENDING status in order to set | 4310 | * Clear the se_cmd for WRITE_PENDING status in order to set |
5258 | * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data | 4311 | * cmd->t_transport_active=0 so that transport_generic_handle_data |
5259 | * can be called from HW target mode interrupt code. This is safe | 4312 | * can be called from HW target mode interrupt code. This is safe |
5260 | * to be called with transport_off=1 before the CMD_TFO(cmd)->write_pending | 4313 | * to be called with transport_off=1 before the cmd->se_tfo->write_pending |
5261 | * because the se_cmd->se_lun pointer is not being cleared. | 4314 | * because the se_cmd->se_lun pointer is not being cleared. |
5262 | */ | 4315 | */ |
5263 | transport_cmd_check_stop(cmd, 1, 0); | 4316 | transport_cmd_check_stop(cmd, 1, 0); |
@@ -5266,26 +4319,30 @@ static int transport_generic_write_pending(struct se_cmd *cmd) | |||
5266 | * Call the fabric write_pending function here to let the | 4319 | * Call the fabric write_pending function here to let the |
5267 | * frontend know that WRITE buffers are ready. | 4320 | * frontend know that WRITE buffers are ready. |
5268 | */ | 4321 | */ |
5269 | ret = CMD_TFO(cmd)->write_pending(cmd); | 4322 | ret = cmd->se_tfo->write_pending(cmd); |
5270 | if (ret < 0) | 4323 | if (ret == -EAGAIN) |
4324 | goto queue_full; | ||
4325 | else if (ret < 0) | ||
5271 | return ret; | 4326 | return ret; |
5272 | 4327 | ||
5273 | return PYX_TRANSPORT_WRITE_PENDING; | 4328 | return PYX_TRANSPORT_WRITE_PENDING; |
4329 | |||
4330 | queue_full: | ||
4331 | pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); | ||
4332 | cmd->t_state = TRANSPORT_COMPLETE_QF_WP; | ||
4333 | transport_handle_queue_full(cmd, cmd->se_dev, | ||
4334 | transport_write_pending_qf); | ||
4335 | return ret; | ||
5274 | } | 4336 | } |
5275 | 4337 | ||
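A hedged sketch of a fabric write_pending() callback using the -EAGAIN convention introduced above; my_fabric_write_pending() and my_fabric_queue_full() are hypothetical names, not part of this patch:

static int my_fabric_write_pending(struct se_cmd *cmd)
{
	if (my_fabric_queue_full())	/* hypothetical resource check */
		return -EAGAIN;		/* core parks the command and retries
					 * via transport_write_pending_qf() */
	/* ... solicit the WRITE data from the initiator ... */
	return 0;
}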
5276 | /* transport_release_cmd_to_pool(): | 4338 | void transport_release_cmd(struct se_cmd *cmd) |
5277 | * | ||
5278 | * | ||
5279 | */ | ||
5280 | void transport_release_cmd_to_pool(struct se_cmd *cmd) | ||
5281 | { | 4339 | { |
5282 | BUG_ON(!T_TASK(cmd)); | 4340 | BUG_ON(!cmd->se_tfo); |
5283 | BUG_ON(!CMD_TFO(cmd)); | ||
5284 | 4341 | ||
5285 | transport_free_se_cmd(cmd); | 4342 | transport_free_se_cmd(cmd); |
5286 | CMD_TFO(cmd)->release_cmd_to_pool(cmd); | 4343 | cmd->se_tfo->release_cmd(cmd); |
5287 | } | 4344 | } |
5288 | EXPORT_SYMBOL(transport_release_cmd_to_pool); | 4345 | EXPORT_SYMBOL(transport_release_cmd); |
5289 | 4346 | ||
5290 | /* transport_generic_free_cmd(): | 4347 | /* transport_generic_free_cmd(): |
5291 | * | 4348 | * |
@@ -5294,19 +4351,18 @@ EXPORT_SYMBOL(transport_release_cmd_to_pool); | |||
5294 | void transport_generic_free_cmd( | 4351 | void transport_generic_free_cmd( |
5295 | struct se_cmd *cmd, | 4352 | struct se_cmd *cmd, |
5296 | int wait_for_tasks, | 4353 | int wait_for_tasks, |
5297 | int release_to_pool, | ||
5298 | int session_reinstatement) | 4354 | int session_reinstatement) |
5299 | { | 4355 | { |
5300 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd)) | 4356 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) |
5301 | transport_release_cmd_to_pool(cmd); | 4357 | transport_release_cmd(cmd); |
5302 | else { | 4358 | else { |
5303 | core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); | 4359 | core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); |
5304 | 4360 | ||
5305 | if (SE_LUN(cmd)) { | 4361 | if (cmd->se_lun) { |
5306 | #if 0 | 4362 | #if 0 |
5307 | printk(KERN_INFO "cmd: %p ITT: 0x%08x contains" | 4363 | pr_debug("cmd: %p ITT: 0x%08x contains" |
5308 | " SE_LUN(cmd)\n", cmd, | 4364 | " cmd->se_lun\n", cmd, |
5309 | CMD_TFO(cmd)->get_task_tag(cmd)); | 4365 | cmd->se_tfo->get_task_tag(cmd)); |
5310 | #endif | 4366 | #endif |
5311 | transport_lun_remove_cmd(cmd); | 4367 | transport_lun_remove_cmd(cmd); |
5312 | } | 4368 | } |
@@ -5316,8 +4372,7 @@ void transport_generic_free_cmd( | |||
5316 | 4372 | ||
5317 | transport_free_dev_tasks(cmd); | 4373 | transport_free_dev_tasks(cmd); |
5318 | 4374 | ||
5319 | transport_generic_remove(cmd, release_to_pool, | 4375 | transport_generic_remove(cmd, session_reinstatement); |
5320 | session_reinstatement); | ||
5321 | } | 4376 | } |
5322 | } | 4377 | } |
5323 | EXPORT_SYMBOL(transport_generic_free_cmd); | 4378 | EXPORT_SYMBOL(transport_generic_free_cmd); |
@@ -5343,43 +4398,36 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) | |||
5343 | * If the frontend has already requested this struct se_cmd to | 4398 | * If the frontend has already requested this struct se_cmd to |
5344 | * be stopped, we can safely ignore this struct se_cmd. | 4399 | * be stopped, we can safely ignore this struct se_cmd. |
5345 | */ | 4400 | */ |
5346 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 4401 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
5347 | if (atomic_read(&T_TASK(cmd)->t_transport_stop)) { | 4402 | if (atomic_read(&cmd->t_transport_stop)) { |
5348 | atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); | 4403 | atomic_set(&cmd->transport_lun_stop, 0); |
5349 | DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop ==" | 4404 | pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop ==" |
5350 | " TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd)); | 4405 | " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd)); |
5351 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 4406 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
5352 | transport_cmd_check_stop(cmd, 1, 0); | 4407 | transport_cmd_check_stop(cmd, 1, 0); |
5353 | return -1; | 4408 | return -EPERM; |
5354 | } | 4409 | } |
5355 | atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1); | 4410 | atomic_set(&cmd->transport_lun_fe_stop, 1); |
5356 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 4411 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
5357 | 4412 | ||
5358 | wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq); | 4413 | wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); |
5359 | 4414 | ||
5360 | ret = transport_stop_tasks_for_cmd(cmd); | 4415 | ret = transport_stop_tasks_for_cmd(cmd); |
5361 | 4416 | ||
5362 | DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:" | 4417 | pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:" |
5363 | " %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret); | 4418 | " %d\n", cmd, cmd->t_task_list_num, ret); |
5364 | if (!ret) { | 4419 | if (!ret) { |
5365 | DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n", | 4420 | pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n", |
5366 | CMD_TFO(cmd)->get_task_tag(cmd)); | 4421 | cmd->se_tfo->get_task_tag(cmd)); |
5367 | wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp); | 4422 | wait_for_completion(&cmd->transport_lun_stop_comp); |
5368 | DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n", | 4423 | pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", |
5369 | CMD_TFO(cmd)->get_task_tag(cmd)); | 4424 | cmd->se_tfo->get_task_tag(cmd)); |
5370 | } | 4425 | } |
5371 | transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); | 4426 | transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); |
5372 | 4427 | ||
5373 | return 0; | 4428 | return 0; |
5374 | } | 4429 | } |
5375 | 4430 | ||
5376 | /* #define DEBUG_CLEAR_LUN */ | ||
5377 | #ifdef DEBUG_CLEAR_LUN | ||
5378 | #define DEBUG_CLEAR_L(x...) printk(KERN_INFO x) | ||
5379 | #else | ||
5380 | #define DEBUG_CLEAR_L(x...) | ||
5381 | #endif | ||
5382 | |||
5383 | static void __transport_clear_lun_from_sessions(struct se_lun *lun) | 4431 | static void __transport_clear_lun_from_sessions(struct se_lun *lun) |
5384 | { | 4432 | { |
5385 | struct se_cmd *cmd = NULL; | 4433 | struct se_cmd *cmd = NULL; |
@@ -5389,66 +4437,59 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) | |||
5389 | * Initiator Port. | 4437 | * Initiator Port. |
5390 | */ | 4438 | */ |
5391 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | 4439 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
5392 | while (!list_empty_careful(&lun->lun_cmd_list)) { | 4440 | while (!list_empty(&lun->lun_cmd_list)) { |
5393 | cmd = list_entry(lun->lun_cmd_list.next, | 4441 | cmd = list_first_entry(&lun->lun_cmd_list, |
5394 | struct se_cmd, se_lun_list); | 4442 | struct se_cmd, se_lun_node); |
5395 | list_del(&cmd->se_lun_list); | 4443 | list_del(&cmd->se_lun_node); |
5396 | 4444 | ||
5397 | if (!(T_TASK(cmd))) { | 4445 | atomic_set(&cmd->transport_lun_active, 0); |
5398 | printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL" | ||
5399 | "[i,t]_state: %u/%u\n", | ||
5400 | CMD_TFO(cmd)->get_task_tag(cmd), | ||
5401 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state); | ||
5402 | BUG(); | ||
5403 | } | ||
5404 | atomic_set(&T_TASK(cmd)->transport_lun_active, 0); | ||
5405 | /* | 4446 | /* |
5406 | * This will notify iscsi_target_transport.c: | 4447 | * This will notify iscsi_target_transport.c: |
5407 | * transport_cmd_check_stop() that a LUN shutdown is in | 4448 | * transport_cmd_check_stop() that a LUN shutdown is in |
5408 | * progress for the iscsi_cmd_t. | 4449 | * progress for the iscsi_cmd_t. |
5409 | */ | 4450 | */ |
5410 | spin_lock(&T_TASK(cmd)->t_state_lock); | 4451 | spin_lock(&cmd->t_state_lock); |
5411 | DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport" | 4452 | pr_debug("SE_LUN[%d] - Setting cmd->transport" |
5412 | "_lun_stop for ITT: 0x%08x\n", | 4453 | "_lun_stop for ITT: 0x%08x\n", |
5413 | SE_LUN(cmd)->unpacked_lun, | 4454 | cmd->se_lun->unpacked_lun, |
5414 | CMD_TFO(cmd)->get_task_tag(cmd)); | 4455 | cmd->se_tfo->get_task_tag(cmd)); |
5415 | atomic_set(&T_TASK(cmd)->transport_lun_stop, 1); | 4456 | atomic_set(&cmd->transport_lun_stop, 1); |
5416 | spin_unlock(&T_TASK(cmd)->t_state_lock); | 4457 | spin_unlock(&cmd->t_state_lock); |
5417 | 4458 | ||
5418 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); | 4459 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); |
5419 | 4460 | ||
5420 | if (!(SE_LUN(cmd))) { | 4461 | if (!cmd->se_lun) { |
5421 | printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n", | 4462 | pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n", |
5422 | CMD_TFO(cmd)->get_task_tag(cmd), | 4463 | cmd->se_tfo->get_task_tag(cmd), |
5423 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state); | 4464 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); |
5424 | BUG(); | 4465 | BUG(); |
5425 | } | 4466 | } |
5426 | /* | 4467 | /* |
5427 | * If the Storage engine still owns the iscsi_cmd_t, determine | 4468 | * If the Storage engine still owns the iscsi_cmd_t, determine |
5428 | * and/or stop its context. | 4469 | * and/or stop its context. |
5429 | */ | 4470 | */ |
5430 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport" | 4471 | pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport" |
5431 | "_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun, | 4472 | "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun, |
5432 | CMD_TFO(cmd)->get_task_tag(cmd)); | 4473 | cmd->se_tfo->get_task_tag(cmd)); |
5433 | 4474 | ||
5434 | if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) { | 4475 | if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) { |
5435 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | 4476 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
5436 | continue; | 4477 | continue; |
5437 | } | 4478 | } |
5438 | 4479 | ||
5439 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun" | 4480 | pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun" |
5440 | "_wait_for_tasks(): SUCCESS\n", | 4481 | "_wait_for_tasks(): SUCCESS\n", |
5441 | SE_LUN(cmd)->unpacked_lun, | 4482 | cmd->se_lun->unpacked_lun, |
5442 | CMD_TFO(cmd)->get_task_tag(cmd)); | 4483 | cmd->se_tfo->get_task_tag(cmd)); |
5443 | 4484 | ||
5444 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags); | 4485 | spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); |
5445 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { | 4486 | if (!atomic_read(&cmd->transport_dev_active)) { |
5446 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); | 4487 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); |
5447 | goto check_cond; | 4488 | goto check_cond; |
5448 | } | 4489 | } |
5449 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); | 4490 | atomic_set(&cmd->transport_dev_active, 0); |
5450 | transport_all_task_dev_remove_state(cmd); | 4491 | transport_all_task_dev_remove_state(cmd); |
5451 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); | 4492 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); |
5452 | 4493 | ||
5453 | transport_free_dev_tasks(cmd); | 4494 | transport_free_dev_tasks(cmd); |
5454 | /* | 4495 | /* |
@@ -5465,24 +4506,24 @@ check_cond: | |||
5465 | * be released, notify the waiting thread now that LU has | 4506 | * be released, notify the waiting thread now that LU has |
5466 | * finished accessing it. | 4507 | * finished accessing it. |
5467 | */ | 4508 | */ |
5468 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags); | 4509 | spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); |
5469 | if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) { | 4510 | if (atomic_read(&cmd->transport_lun_fe_stop)) { |
5470 | DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for" | 4511 | pr_debug("SE_LUN[%d] - Detected FE stop for" |
5471 | " struct se_cmd: %p ITT: 0x%08x\n", | 4512 | " struct se_cmd: %p ITT: 0x%08x\n", |
5472 | lun->unpacked_lun, | 4513 | lun->unpacked_lun, |
5473 | cmd, CMD_TFO(cmd)->get_task_tag(cmd)); | 4514 | cmd, cmd->se_tfo->get_task_tag(cmd)); |
5474 | 4515 | ||
5475 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | 4516 | spin_unlock_irqrestore(&cmd->t_state_lock, |
5476 | cmd_flags); | 4517 | cmd_flags); |
5477 | transport_cmd_check_stop(cmd, 1, 0); | 4518 | transport_cmd_check_stop(cmd, 1, 0); |
5478 | complete(&T_TASK(cmd)->transport_lun_fe_stop_comp); | 4519 | complete(&cmd->transport_lun_fe_stop_comp); |
5479 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | 4520 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
5480 | continue; | 4521 | continue; |
5481 | } | 4522 | } |
5482 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n", | 4523 | pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n", |
5483 | lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd)); | 4524 | lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); |
5484 | 4525 | ||
5485 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); | 4526 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); |
5486 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | 4527 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
5487 | } | 4528 | } |
5488 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); | 4529 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); |
@@ -5502,11 +4543,11 @@ int transport_clear_lun_from_sessions(struct se_lun *lun) | |||
5502 | { | 4543 | { |
5503 | struct task_struct *kt; | 4544 | struct task_struct *kt; |
5504 | 4545 | ||
5505 | kt = kthread_run(transport_clear_lun_thread, (void *)lun, | 4546 | kt = kthread_run(transport_clear_lun_thread, lun, |
5506 | "tcm_cl_%u", lun->unpacked_lun); | 4547 | "tcm_cl_%u", lun->unpacked_lun); |
5507 | if (IS_ERR(kt)) { | 4548 | if (IS_ERR(kt)) { |
5508 | printk(KERN_ERR "Unable to start clear_lun thread\n"); | 4549 | pr_err("Unable to start clear_lun thread\n"); |
5509 | return -1; | 4550 | return PTR_ERR(kt); |
5510 | } | 4551 | } |
5511 | wait_for_completion(&lun->lun_shutdown_comp); | 4552 | wait_for_completion(&lun->lun_shutdown_comp); |
5512 | 4553 | ||
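The clear_lun hunk above also tightens the error path: kthread_run() returns an ERR_PTR()-encoded errno on failure, so propagating PTR_ERR(kt) preserves the real reason where the old code collapsed everything to -1. A minimal sketch of the idiom (example_spawn_worker is a hypothetical caller, not part of the patch):

#include <linux/kthread.h>
#include <linux/err.h>

/* Hypothetical helper showing the kthread_run()/IS_ERR()/PTR_ERR() idiom. */
static int example_spawn_worker(int (*fn)(void *), void *ctx)
{
        struct task_struct *kt;

        kt = kthread_run(fn, ctx, "example_worker");
        if (IS_ERR(kt))
                return PTR_ERR(kt);     /* the real errno, e.g. -ENOMEM */
        return 0;
}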
@@ -5528,20 +4569,20 @@ static void transport_generic_wait_for_tasks( | |||
5528 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) | 4569 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) |
5529 | return; | 4570 | return; |
5530 | 4571 | ||
5531 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 4572 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
5532 | /* | 4573 | /* |
5533 | * If we are already stopped due to an external event (ie: LUN shutdown) | 4574 | * If we are already stopped due to an external event (ie: LUN shutdown) |
5534 | * sleep until the connection can have the passed struct se_cmd back. | 4575 | * sleep until the connection can have the passed struct se_cmd back. |
5535 | * The T_TASK(cmd)->transport_lun_stopped_sem will be upped by | 4576 | * The cmd->transport_lun_stopped_sem will be upped by |
5536 | * transport_clear_lun_from_sessions() once the ConfigFS context caller | 4577 | * transport_clear_lun_from_sessions() once the ConfigFS context caller |
5537 | * has completed its operation on the struct se_cmd. | 4578 | * has completed its operation on the struct se_cmd. |
5538 | */ | 4579 | */ |
5539 | if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) { | 4580 | if (atomic_read(&cmd->transport_lun_stop)) { |
5540 | 4581 | ||
5541 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopping" | 4582 | pr_debug("wait_for_tasks: Stopping" |
5542 | " wait_for_completion(&T_TASK(cmd)transport_lun_fe" | 4583 | " wait_for_completion(&cmd->t_tasktransport_lun_fe" |
5543 | "_stop_comp); for ITT: 0x%08x\n", | 4584 | "_stop_comp); for ITT: 0x%08x\n", |
5544 | CMD_TFO(cmd)->get_task_tag(cmd)); | 4585 | cmd->se_tfo->get_task_tag(cmd)); |
5545 | /* | 4586 | /* |
5546 | * There is a special case for WRITES where a FE exception + | 4587 | * There is a special case for WRITES where a FE exception + |
5547 | * LUN shutdown means ConfigFS context is still sleeping on | 4588 | * LUN shutdown means ConfigFS context is still sleeping on |
@@ -5549,10 +4590,10 @@ static void transport_generic_wait_for_tasks( | |||
5549 | * We go ahead and up transport_lun_stop_comp just to be sure | 4590 | * We go ahead and up transport_lun_stop_comp just to be sure |
5550 | * here. | 4591 | * here. |
5551 | */ | 4592 | */ |
5552 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 4593 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
5553 | complete(&T_TASK(cmd)->transport_lun_stop_comp); | 4594 | complete(&cmd->transport_lun_stop_comp); |
5554 | wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp); | 4595 | wait_for_completion(&cmd->transport_lun_fe_stop_comp); |
5555 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 4596 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
5556 | 4597 | ||
5557 | transport_all_task_dev_remove_state(cmd); | 4598 | transport_all_task_dev_remove_state(cmd); |
5558 | /* | 4599 | /* |
@@ -5560,44 +4601,44 @@ static void transport_generic_wait_for_tasks( | |||
5560 | * struct se_cmd, now owns the structure and can be released through | 4601 | * struct se_cmd, now owns the structure and can be released through |
5561 | * normal means below. | 4602 | * normal means below. |
5562 | */ | 4603 | */ |
5563 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopped" | 4604 | pr_debug("wait_for_tasks: Stopped" |
5564 | " wait_for_completion(&T_TASK(cmd)transport_lun_fe_" | 4605 | " wait_for_completion(&cmd->t_tasktransport_lun_fe_" |
5565 | "stop_comp); for ITT: 0x%08x\n", | 4606 | "stop_comp); for ITT: 0x%08x\n", |
5566 | CMD_TFO(cmd)->get_task_tag(cmd)); | 4607 | cmd->se_tfo->get_task_tag(cmd)); |
5567 | 4608 | ||
5568 | atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); | 4609 | atomic_set(&cmd->transport_lun_stop, 0); |
5569 | } | 4610 | } |
5570 | if (!atomic_read(&T_TASK(cmd)->t_transport_active) || | 4611 | if (!atomic_read(&cmd->t_transport_active) || |
5571 | atomic_read(&T_TASK(cmd)->t_transport_aborted)) | 4612 | atomic_read(&cmd->t_transport_aborted)) |
5572 | goto remove; | 4613 | goto remove; |
5573 | 4614 | ||
5574 | atomic_set(&T_TASK(cmd)->t_transport_stop, 1); | 4615 | atomic_set(&cmd->t_transport_stop, 1); |
5575 | 4616 | ||
5576 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x" | 4617 | pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" |
5577 | " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" | 4618 | " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" |
5578 | " = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd), | 4619 | " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd), |
5579 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state, | 4620 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, |
5580 | cmd->deferred_t_state); | 4621 | cmd->deferred_t_state); |
5581 | 4622 | ||
5582 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 4623 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
5583 | 4624 | ||
5584 | wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq); | 4625 | wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); |
5585 | 4626 | ||
5586 | wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp); | 4627 | wait_for_completion(&cmd->t_transport_stop_comp); |
5587 | 4628 | ||
5588 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 4629 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
5589 | atomic_set(&T_TASK(cmd)->t_transport_active, 0); | 4630 | atomic_set(&cmd->t_transport_active, 0); |
5590 | atomic_set(&T_TASK(cmd)->t_transport_stop, 0); | 4631 | atomic_set(&cmd->t_transport_stop, 0); |
5591 | 4632 | ||
5592 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion(" | 4633 | pr_debug("wait_for_tasks: Stopped wait_for_completion(" |
5593 | "&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n", | 4634 | "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", |
5594 | CMD_TFO(cmd)->get_task_tag(cmd)); | 4635 | cmd->se_tfo->get_task_tag(cmd)); |
5595 | remove: | 4636 | remove: |
5596 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 4637 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
5597 | if (!remove_cmd) | 4638 | if (!remove_cmd) |
5598 | return; | 4639 | return; |
5599 | 4640 | ||
5600 | transport_generic_free_cmd(cmd, 0, 0, session_reinstatement); | 4641 | transport_generic_free_cmd(cmd, 0, session_reinstatement); |
5601 | } | 4642 | } |
5602 | 4643 | ||
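The wait_for_tasks conversion above is mostly the mechanical T_TASK(cmd)-> to cmd-> rename, but the handshake it preserves is worth isolating: the waiter raises an atomic stop flag, and the worker acknowledges through a completion once it has quiesced. A simplified sketch with hypothetical names; the real code additionally holds cmd->t_state_lock around the flag updates and handles the LUN-shutdown special case:

#include <linux/completion.h>
#include <linux/atomic.h>

struct quiesce {
        atomic_t stop;                  /* request: please park */
        struct completion stopped;      /* acknowledgement: parked */
};

static void waiter_side(struct quiesce *q)
{
        atomic_set(&q->stop, 1);
        wait_for_completion(&q->stopped);       /* sleep until acked */
        atomic_set(&q->stop, 0);
}

static void worker_side(struct quiesce *q)
{
        if (atomic_read(&q->stop))
                complete(&q->stopped);  /* ack; caller stops processing */
}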
5603 | static int transport_get_sense_codes( | 4644 | static int transport_get_sense_codes( |
@@ -5632,13 +4673,13 @@ int transport_send_check_condition_and_sense( | |||
5632 | int offset; | 4673 | int offset; |
5633 | u8 asc = 0, ascq = 0; | 4674 | u8 asc = 0, ascq = 0; |
5634 | 4675 | ||
5635 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 4676 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
5636 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { | 4677 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { |
5637 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 4678 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
5638 | return 0; | 4679 | return 0; |
5639 | } | 4680 | } |
5640 | cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; | 4681 | cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; |
5641 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 4682 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
5642 | 4683 | ||
5643 | if (!reason && from_transport) | 4684 | if (!reason && from_transport) |
5644 | goto after_reason; | 4685 | goto after_reason; |
@@ -5651,7 +4692,7 @@ int transport_send_check_condition_and_sense( | |||
5651 | * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE | 4692 | * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE |
5652 | * from include/scsi/scsi_cmnd.h | 4693 | * from include/scsi/scsi_cmnd.h |
5653 | */ | 4694 | */ |
5654 | offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd, | 4695 | offset = cmd->se_tfo->set_fabric_sense_len(cmd, |
5655 | TRANSPORT_SENSE_BUFFER); | 4696 | TRANSPORT_SENSE_BUFFER); |
5656 | /* | 4697 | /* |
5657 | * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses | 4698 | * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses |
@@ -5788,8 +4829,7 @@ int transport_send_check_condition_and_sense( | |||
5788 | cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; | 4829 | cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; |
5789 | 4830 | ||
5790 | after_reason: | 4831 | after_reason: |
5791 | CMD_TFO(cmd)->queue_status(cmd); | 4832 | return cmd->se_tfo->queue_status(cmd); |
5792 | return 0; | ||
5793 | } | 4833 | } |
5794 | EXPORT_SYMBOL(transport_send_check_condition_and_sense); | 4834 | EXPORT_SYMBOL(transport_send_check_condition_and_sense); |
5795 | 4835 | ||
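The after_reason change is small but deliberate: instead of discarding the return value of the fabric's queue_status() callback, the helper now forwards it, presumably so back-pressure such as -EAGAIN can reach the caller (compare the TRANSPORT_COMPLETE_QF_WP state added to the processing thread further down). A sketch of the before/after shape, assuming only the se_tfo->queue_status() callback visible in this hunk:

static int send_status_old(struct se_cmd *cmd)
{
        cmd->se_tfo->queue_status(cmd);         /* return value dropped */
        return 0;
}

static int send_status_new(struct se_cmd *cmd)
{
        return cmd->se_tfo->queue_status(cmd);  /* e.g. -EAGAIN reaches caller */
}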
@@ -5797,18 +4837,18 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status) | |||
5797 | { | 4837 | { |
5798 | int ret = 0; | 4838 | int ret = 0; |
5799 | 4839 | ||
5800 | if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) { | 4840 | if (atomic_read(&cmd->t_transport_aborted) != 0) { |
5801 | if (!(send_status) || | 4841 | if (!send_status || |
5802 | (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) | 4842 | (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) |
5803 | return 1; | 4843 | return 1; |
5804 | #if 0 | 4844 | #if 0 |
5805 | printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED" | 4845 | pr_debug("Sending delayed SAM_STAT_TASK_ABORTED" |
5806 | " status for CDB: 0x%02x ITT: 0x%08x\n", | 4846 | " status for CDB: 0x%02x ITT: 0x%08x\n", |
5807 | T_TASK(cmd)->t_task_cdb[0], | 4847 | cmd->t_task_cdb[0], |
5808 | CMD_TFO(cmd)->get_task_tag(cmd)); | 4848 | cmd->se_tfo->get_task_tag(cmd)); |
5809 | #endif | 4849 | #endif |
5810 | cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; | 4850 | cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; |
5811 | CMD_TFO(cmd)->queue_status(cmd); | 4851 | cmd->se_tfo->queue_status(cmd); |
5812 | ret = 1; | 4852 | ret = 1; |
5813 | } | 4853 | } |
5814 | return ret; | 4854 | return ret; |
@@ -5824,8 +4864,8 @@ void transport_send_task_abort(struct se_cmd *cmd) | |||
5824 | * queued back to fabric module by transport_check_aborted_status(). | 4864 | * queued back to fabric module by transport_check_aborted_status(). |
5825 | */ | 4865 | */ |
5826 | if (cmd->data_direction == DMA_TO_DEVICE) { | 4866 | if (cmd->data_direction == DMA_TO_DEVICE) { |
5827 | if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) { | 4867 | if (cmd->se_tfo->write_pending_status(cmd) != 0) { |
5828 | atomic_inc(&T_TASK(cmd)->t_transport_aborted); | 4868 | atomic_inc(&cmd->t_transport_aborted); |
5829 | smp_mb__after_atomic_inc(); | 4869 | smp_mb__after_atomic_inc(); |
5830 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | 4870 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; |
5831 | transport_new_cmd_failure(cmd); | 4871 | transport_new_cmd_failure(cmd); |
@@ -5834,11 +4874,11 @@ void transport_send_task_abort(struct se_cmd *cmd) | |||
5834 | } | 4874 | } |
5835 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | 4875 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; |
5836 | #if 0 | 4876 | #if 0 |
5837 | printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," | 4877 | pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," |
5838 | " ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0], | 4878 | " ITT: 0x%08x\n", cmd->t_task_cdb[0], |
5839 | CMD_TFO(cmd)->get_task_tag(cmd)); | 4879 | cmd->se_tfo->get_task_tag(cmd)); |
5840 | #endif | 4880 | #endif |
5841 | CMD_TFO(cmd)->queue_status(cmd); | 4881 | cmd->se_tfo->queue_status(cmd); |
5842 | } | 4882 | } |
5843 | 4883 | ||
5844 | /* transport_generic_do_tmr(): | 4884 | /* transport_generic_do_tmr(): |
@@ -5847,14 +4887,12 @@ void transport_send_task_abort(struct se_cmd *cmd) | |||
5847 | */ | 4887 | */ |
5848 | int transport_generic_do_tmr(struct se_cmd *cmd) | 4888 | int transport_generic_do_tmr(struct se_cmd *cmd) |
5849 | { | 4889 | { |
5850 | struct se_cmd *ref_cmd; | 4890 | struct se_device *dev = cmd->se_dev; |
5851 | struct se_device *dev = SE_DEV(cmd); | ||
5852 | struct se_tmr_req *tmr = cmd->se_tmr_req; | 4891 | struct se_tmr_req *tmr = cmd->se_tmr_req; |
5853 | int ret; | 4892 | int ret; |
5854 | 4893 | ||
5855 | switch (tmr->function) { | 4894 | switch (tmr->function) { |
5856 | case TMR_ABORT_TASK: | 4895 | case TMR_ABORT_TASK: |
5857 | ref_cmd = tmr->ref_cmd; | ||
5858 | tmr->response = TMR_FUNCTION_REJECTED; | 4896 | tmr->response = TMR_FUNCTION_REJECTED; |
5859 | break; | 4897 | break; |
5860 | case TMR_ABORT_TASK_SET: | 4898 | case TMR_ABORT_TASK_SET: |
@@ -5874,14 +4912,14 @@ int transport_generic_do_tmr(struct se_cmd *cmd) | |||
5874 | tmr->response = TMR_FUNCTION_REJECTED; | 4912 | tmr->response = TMR_FUNCTION_REJECTED; |
5875 | break; | 4913 | break; |
5876 | default: | 4914 | default: |
5877 | printk(KERN_ERR "Uknown TMR function: 0x%02x.\n", | 4915 | pr_err("Uknown TMR function: 0x%02x.\n", |
5878 | tmr->function); | 4916 | tmr->function); |
5879 | tmr->response = TMR_FUNCTION_REJECTED; | 4917 | tmr->response = TMR_FUNCTION_REJECTED; |
5880 | break; | 4918 | break; |
5881 | } | 4919 | } |
5882 | 4920 | ||
5883 | cmd->t_state = TRANSPORT_ISTATE_PROCESSING; | 4921 | cmd->t_state = TRANSPORT_ISTATE_PROCESSING; |
5884 | CMD_TFO(cmd)->queue_tm_rsp(cmd); | 4922 | cmd->se_tfo->queue_tm_rsp(cmd); |
5885 | 4923 | ||
5886 | transport_cmd_check_stop(cmd, 2, 0); | 4924 | transport_cmd_check_stop(cmd, 2, 0); |
5887 | return 0; | 4925 | return 0; |
@@ -5911,62 +4949,54 @@ transport_get_task_from_state_list(struct se_device *dev) | |||
5911 | static void transport_processing_shutdown(struct se_device *dev) | 4949 | static void transport_processing_shutdown(struct se_device *dev) |
5912 | { | 4950 | { |
5913 | struct se_cmd *cmd; | 4951 | struct se_cmd *cmd; |
5914 | struct se_queue_req *qr; | ||
5915 | struct se_task *task; | 4952 | struct se_task *task; |
5916 | u8 state; | ||
5917 | unsigned long flags; | 4953 | unsigned long flags; |
5918 | /* | 4954 | /* |
5919 | * Empty the struct se_device's struct se_task state list. | 4955 | * Empty the struct se_device's struct se_task state list. |
5920 | */ | 4956 | */ |
5921 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 4957 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
5922 | while ((task = transport_get_task_from_state_list(dev))) { | 4958 | while ((task = transport_get_task_from_state_list(dev))) { |
5923 | if (!(TASK_CMD(task))) { | 4959 | if (!task->task_se_cmd) { |
5924 | printk(KERN_ERR "TASK_CMD(task) is NULL!\n"); | 4960 | pr_err("task->task_se_cmd is NULL!\n"); |
5925 | continue; | 4961 | continue; |
5926 | } | 4962 | } |
5927 | cmd = TASK_CMD(task); | 4963 | cmd = task->task_se_cmd; |
5928 | 4964 | ||
5929 | if (!T_TASK(cmd)) { | ||
5930 | printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:" | ||
5931 | " %p ITT: 0x%08x\n", task, cmd, | ||
5932 | CMD_TFO(cmd)->get_task_tag(cmd)); | ||
5933 | continue; | ||
5934 | } | ||
5935 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | 4965 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
5936 | 4966 | ||
5937 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 4967 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
5938 | 4968 | ||
5939 | DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x," | 4969 | pr_debug("PT: cmd: %p task: %p ITT: 0x%08x," |
5940 | " i_state/def_i_state: %d/%d, t_state/def_t_state:" | 4970 | " i_state: %d, t_state/def_t_state:" |
5941 | " %d/%d cdb: 0x%02x\n", cmd, task, | 4971 | " %d/%d cdb: 0x%02x\n", cmd, task, |
5942 | CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn, | 4972 | cmd->se_tfo->get_task_tag(cmd), |
5943 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state, | 4973 | cmd->se_tfo->get_cmd_state(cmd), |
5944 | cmd->t_state, cmd->deferred_t_state, | 4974 | cmd->t_state, cmd->deferred_t_state, |
5945 | T_TASK(cmd)->t_task_cdb[0]); | 4975 | cmd->t_task_cdb[0]); |
5946 | DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:" | 4976 | pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:" |
5947 | " %d t_task_cdbs_sent: %d -- t_transport_active: %d" | 4977 | " %d t_task_cdbs_sent: %d -- t_transport_active: %d" |
5948 | " t_transport_stop: %d t_transport_sent: %d\n", | 4978 | " t_transport_stop: %d t_transport_sent: %d\n", |
5949 | CMD_TFO(cmd)->get_task_tag(cmd), | 4979 | cmd->se_tfo->get_task_tag(cmd), |
5950 | T_TASK(cmd)->t_task_cdbs, | 4980 | cmd->t_task_list_num, |
5951 | atomic_read(&T_TASK(cmd)->t_task_cdbs_left), | 4981 | atomic_read(&cmd->t_task_cdbs_left), |
5952 | atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), | 4982 | atomic_read(&cmd->t_task_cdbs_sent), |
5953 | atomic_read(&T_TASK(cmd)->t_transport_active), | 4983 | atomic_read(&cmd->t_transport_active), |
5954 | atomic_read(&T_TASK(cmd)->t_transport_stop), | 4984 | atomic_read(&cmd->t_transport_stop), |
5955 | atomic_read(&T_TASK(cmd)->t_transport_sent)); | 4985 | atomic_read(&cmd->t_transport_sent)); |
5956 | 4986 | ||
5957 | if (atomic_read(&task->task_active)) { | 4987 | if (atomic_read(&task->task_active)) { |
5958 | atomic_set(&task->task_stop, 1); | 4988 | atomic_set(&task->task_stop, 1); |
5959 | spin_unlock_irqrestore( | 4989 | spin_unlock_irqrestore( |
5960 | &T_TASK(cmd)->t_state_lock, flags); | 4990 | &cmd->t_state_lock, flags); |
5961 | 4991 | ||
5962 | DEBUG_DO("Waiting for task: %p to shutdown for dev:" | 4992 | pr_debug("Waiting for task: %p to shutdown for dev:" |
5963 | " %p\n", task, dev); | 4993 | " %p\n", task, dev); |
5964 | wait_for_completion(&task->task_stop_comp); | 4994 | wait_for_completion(&task->task_stop_comp); |
5965 | DEBUG_DO("Completed task: %p shutdown for dev: %p\n", | 4995 | pr_debug("Completed task: %p shutdown for dev: %p\n", |
5966 | task, dev); | 4996 | task, dev); |
5967 | 4997 | ||
5968 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 4998 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
5969 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); | 4999 | atomic_dec(&cmd->t_task_cdbs_left); |
5970 | 5000 | ||
5971 | atomic_set(&task->task_active, 0); | 5001 | atomic_set(&task->task_active, 0); |
5972 | atomic_set(&task->task_stop, 0); | 5002 | atomic_set(&task->task_stop, 0); |
@@ -5976,72 +5006,72 @@ static void transport_processing_shutdown(struct se_device *dev) | |||
5976 | } | 5006 | } |
5977 | __transport_stop_task_timer(task, &flags); | 5007 | __transport_stop_task_timer(task, &flags); |
5978 | 5008 | ||
5979 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) { | 5009 | if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { |
5980 | spin_unlock_irqrestore( | 5010 | spin_unlock_irqrestore( |
5981 | &T_TASK(cmd)->t_state_lock, flags); | 5011 | &cmd->t_state_lock, flags); |
5982 | 5012 | ||
5983 | DEBUG_DO("Skipping task: %p, dev: %p for" | 5013 | pr_debug("Skipping task: %p, dev: %p for" |
5984 | " t_task_cdbs_ex_left: %d\n", task, dev, | 5014 | " t_task_cdbs_ex_left: %d\n", task, dev, |
5985 | atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left)); | 5015 | atomic_read(&cmd->t_task_cdbs_ex_left)); |
5986 | 5016 | ||
5987 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 5017 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
5988 | continue; | 5018 | continue; |
5989 | } | 5019 | } |
5990 | 5020 | ||
5991 | if (atomic_read(&T_TASK(cmd)->t_transport_active)) { | 5021 | if (atomic_read(&cmd->t_transport_active)) { |
5992 | DEBUG_DO("got t_transport_active = 1 for task: %p, dev:" | 5022 | pr_debug("got t_transport_active = 1 for task: %p, dev:" |
5993 | " %p\n", task, dev); | 5023 | " %p\n", task, dev); |
5994 | 5024 | ||
5995 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { | 5025 | if (atomic_read(&cmd->t_fe_count)) { |
5996 | spin_unlock_irqrestore( | 5026 | spin_unlock_irqrestore( |
5997 | &T_TASK(cmd)->t_state_lock, flags); | 5027 | &cmd->t_state_lock, flags); |
5998 | transport_send_check_condition_and_sense( | 5028 | transport_send_check_condition_and_sense( |
5999 | cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, | 5029 | cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, |
6000 | 0); | 5030 | 0); |
6001 | transport_remove_cmd_from_queue(cmd, | 5031 | transport_remove_cmd_from_queue(cmd, |
6002 | SE_DEV(cmd)->dev_queue_obj); | 5032 | &cmd->se_dev->dev_queue_obj); |
6003 | 5033 | ||
6004 | transport_lun_remove_cmd(cmd); | 5034 | transport_lun_remove_cmd(cmd); |
6005 | transport_cmd_check_stop(cmd, 1, 0); | 5035 | transport_cmd_check_stop(cmd, 1, 0); |
6006 | } else { | 5036 | } else { |
6007 | spin_unlock_irqrestore( | 5037 | spin_unlock_irqrestore( |
6008 | &T_TASK(cmd)->t_state_lock, flags); | 5038 | &cmd->t_state_lock, flags); |
6009 | 5039 | ||
6010 | transport_remove_cmd_from_queue(cmd, | 5040 | transport_remove_cmd_from_queue(cmd, |
6011 | SE_DEV(cmd)->dev_queue_obj); | 5041 | &cmd->se_dev->dev_queue_obj); |
6012 | 5042 | ||
6013 | transport_lun_remove_cmd(cmd); | 5043 | transport_lun_remove_cmd(cmd); |
6014 | 5044 | ||
6015 | if (transport_cmd_check_stop(cmd, 1, 0)) | 5045 | if (transport_cmd_check_stop(cmd, 1, 0)) |
6016 | transport_generic_remove(cmd, 0, 0); | 5046 | transport_generic_remove(cmd, 0); |
6017 | } | 5047 | } |
6018 | 5048 | ||
6019 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 5049 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
6020 | continue; | 5050 | continue; |
6021 | } | 5051 | } |
6022 | DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n", | 5052 | pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n", |
6023 | task, dev); | 5053 | task, dev); |
6024 | 5054 | ||
6025 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { | 5055 | if (atomic_read(&cmd->t_fe_count)) { |
6026 | spin_unlock_irqrestore( | 5056 | spin_unlock_irqrestore( |
6027 | &T_TASK(cmd)->t_state_lock, flags); | 5057 | &cmd->t_state_lock, flags); |
6028 | transport_send_check_condition_and_sense(cmd, | 5058 | transport_send_check_condition_and_sense(cmd, |
6029 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | 5059 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); |
6030 | transport_remove_cmd_from_queue(cmd, | 5060 | transport_remove_cmd_from_queue(cmd, |
6031 | SE_DEV(cmd)->dev_queue_obj); | 5061 | &cmd->se_dev->dev_queue_obj); |
6032 | 5062 | ||
6033 | transport_lun_remove_cmd(cmd); | 5063 | transport_lun_remove_cmd(cmd); |
6034 | transport_cmd_check_stop(cmd, 1, 0); | 5064 | transport_cmd_check_stop(cmd, 1, 0); |
6035 | } else { | 5065 | } else { |
6036 | spin_unlock_irqrestore( | 5066 | spin_unlock_irqrestore( |
6037 | &T_TASK(cmd)->t_state_lock, flags); | 5067 | &cmd->t_state_lock, flags); |
6038 | 5068 | ||
6039 | transport_remove_cmd_from_queue(cmd, | 5069 | transport_remove_cmd_from_queue(cmd, |
6040 | SE_DEV(cmd)->dev_queue_obj); | 5070 | &cmd->se_dev->dev_queue_obj); |
6041 | transport_lun_remove_cmd(cmd); | 5071 | transport_lun_remove_cmd(cmd); |
6042 | 5072 | ||
6043 | if (transport_cmd_check_stop(cmd, 1, 0)) | 5073 | if (transport_cmd_check_stop(cmd, 1, 0)) |
6044 | transport_generic_remove(cmd, 0, 0); | 5074 | transport_generic_remove(cmd, 0); |
6045 | } | 5075 | } |
6046 | 5076 | ||
6047 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 5077 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
@@ -6050,18 +5080,12 @@ static void transport_processing_shutdown(struct se_device *dev) | |||
6050 | /* | 5080 | /* |
6051 | * Empty the struct se_device's struct se_cmd list. | 5081 | * Empty the struct se_device's struct se_cmd list. |
6052 | */ | 5082 | */ |
6053 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); | 5083 | while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) { |
6054 | while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) { | 5084 | |
6055 | spin_unlock_irqrestore( | 5085 | pr_debug("From Device Queue: cmd: %p t_state: %d\n", |
6056 | &dev->dev_queue_obj->cmd_queue_lock, flags); | 5086 | cmd, cmd->t_state); |
6057 | cmd = (struct se_cmd *)qr->cmd; | 5087 | |
6058 | state = qr->state; | 5088 | if (atomic_read(&cmd->t_fe_count)) { |
6059 | kfree(qr); | ||
6060 | |||
6061 | DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n", | ||
6062 | cmd, state); | ||
6063 | |||
6064 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { | ||
6065 | transport_send_check_condition_and_sense(cmd, | 5089 | transport_send_check_condition_and_sense(cmd, |
6066 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | 5090 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); |
6067 | 5091 | ||
@@ -6070,11 +5094,9 @@ static void transport_processing_shutdown(struct se_device *dev) | |||
6070 | } else { | 5094 | } else { |
6071 | transport_lun_remove_cmd(cmd); | 5095 | transport_lun_remove_cmd(cmd); |
6072 | if (transport_cmd_check_stop(cmd, 1, 0)) | 5096 | if (transport_cmd_check_stop(cmd, 1, 0)) |
6073 | transport_generic_remove(cmd, 0, 0); | 5097 | transport_generic_remove(cmd, 0); |
6074 | } | 5098 | } |
6075 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); | ||
6076 | } | 5099 | } |
6077 | spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags); | ||
6078 | } | 5100 | } |
6079 | 5101 | ||
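transport_processing_shutdown() also shows the series' queue rework: the old code allocated a struct se_queue_req per enqueue, copied cmd and state into it, and kfree()d it on dequeue; the new code queues struct se_cmd directly and reads cmd->t_state. A hypothetical minimal dequeue in the new style (the se_queue_node member name is an assumption, it is not visible in this hunk):

static struct se_cmd *example_get_cmd(struct se_queue_obj *qobj)
{
        struct se_cmd *cmd;
        unsigned long flags;

        spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
        if (list_empty(&qobj->qobj_list)) {
                spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
                return NULL;
        }
        cmd = list_first_entry(&qobj->qobj_list, struct se_cmd,
                               se_queue_node);  /* assumed member name */
        list_del_init(&cmd->se_queue_node);
        atomic_dec(&qobj->queue_cnt);
        spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
        return cmd;
}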
6080 | /* transport_processing_thread(): | 5102 | /* transport_processing_thread(): |
@@ -6083,16 +5105,15 @@ static void transport_processing_shutdown(struct se_device *dev) | |||
6083 | */ | 5105 | */ |
6084 | static int transport_processing_thread(void *param) | 5106 | static int transport_processing_thread(void *param) |
6085 | { | 5107 | { |
6086 | int ret, t_state; | 5108 | int ret; |
6087 | struct se_cmd *cmd; | 5109 | struct se_cmd *cmd; |
6088 | struct se_device *dev = (struct se_device *) param; | 5110 | struct se_device *dev = (struct se_device *) param; |
6089 | struct se_queue_req *qr; | ||
6090 | 5111 | ||
6091 | set_user_nice(current, -20); | 5112 | set_user_nice(current, -20); |
6092 | 5113 | ||
6093 | while (!kthread_should_stop()) { | 5114 | while (!kthread_should_stop()) { |
6094 | ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq, | 5115 | ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq, |
6095 | atomic_read(&dev->dev_queue_obj->queue_cnt) || | 5116 | atomic_read(&dev->dev_queue_obj.queue_cnt) || |
6096 | kthread_should_stop()); | 5117 | kthread_should_stop()); |
6097 | if (ret < 0) | 5118 | if (ret < 0) |
6098 | goto out; | 5119 | goto out; |
@@ -6108,22 +5129,18 @@ static int transport_processing_thread(void *param) | |||
6108 | get_cmd: | 5129 | get_cmd: |
6109 | __transport_execute_tasks(dev); | 5130 | __transport_execute_tasks(dev); |
6110 | 5131 | ||
6111 | qr = transport_get_qr_from_queue(dev->dev_queue_obj); | 5132 | cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj); |
6112 | if (!(qr)) | 5133 | if (!cmd) |
6113 | continue; | 5134 | continue; |
6114 | 5135 | ||
6115 | cmd = (struct se_cmd *)qr->cmd; | 5136 | switch (cmd->t_state) { |
6116 | t_state = qr->state; | ||
6117 | kfree(qr); | ||
6118 | |||
6119 | switch (t_state) { | ||
6120 | case TRANSPORT_NEW_CMD_MAP: | 5137 | case TRANSPORT_NEW_CMD_MAP: |
6121 | if (!(CMD_TFO(cmd)->new_cmd_map)) { | 5138 | if (!cmd->se_tfo->new_cmd_map) { |
6122 | printk(KERN_ERR "CMD_TFO(cmd)->new_cmd_map is" | 5139 | pr_err("cmd->se_tfo->new_cmd_map is" |
6123 | " NULL for TRANSPORT_NEW_CMD_MAP\n"); | 5140 | " NULL for TRANSPORT_NEW_CMD_MAP\n"); |
6124 | BUG(); | 5141 | BUG(); |
6125 | } | 5142 | } |
6126 | ret = CMD_TFO(cmd)->new_cmd_map(cmd); | 5143 | ret = cmd->se_tfo->new_cmd_map(cmd); |
6127 | if (ret < 0) { | 5144 | if (ret < 0) { |
6128 | cmd->transport_error_status = ret; | 5145 | cmd->transport_error_status = ret; |
6129 | transport_generic_request_failure(cmd, NULL, | 5146 | transport_generic_request_failure(cmd, NULL, |
@@ -6134,7 +5151,9 @@ get_cmd: | |||
6134 | /* Fall through */ | 5151 | /* Fall through */ |
6135 | case TRANSPORT_NEW_CMD: | 5152 | case TRANSPORT_NEW_CMD: |
6136 | ret = transport_generic_new_cmd(cmd); | 5153 | ret = transport_generic_new_cmd(cmd); |
6137 | if (ret < 0) { | 5154 | if (ret == -EAGAIN) |
5155 | break; | ||
5156 | else if (ret < 0) { | ||
6138 | cmd->transport_error_status = ret; | 5157 | cmd->transport_error_status = ret; |
6139 | transport_generic_request_failure(cmd, NULL, | 5158 | transport_generic_request_failure(cmd, NULL, |
6140 | 0, (cmd->data_direction != | 5159 | 0, (cmd->data_direction != |
@@ -6149,10 +5168,10 @@ get_cmd: | |||
6149 | transport_generic_complete_ok(cmd); | 5168 | transport_generic_complete_ok(cmd); |
6150 | break; | 5169 | break; |
6151 | case TRANSPORT_REMOVE: | 5170 | case TRANSPORT_REMOVE: |
6152 | transport_generic_remove(cmd, 1, 0); | 5171 | transport_generic_remove(cmd, 0); |
6153 | break; | 5172 | break; |
6154 | case TRANSPORT_FREE_CMD_INTR: | 5173 | case TRANSPORT_FREE_CMD_INTR: |
6155 | transport_generic_free_cmd(cmd, 0, 1, 0); | 5174 | transport_generic_free_cmd(cmd, 0, 0); |
6156 | break; | 5175 | break; |
6157 | case TRANSPORT_PROCESS_TMR: | 5176 | case TRANSPORT_PROCESS_TMR: |
6158 | transport_generic_do_tmr(cmd); | 5177 | transport_generic_do_tmr(cmd); |
@@ -6164,13 +5183,16 @@ get_cmd: | |||
6164 | transport_stop_all_task_timers(cmd); | 5183 | transport_stop_all_task_timers(cmd); |
6165 | transport_generic_request_timeout(cmd); | 5184 | transport_generic_request_timeout(cmd); |
6166 | break; | 5185 | break; |
5186 | case TRANSPORT_COMPLETE_QF_WP: | ||
5187 | transport_generic_write_pending(cmd); | ||
5188 | break; | ||
6167 | default: | 5189 | default: |
6168 | printk(KERN_ERR "Unknown t_state: %d deferred_t_state:" | 5190 | pr_err("Unknown t_state: %d deferred_t_state:" |
6169 | " %d for ITT: 0x%08x i_state: %d on SE LUN:" | 5191 | " %d for ITT: 0x%08x i_state: %d on SE LUN:" |
6170 | " %u\n", t_state, cmd->deferred_t_state, | 5192 | " %u\n", cmd->t_state, cmd->deferred_t_state, |
6171 | CMD_TFO(cmd)->get_task_tag(cmd), | 5193 | cmd->se_tfo->get_task_tag(cmd), |
6172 | CMD_TFO(cmd)->get_cmd_state(cmd), | 5194 | cmd->se_tfo->get_cmd_state(cmd), |
6173 | SE_LUN(cmd)->unpacked_lun); | 5195 | cmd->se_lun->unpacked_lun); |
6174 | BUG(); | 5196 | BUG(); |
6175 | } | 5197 | } |
6176 | 5198 | ||
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c index df355176a377..31e3c652527e 100644 --- a/drivers/target/target_core_ua.c +++ b/drivers/target/target_core_ua.c | |||
@@ -49,15 +49,15 @@ int core_scsi3_ua_check( | |||
49 | struct se_session *sess = cmd->se_sess; | 49 | struct se_session *sess = cmd->se_sess; |
50 | struct se_node_acl *nacl; | 50 | struct se_node_acl *nacl; |
51 | 51 | ||
52 | if (!(sess)) | 52 | if (!sess) |
53 | return 0; | 53 | return 0; |
54 | 54 | ||
55 | nacl = sess->se_node_acl; | 55 | nacl = sess->se_node_acl; |
56 | if (!(nacl)) | 56 | if (!nacl) |
57 | return 0; | 57 | return 0; |
58 | 58 | ||
59 | deve = &nacl->device_list[cmd->orig_fe_lun]; | 59 | deve = &nacl->device_list[cmd->orig_fe_lun]; |
60 | if (!(atomic_read(&deve->ua_count))) | 60 | if (!atomic_read(&deve->ua_count)) |
61 | return 0; | 61 | return 0; |
62 | /* | 62 | /* |
63 | * From sam4r14, section 5.14 Unit attention condition: | 63 | * From sam4r14, section 5.14 Unit attention condition: |
@@ -80,10 +80,10 @@ int core_scsi3_ua_check( | |||
80 | case REQUEST_SENSE: | 80 | case REQUEST_SENSE: |
81 | return 0; | 81 | return 0; |
82 | default: | 82 | default: |
83 | return -1; | 83 | return -EINVAL; |
84 | } | 84 | } |
85 | 85 | ||
86 | return -1; | 86 | return -EINVAL; |
87 | } | 87 | } |
88 | 88 | ||
89 | int core_scsi3_ua_allocate( | 89 | int core_scsi3_ua_allocate( |
@@ -97,13 +97,13 @@ int core_scsi3_ua_allocate( | |||
97 | /* | 97 | /* |
98 | * PASSTHROUGH OPS | 98 | * PASSTHROUGH OPS |
99 | */ | 99 | */ |
100 | if (!(nacl)) | 100 | if (!nacl) |
101 | return -1; | 101 | return -EINVAL; |
102 | 102 | ||
103 | ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC); | 103 | ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC); |
104 | if (!(ua)) { | 104 | if (!ua) { |
105 | printk(KERN_ERR "Unable to allocate struct se_ua\n"); | 105 | pr_err("Unable to allocate struct se_ua\n"); |
106 | return -1; | 106 | return -ENOMEM; |
107 | } | 107 | } |
108 | INIT_LIST_HEAD(&ua->ua_dev_list); | 108 | INIT_LIST_HEAD(&ua->ua_dev_list); |
109 | INIT_LIST_HEAD(&ua->ua_nacl_list); | 109 | INIT_LIST_HEAD(&ua->ua_nacl_list); |
@@ -177,9 +177,9 @@ int core_scsi3_ua_allocate( | |||
177 | spin_unlock(&deve->ua_lock); | 177 | spin_unlock(&deve->ua_lock); |
178 | spin_unlock_irq(&nacl->device_list_lock); | 178 | spin_unlock_irq(&nacl->device_list_lock); |
179 | 179 | ||
180 | printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:" | 180 | pr_debug("[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:" |
181 | " 0x%02x, ASCQ: 0x%02x\n", | 181 | " 0x%02x, ASCQ: 0x%02x\n", |
182 | TPG_TFO(nacl->se_tpg)->get_fabric_name(), unpacked_lun, | 182 | nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, |
183 | asc, ascq); | 183 | asc, ascq); |
184 | 184 | ||
185 | atomic_inc(&deve->ua_count); | 185 | atomic_inc(&deve->ua_count); |
@@ -208,23 +208,23 @@ void core_scsi3_ua_for_check_condition( | |||
208 | u8 *asc, | 208 | u8 *asc, |
209 | u8 *ascq) | 209 | u8 *ascq) |
210 | { | 210 | { |
211 | struct se_device *dev = SE_DEV(cmd); | 211 | struct se_device *dev = cmd->se_dev; |
212 | struct se_dev_entry *deve; | 212 | struct se_dev_entry *deve; |
213 | struct se_session *sess = cmd->se_sess; | 213 | struct se_session *sess = cmd->se_sess; |
214 | struct se_node_acl *nacl; | 214 | struct se_node_acl *nacl; |
215 | struct se_ua *ua = NULL, *ua_p; | 215 | struct se_ua *ua = NULL, *ua_p; |
216 | int head = 1; | 216 | int head = 1; |
217 | 217 | ||
218 | if (!(sess)) | 218 | if (!sess) |
219 | return; | 219 | return; |
220 | 220 | ||
221 | nacl = sess->se_node_acl; | 221 | nacl = sess->se_node_acl; |
222 | if (!(nacl)) | 222 | if (!nacl) |
223 | return; | 223 | return; |
224 | 224 | ||
225 | spin_lock_irq(&nacl->device_list_lock); | 225 | spin_lock_irq(&nacl->device_list_lock); |
226 | deve = &nacl->device_list[cmd->orig_fe_lun]; | 226 | deve = &nacl->device_list[cmd->orig_fe_lun]; |
227 | if (!(atomic_read(&deve->ua_count))) { | 227 | if (!atomic_read(&deve->ua_count)) { |
228 | spin_unlock_irq(&nacl->device_list_lock); | 228 | spin_unlock_irq(&nacl->device_list_lock); |
229 | return; | 229 | return; |
230 | } | 230 | } |
@@ -240,7 +240,7 @@ void core_scsi3_ua_for_check_condition( | |||
240 | * highest priority UNIT_ATTENTION and ASC/ASCQ without | 240 | * highest priority UNIT_ATTENTION and ASC/ASCQ without |
241 | * clearing it. | 241 | * clearing it. |
242 | */ | 242 | */ |
243 | if (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) { | 243 | if (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) { |
244 | *asc = ua->ua_asc; | 244 | *asc = ua->ua_asc; |
245 | *ascq = ua->ua_ascq; | 245 | *ascq = ua->ua_ascq; |
246 | break; | 246 | break; |
@@ -264,13 +264,13 @@ void core_scsi3_ua_for_check_condition( | |||
264 | spin_unlock(&deve->ua_lock); | 264 | spin_unlock(&deve->ua_lock); |
265 | spin_unlock_irq(&nacl->device_list_lock); | 265 | spin_unlock_irq(&nacl->device_list_lock); |
266 | 266 | ||
267 | printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with" | 267 | pr_debug("[%s]: %s UNIT ATTENTION condition with" |
268 | " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x" | 268 | " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x" |
269 | " reported ASC: 0x%02x, ASCQ: 0x%02x\n", | 269 | " reported ASC: 0x%02x, ASCQ: 0x%02x\n", |
270 | TPG_TFO(nacl->se_tpg)->get_fabric_name(), | 270 | nacl->se_tpg->se_tpg_tfo->get_fabric_name(), |
271 | (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) ? "Reporting" : | 271 | (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" : |
272 | "Releasing", DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl, | 272 | "Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl, |
273 | cmd->orig_fe_lun, T_TASK(cmd)->t_task_cdb[0], *asc, *ascq); | 273 | cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq); |
274 | } | 274 | } |
275 | 275 | ||
276 | int core_scsi3_ua_clear_for_request_sense( | 276 | int core_scsi3_ua_clear_for_request_sense( |
@@ -284,18 +284,18 @@ int core_scsi3_ua_clear_for_request_sense( | |||
284 | struct se_ua *ua = NULL, *ua_p; | 284 | struct se_ua *ua = NULL, *ua_p; |
285 | int head = 1; | 285 | int head = 1; |
286 | 286 | ||
287 | if (!(sess)) | 287 | if (!sess) |
288 | return -1; | 288 | return -EINVAL; |
289 | 289 | ||
290 | nacl = sess->se_node_acl; | 290 | nacl = sess->se_node_acl; |
291 | if (!(nacl)) | 291 | if (!nacl) |
292 | return -1; | 292 | return -EINVAL; |
293 | 293 | ||
294 | spin_lock_irq(&nacl->device_list_lock); | 294 | spin_lock_irq(&nacl->device_list_lock); |
295 | deve = &nacl->device_list[cmd->orig_fe_lun]; | 295 | deve = &nacl->device_list[cmd->orig_fe_lun]; |
296 | if (!(atomic_read(&deve->ua_count))) { | 296 | if (!atomic_read(&deve->ua_count)) { |
297 | spin_unlock_irq(&nacl->device_list_lock); | 297 | spin_unlock_irq(&nacl->device_list_lock); |
298 | return -1; | 298 | return -EPERM; |
299 | } | 299 | } |
300 | /* | 300 | /* |
301 | * The highest priority Unit Attentions are placed at the head of the | 301 | * The highest priority Unit Attentions are placed at the head of the |
@@ -323,10 +323,10 @@ int core_scsi3_ua_clear_for_request_sense( | |||
323 | spin_unlock(&deve->ua_lock); | 323 | spin_unlock(&deve->ua_lock); |
324 | spin_unlock_irq(&nacl->device_list_lock); | 324 | spin_unlock_irq(&nacl->device_list_lock); |
325 | 325 | ||
326 | printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped" | 326 | pr_debug("[%s]: Released UNIT ATTENTION condition, mapped" |
327 | " LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x," | 327 | " LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x," |
328 | " ASCQ: 0x%02x\n", TPG_TFO(nacl->se_tpg)->get_fabric_name(), | 328 | " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(), |
329 | cmd->orig_fe_lun, *asc, *ascq); | 329 | cmd->orig_fe_lun, *asc, *ascq); |
330 | 330 | ||
331 | return (head) ? -1 : 0; | 331 | return (head) ? -EPERM : 0; |
332 | } | 332 | } |
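The target_core_ua.c hunks replace every bare -1 with a specific errno so callers can distinguish failure modes: -EINVAL for bad input, -ENOMEM for allocation failure, -EPERM for a valid but disallowed request. A generic sketch of the convention; the example_* context and helpers are hypothetical:

#include <linux/errno.h>
#include <linux/types.h>

struct example_ctx;
static bool example_alloc(struct example_ctx *ctx);
static bool example_permitted(struct example_ctx *ctx);

static int example_check(struct example_ctx *ctx)
{
        if (!ctx)
                return -EINVAL;         /* caller passed garbage */
        if (!example_alloc(ctx))
                return -ENOMEM;         /* resource exhaustion */
        if (!example_permitted(ctx))
                return -EPERM;          /* valid request, not allowed */
        return 0;
}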
diff --git a/drivers/target/tcm_fc/Makefile b/drivers/target/tcm_fc/Makefile index 7a5c2b64cf65..20b14bb087c9 100644 --- a/drivers/target/tcm_fc/Makefile +++ b/drivers/target/tcm_fc/Makefile | |||
@@ -1,15 +1,6 @@ | |||
1 | EXTRA_CFLAGS += -I$(srctree)/drivers/target/ \ | 1 | tcm_fc-y += tfc_cmd.o \ |
2 | -I$(srctree)/drivers/scsi/ \ | 2 | tfc_conf.o \ |
3 | -I$(srctree)/include/scsi/ \ | 3 | tfc_io.o \ |
4 | -I$(srctree)/drivers/target/tcm_fc/ | 4 | tfc_sess.o |
5 | |||
6 | tcm_fc-y += tfc_cmd.o \ | ||
7 | tfc_conf.o \ | ||
8 | tfc_io.o \ | ||
9 | tfc_sess.o | ||
10 | 5 | ||
11 | obj-$(CONFIG_TCM_FC) += tcm_fc.o | 6 | obj-$(CONFIG_TCM_FC) += tcm_fc.o |
12 | |||
13 | ifdef CONFIGFS_TCM_FC_DEBUG | ||
14 | EXTRA_CFLAGS += -DTCM_FC_DEBUG | ||
15 | endif | ||
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index 7b82f1b7fef8..f7fff7ed63c3 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h | |||
@@ -23,30 +23,6 @@ | |||
23 | #define FT_TPG_NAMELEN 32 /* max length of TPG name */ | 23 | #define FT_TPG_NAMELEN 32 /* max length of TPG name */ |
24 | #define FT_LUN_NAMELEN 32 /* max length of LUN name */ | 24 | #define FT_LUN_NAMELEN 32 /* max length of LUN name */ |
25 | 25 | ||
26 | /* | ||
27 | * Debug options. | ||
28 | */ | ||
29 | #define FT_DEBUG_CONF 0x01 /* configuration messages */ | ||
30 | #define FT_DEBUG_SESS 0x02 /* session messages */ | ||
31 | #define FT_DEBUG_TM 0x04 /* TM operations */ | ||
32 | #define FT_DEBUG_IO 0x08 /* I/O commands */ | ||
33 | #define FT_DEBUG_DATA 0x10 /* Data transfer */ | ||
34 | |||
35 | extern unsigned int ft_debug_logging; /* debug options */ | ||
36 | |||
37 | #define FT_DEBUG(mask, fmt, args...) \ | ||
38 | do { \ | ||
39 | if (ft_debug_logging & (mask)) \ | ||
40 | printk(KERN_INFO "tcm_fc: %s: " fmt, \ | ||
41 | __func__, ##args); \ | ||
42 | } while (0) | ||
43 | |||
44 | #define FT_CONF_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_CONF, fmt, ##args) | ||
45 | #define FT_SESS_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_SESS, fmt, ##args) | ||
46 | #define FT_TM_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_TM, fmt, ##args) | ||
47 | #define FT_IO_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_IO, fmt, ##args) | ||
48 | #define FT_DATA_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_DATA, fmt, ##args) | ||
49 | |||
50 | struct ft_transport_id { | 26 | struct ft_transport_id { |
51 | __u8 format; | 27 | __u8 format; |
52 | __u8 __resvd1[7]; | 28 | __u8 __resvd1[7]; |
@@ -195,7 +171,6 @@ int ft_write_pending(struct se_cmd *); | |||
195 | int ft_write_pending_status(struct se_cmd *); | 171 | int ft_write_pending_status(struct se_cmd *); |
196 | u32 ft_get_task_tag(struct se_cmd *); | 172 | u32 ft_get_task_tag(struct se_cmd *); |
197 | int ft_get_cmd_state(struct se_cmd *); | 173 | int ft_get_cmd_state(struct se_cmd *); |
198 | void ft_new_cmd_failure(struct se_cmd *); | ||
199 | int ft_queue_tm_resp(struct se_cmd *); | 174 | int ft_queue_tm_resp(struct se_cmd *); |
200 | int ft_is_state_remove(struct se_cmd *); | 175 | int ft_is_state_remove(struct se_cmd *); |
201 | 176 | ||
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index b2a106729d49..a9e9a31da11d 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c | |||
@@ -59,33 +59,30 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) | |||
59 | struct fc_exch *ep; | 59 | struct fc_exch *ep; |
60 | struct fc_seq *sp; | 60 | struct fc_seq *sp; |
61 | struct se_cmd *se_cmd; | 61 | struct se_cmd *se_cmd; |
62 | struct se_mem *mem; | 62 | struct scatterlist *sg; |
63 | struct se_transport_task *task; | 63 | int count; |
64 | |||
65 | if (!(ft_debug_logging & FT_DEBUG_IO)) | ||
66 | return; | ||
67 | 64 | ||
68 | se_cmd = &cmd->se_cmd; | 65 | se_cmd = &cmd->se_cmd; |
69 | printk(KERN_INFO "%s: cmd %p state %d sess %p seq %p se_cmd %p\n", | 66 | pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n", |
70 | caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd); | 67 | caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd); |
71 | printk(KERN_INFO "%s: cmd %p cdb %p\n", | 68 | pr_debug("%s: cmd %p cdb %p\n", |
72 | caller, cmd, cmd->cdb); | 69 | caller, cmd, cmd->cdb); |
73 | printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun); | 70 | pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun); |
74 | 71 | ||
75 | task = T_TASK(se_cmd); | 72 | pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n", |
76 | printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n", | 73 | caller, cmd, se_cmd->t_data_nents, |
77 | caller, cmd, task, task->t_tasks_se_num, | 74 | se_cmd->data_length, se_cmd->se_cmd_flags); |
78 | task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags); | 75 | |
79 | if (task->t_mem_list) | 76 | for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count) |
80 | list_for_each_entry(mem, task->t_mem_list, se_list) | 77 | pr_debug("%s: cmd %p sg %p page %p " |
81 | printk(KERN_INFO "%s: cmd %p mem %p page %p " | 78 | "len 0x%x off 0x%x\n", |
82 | "len 0x%x off 0x%x\n", | 79 | caller, cmd, sg, |
83 | caller, cmd, mem, | 80 | sg_page(sg), sg->length, sg->offset); |
84 | mem->se_page, mem->se_len, mem->se_off); | 81 | |
85 | sp = cmd->seq; | 82 | sp = cmd->seq; |
86 | if (sp) { | 83 | if (sp) { |
87 | ep = fc_seq_exch(sp); | 84 | ep = fc_seq_exch(sp); |
88 | printk(KERN_INFO "%s: cmd %p sid %x did %x " | 85 | pr_debug("%s: cmd %p sid %x did %x " |
89 | "ox_id %x rx_id %x seq_id %x e_stat %x\n", | 86 | "ox_id %x rx_id %x seq_id %x e_stat %x\n", |
90 | caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid, | 87 | caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid, |
91 | sp->id, ep->esb_stat); | 88 | sp->id, ep->esb_stat); |
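ft_dump_cmd() above swaps the target-core-private se_mem list walk for a standard scatterlist iteration. The iterator in isolation, as a sketch with a hypothetical function name:

#include <linux/scatterlist.h>
#include <linux/printk.h>

static void example_dump_sg(struct scatterlist *sgl, int nents)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                pr_debug("sg[%d]: page %p len %u off %u\n",
                         i, sg_page(sg), sg->length, sg->offset);
}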
@@ -96,15 +93,17 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) | |||
96 | 93 | ||
97 | static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd) | 94 | static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd) |
98 | { | 95 | { |
99 | struct se_queue_obj *qobj; | 96 | struct ft_tpg *tpg = sess->tport->tpg; |
97 | struct se_queue_obj *qobj = &tpg->qobj; | ||
100 | unsigned long flags; | 98 | unsigned long flags; |
101 | 99 | ||
102 | qobj = &sess->tport->tpg->qobj; | 100 | qobj = &sess->tport->tpg->qobj; |
103 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | 101 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); |
104 | list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list); | 102 | list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list); |
105 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | ||
106 | atomic_inc(&qobj->queue_cnt); | 103 | atomic_inc(&qobj->queue_cnt); |
107 | wake_up_interruptible(&qobj->thread_wq); | 104 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
105 | |||
106 | wake_up_process(tpg->thread); | ||
108 | } | 107 | } |
109 | 108 | ||
110 | static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj) | 109 | static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj) |
@@ -149,7 +148,7 @@ void ft_release_cmd(struct se_cmd *se_cmd) | |||
149 | 148 | ||
150 | void ft_check_stop_free(struct se_cmd *se_cmd) | 149 | void ft_check_stop_free(struct se_cmd *se_cmd) |
151 | { | 150 | { |
152 | transport_generic_free_cmd(se_cmd, 0, 1, 0); | 151 | transport_generic_free_cmd(se_cmd, 0, 0); |
153 | } | 152 | } |
154 | 153 | ||
155 | /* | 154 | /* |
@@ -256,15 +255,14 @@ int ft_write_pending(struct se_cmd *se_cmd) | |||
256 | (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) { | 255 | (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) { |
257 | if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { | 256 | if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { |
258 | /* | 257 | /* |
259 | * Map se_mem list to scatterlist, so that | 258 | * cmd may have been broken up into multiple |
260 | * DDP can be setup. DDP setup function require | 259 | * tasks. Link their sgs together so we can |
261 | * scatterlist. se_mem_list is internal to | 260 | * operate on them all at once. |
262 | * TCM/LIO target | ||
263 | */ | 261 | */ |
264 | transport_do_task_sg_chain(se_cmd); | 262 | transport_do_task_sg_chain(se_cmd); |
265 | cmd->sg = T_TASK(se_cmd)->t_tasks_sg_chained; | 263 | cmd->sg = se_cmd->t_tasks_sg_chained; |
266 | cmd->sg_cnt = | 264 | cmd->sg_cnt = |
267 | T_TASK(se_cmd)->t_tasks_sg_chained_no; | 265 | se_cmd->t_tasks_sg_chained_no; |
268 | } | 266 | } |
269 | if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid, | 267 | if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid, |
270 | cmd->sg, cmd->sg_cnt)) | 268 | cmd->sg, cmd->sg_cnt)) |
@@ -294,12 +292,6 @@ int ft_is_state_remove(struct se_cmd *se_cmd) | |||
294 | return 0; /* XXX TBD */ | 292 | return 0; /* XXX TBD */ |
295 | } | 293 | } |
296 | 294 | ||
297 | void ft_new_cmd_failure(struct se_cmd *se_cmd) | ||
298 | { | ||
299 | /* XXX TBD */ | ||
300 | printk(KERN_INFO "%s: se_cmd %p\n", __func__, se_cmd); | ||
301 | } | ||
302 | |||
303 | /* | 295 | /* |
304 | * FC sequence response handler for follow-on sequences (data) and aborts. | 296 | * FC sequence response handler for follow-on sequences (data) and aborts. |
305 | */ | 297 | */ |
@@ -312,7 +304,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg) | |||
312 | /* XXX need to find cmd if queued */ | 304 | /* XXX need to find cmd if queued */ |
313 | cmd->se_cmd.t_state = TRANSPORT_REMOVE; | 305 | cmd->se_cmd.t_state = TRANSPORT_REMOVE; |
314 | cmd->seq = NULL; | 306 | cmd->seq = NULL; |
315 | transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0); | 307 | transport_generic_free_cmd(&cmd->se_cmd, 0, 0); |
316 | return; | 308 | return; |
317 | } | 309 | } |
318 | 310 | ||
@@ -326,10 +318,10 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg) | |||
326 | case FC_RCTL_DD_SOL_CTL: /* transfer ready */ | 318 | case FC_RCTL_DD_SOL_CTL: /* transfer ready */ |
327 | case FC_RCTL_DD_DATA_DESC: /* transfer ready */ | 319 | case FC_RCTL_DD_DATA_DESC: /* transfer ready */ |
328 | default: | 320 | default: |
329 | printk(KERN_INFO "%s: unhandled frame r_ctl %x\n", | 321 | pr_debug("%s: unhandled frame r_ctl %x\n", |
330 | __func__, fh->fh_r_ctl); | 322 | __func__, fh->fh_r_ctl); |
331 | fc_frame_free(fp); | 323 | fc_frame_free(fp); |
332 | transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0); | 324 | transport_generic_free_cmd(&cmd->se_cmd, 0, 0); |
333 | break; | 325 | break; |
334 | } | 326 | } |
335 | } | 327 | } |
@@ -351,7 +343,7 @@ static void ft_send_resp_status(struct fc_lport *lport, | |||
351 | struct fcp_resp_rsp_info *info; | 343 | struct fcp_resp_rsp_info *info; |
352 | 344 | ||
353 | fh = fc_frame_header_get(rx_fp); | 345 | fh = fc_frame_header_get(rx_fp); |
354 | FT_IO_DBG("FCP error response: did %x oxid %x status %x code %x\n", | 346 | pr_debug("FCP error response: did %x oxid %x status %x code %x\n", |
355 | ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code); | 347 | ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code); |
356 | len = sizeof(*fcp); | 348 | len = sizeof(*fcp); |
357 | if (status == SAM_STAT_GOOD) | 349 | if (status == SAM_STAT_GOOD) |
@@ -421,15 +413,15 @@ static void ft_send_tm(struct ft_cmd *cmd) | |||
421 | * FCP4r01 indicates having a combination of | 413 | * FCP4r01 indicates having a combination of |
422 | * tm_flags set is invalid. | 414 | * tm_flags set is invalid. |
423 | */ | 415 | */ |
424 | FT_TM_DBG("invalid FCP tm_flags %x\n", fcp->fc_tm_flags); | 416 | pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags); |
425 | ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID); | 417 | ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID); |
426 | return; | 418 | return; |
427 | } | 419 | } |
428 | 420 | ||
429 | FT_TM_DBG("alloc tm cmd fn %d\n", tm_func); | 421 | pr_debug("alloc tm cmd fn %d\n", tm_func); |
430 | tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func); | 422 | tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func); |
431 | if (!tmr) { | 423 | if (!tmr) { |
432 | FT_TM_DBG("alloc failed\n"); | 424 | pr_debug("alloc failed\n"); |
433 | ft_send_resp_code(cmd, FCP_TMF_FAILED); | 425 | ft_send_resp_code(cmd, FCP_TMF_FAILED); |
434 | return; | 426 | return; |
435 | } | 427 | } |
@@ -438,20 +430,20 @@ static void ft_send_tm(struct ft_cmd *cmd) | |||
438 | switch (fcp->fc_tm_flags) { | 430 | switch (fcp->fc_tm_flags) { |
439 | case FCP_TMF_LUN_RESET: | 431 | case FCP_TMF_LUN_RESET: |
440 | cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun); | 432 | cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun); |
441 | if (transport_get_lun_for_tmr(&cmd->se_cmd, cmd->lun) < 0) { | 433 | if (transport_lookup_tmr_lun(&cmd->se_cmd, cmd->lun) < 0) { |
442 | /* | 434 | /* |
443 | * Make sure to clean up newly allocated TMR request | 435 | * Make sure to clean up newly allocated TMR request |
444 | * since "unable to handle TMR request because failed | 436 | * since "unable to handle TMR request because failed |
445 | * to get to LUN" | 437 | * to get to LUN" |
446 | */ | 438 | */ |
447 | FT_TM_DBG("Failed to get LUN for TMR func %d, " | 439 | pr_debug("Failed to get LUN for TMR func %d, " |
448 | "se_cmd %p, unpacked_lun %d\n", | 440 | "se_cmd %p, unpacked_lun %d\n", |
449 | tm_func, &cmd->se_cmd, cmd->lun); | 441 | tm_func, &cmd->se_cmd, cmd->lun); |
450 | ft_dump_cmd(cmd, __func__); | 442 | ft_dump_cmd(cmd, __func__); |
451 | sess = cmd->sess; | 443 | sess = cmd->sess; |
452 | transport_send_check_condition_and_sense(&cmd->se_cmd, | 444 | transport_send_check_condition_and_sense(&cmd->se_cmd, |
453 | cmd->se_cmd.scsi_sense_reason, 0); | 445 | cmd->se_cmd.scsi_sense_reason, 0); |
454 | transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0); | 446 | transport_generic_free_cmd(&cmd->se_cmd, 0, 0); |
455 | ft_sess_put(sess); | 447 | ft_sess_put(sess); |
456 | return; | 448 | return; |
457 | } | 449 | } |
@@ -495,7 +487,7 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd) | |||
495 | code = FCP_TMF_FAILED; | 487 | code = FCP_TMF_FAILED; |
496 | break; | 488 | break; |
497 | } | 489 | } |
498 | FT_TM_DBG("tmr fn %d resp %d fcp code %d\n", | 490 | pr_debug("tmr fn %d resp %d fcp code %d\n", |
499 | tmr->function, tmr->response, code); | 491 | tmr->function, tmr->response, code); |
500 | ft_send_resp_code(cmd, code); | 492 | ft_send_resp_code(cmd, code); |
501 | return 0; | 493 | return 0; |
@@ -523,7 +515,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp) | |||
523 | return; | 515 | return; |
524 | 516 | ||
525 | busy: | 517 | busy: |
526 | FT_IO_DBG("cmd or seq allocation failure - sending BUSY\n"); | 518 | pr_debug("cmd or seq allocation failure - sending BUSY\n"); |
527 | ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0); | 519 | ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0); |
528 | fc_frame_free(fp); | 520 | fc_frame_free(fp); |
529 | ft_sess_put(sess); /* undo get from lookup */ | 521 | ft_sess_put(sess); /* undo get from lookup */ |
@@ -548,7 +540,7 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp) | |||
548 | case FC_RCTL_DD_DATA_DESC: /* transfer ready */ | 540 | case FC_RCTL_DD_DATA_DESC: /* transfer ready */ |
549 | case FC_RCTL_ELS4_REQ: /* SRR, perhaps */ | 541 | case FC_RCTL_ELS4_REQ: /* SRR, perhaps */ |
550 | default: | 542 | default: |
551 | printk(KERN_INFO "%s: unhandled frame r_ctl %x\n", | 543 | pr_debug("%s: unhandled frame r_ctl %x\n", |
552 | __func__, fh->fh_r_ctl); | 544 | __func__, fh->fh_r_ctl); |
553 | fc_frame_free(fp); | 545 | fc_frame_free(fp); |
554 | ft_sess_put(sess); /* undo get from lookup */ | 546 | ft_sess_put(sess); /* undo get from lookup */ |
@@ -637,7 +629,7 @@ static void ft_send_cmd(struct ft_cmd *cmd) | |||
637 | fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd); | 629 | fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd); |
638 | 630 | ||
639 | cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun); | 631 | cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun); |
640 | ret = transport_get_lun_for_cmd(&cmd->se_cmd, NULL, cmd->lun); | 632 | ret = transport_lookup_cmd_lun(&cmd->se_cmd, cmd->lun); |
641 | if (ret < 0) { | 633 | if (ret < 0) { |
642 | ft_dump_cmd(cmd, __func__); | 634 | ft_dump_cmd(cmd, __func__); |
643 | transport_send_check_condition_and_sense(&cmd->se_cmd, | 635 | transport_send_check_condition_and_sense(&cmd->se_cmd, |
@@ -647,22 +639,22 @@ static void ft_send_cmd(struct ft_cmd *cmd) | |||
647 | 639 | ||
648 | ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb); | 640 | ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb); |
649 | 641 | ||
650 | FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret); | 642 | pr_debug("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret); |
651 | ft_dump_cmd(cmd, __func__); | 643 | ft_dump_cmd(cmd, __func__); |
652 | 644 | ||
653 | if (ret == -1) { | 645 | if (ret == -ENOMEM) { |
654 | transport_send_check_condition_and_sense(se_cmd, | 646 | transport_send_check_condition_and_sense(se_cmd, |
655 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | 647 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); |
656 | transport_generic_free_cmd(se_cmd, 0, 1, 0); | 648 | transport_generic_free_cmd(se_cmd, 0, 0); |
657 | return; | 649 | return; |
658 | } | 650 | } |
659 | if (ret == -2) { | 651 | if (ret == -EINVAL) { |
660 | if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) | 652 | if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) |
661 | ft_queue_status(se_cmd); | 653 | ft_queue_status(se_cmd); |
662 | else | 654 | else |
663 | transport_send_check_condition_and_sense(se_cmd, | 655 | transport_send_check_condition_and_sense(se_cmd, |
664 | se_cmd->scsi_sense_reason, 0); | 656 | se_cmd->scsi_sense_reason, 0); |
665 | transport_generic_free_cmd(se_cmd, 0, 1, 0); | 657 | transport_generic_free_cmd(se_cmd, 0, 0); |
666 | return; | 658 | return; |
667 | } | 659 | } |
668 | transport_generic_handle_cdb(se_cmd); | 660 | transport_generic_handle_cdb(se_cmd); |
@@ -670,7 +662,6 @@ static void ft_send_cmd(struct ft_cmd *cmd) | |||
670 | 662 | ||
671 | err: | 663 | err: |
672 | ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID); | 664 | ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID); |
673 | return; | ||
674 | } | 665 | } |
675 | 666 | ||
676 | /* | 667 | /* |
@@ -678,7 +669,7 @@ err: | |||
678 | */ | 669 | */ |
679 | static void ft_exec_req(struct ft_cmd *cmd) | 670 | static void ft_exec_req(struct ft_cmd *cmd) |
680 | { | 671 | { |
681 | FT_IO_DBG("cmd state %x\n", cmd->state); | 672 | pr_debug("cmd state %x\n", cmd->state); |
682 | switch (cmd->state) { | 673 | switch (cmd->state) { |
683 | case FC_CMD_ST_NEW: | 674 | case FC_CMD_ST_NEW: |
684 | ft_send_cmd(cmd); | 675 | ft_send_cmd(cmd); |
@@ -697,15 +688,12 @@ int ft_thread(void *arg) | |||
697 | struct ft_tpg *tpg = arg; | 688 | struct ft_tpg *tpg = arg; |
698 | struct se_queue_obj *qobj = &tpg->qobj; | 689 | struct se_queue_obj *qobj = &tpg->qobj; |
699 | struct ft_cmd *cmd; | 690 | struct ft_cmd *cmd; |
700 | int ret; | ||
701 | |||
702 | set_user_nice(current, -20); | ||
703 | 691 | ||
704 | while (!kthread_should_stop()) { | 692 | while (!kthread_should_stop()) { |
705 | ret = wait_event_interruptible(qobj->thread_wq, | 693 | schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT); |
706 | atomic_read(&qobj->queue_cnt) || kthread_should_stop()); | 694 | if (kthread_should_stop()) |
707 | if (ret < 0 || kthread_should_stop()) | ||
708 | goto out; | 695 | goto out; |
696 | |||
709 | cmd = ft_dequeue_cmd(qobj); | 697 | cmd = ft_dequeue_cmd(qobj); |
710 | if (cmd) | 698 | if (cmd) |
711 | ft_exec_req(cmd); | 699 | ft_exec_req(cmd); |
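A note on the ft_thread() hunk above: the waitqueue-and-counter handshake is swapped for a simpler kthread idiom — the thread parks itself in an unbounded interruptible sleep, and whoever queues work wakes it directly with wake_up_process(). A minimal sketch of the pattern; the queue type and helper names here are illustrative stand-ins, not the driver's own:

    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct work_queue {                     /* hypothetical producer/consumer pair */
            spinlock_t lock;
            struct list_head items;
            struct task_struct *thread;     /* as returned by kthread_run() */
    };

    static int consumer_thread(void *arg)
    {
            struct work_queue *q = arg;

            while (!kthread_should_stop()) {
                    /* park until wake_up_process() or kthread_stop() */
                    schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
                    if (kthread_should_stop())
                            break;
                    /* re-check and drain q->items here, as ft_thread()
                     * does with ft_dequeue_cmd()/ft_exec_req() */
            }
            return 0;
    }

    static void producer_add(struct work_queue *q, struct list_head *item)
    {
            spin_lock_bh(&q->lock);
            list_add_tail(item, &q->items);
            spin_unlock_bh(&q->lock);
            wake_up_process(q->thread);     /* kick the parked consumer */
    }

Since the consumer re-checks the queue on every wakeup, a wakeup that arrives while it is already running simply costs one extra pass around the loop.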
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 84e868c255dd..d63e3dd3b180 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c | |||
@@ -106,7 +106,7 @@ static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict) | |||
106 | } | 106 | } |
107 | err = 4; | 107 | err = 4; |
108 | fail: | 108 | fail: |
109 | FT_CONF_DBG("err %u len %zu pos %u byte %u\n", | 109 | pr_debug("err %u len %zu pos %u byte %u\n", |
110 | err, cp - name, pos, byte); | 110 | err, cp - name, pos, byte); |
111 | return -1; | 111 | return -1; |
112 | } | 112 | } |
@@ -216,14 +216,14 @@ static struct se_node_acl *ft_add_acl( | |||
216 | u64 wwpn; | 216 | u64 wwpn; |
217 | u32 q_depth; | 217 | u32 q_depth; |
218 | 218 | ||
219 | FT_CONF_DBG("add acl %s\n", name); | 219 | pr_debug("add acl %s\n", name); |
220 | tpg = container_of(se_tpg, struct ft_tpg, se_tpg); | 220 | tpg = container_of(se_tpg, struct ft_tpg, se_tpg); |
221 | 221 | ||
222 | if (ft_parse_wwn(name, &wwpn, 1) < 0) | 222 | if (ft_parse_wwn(name, &wwpn, 1) < 0) |
223 | return ERR_PTR(-EINVAL); | 223 | return ERR_PTR(-EINVAL); |
224 | 224 | ||
225 | acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL); | 225 | acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL); |
226 | if (!(acl)) | 226 | if (!acl) |
227 | return ERR_PTR(-ENOMEM); | 227 | return ERR_PTR(-ENOMEM); |
228 | acl->node_auth.port_name = wwpn; | 228 | acl->node_auth.port_name = wwpn; |
229 | 229 | ||
@@ -239,11 +239,11 @@ static void ft_del_acl(struct se_node_acl *se_acl) | |||
239 | struct ft_node_acl *acl = container_of(se_acl, | 239 | struct ft_node_acl *acl = container_of(se_acl, |
240 | struct ft_node_acl, se_node_acl); | 240 | struct ft_node_acl, se_node_acl); |
241 | 241 | ||
242 | FT_CONF_DBG("del acl %s\n", | 242 | pr_debug("del acl %s\n", |
243 | config_item_name(&se_acl->acl_group.cg_item)); | 243 | config_item_name(&se_acl->acl_group.cg_item)); |
244 | 244 | ||
245 | tpg = container_of(se_tpg, struct ft_tpg, se_tpg); | 245 | tpg = container_of(se_tpg, struct ft_tpg, se_tpg); |
246 | FT_CONF_DBG("del acl %p se_acl %p tpg %p se_tpg %p\n", | 246 | pr_debug("del acl %p se_acl %p tpg %p se_tpg %p\n", |
247 | acl, se_acl, tpg, &tpg->se_tpg); | 247 | acl, se_acl, tpg, &tpg->se_tpg); |
248 | 248 | ||
249 | core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1); | 249 | core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1); |
@@ -260,11 +260,11 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata) | |||
260 | spin_lock_bh(&se_tpg->acl_node_lock); | 260 | spin_lock_bh(&se_tpg->acl_node_lock); |
261 | list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) { | 261 | list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) { |
262 | acl = container_of(se_acl, struct ft_node_acl, se_node_acl); | 262 | acl = container_of(se_acl, struct ft_node_acl, se_node_acl); |
263 | FT_CONF_DBG("acl %p port_name %llx\n", | 263 | pr_debug("acl %p port_name %llx\n", |
264 | acl, (unsigned long long)acl->node_auth.port_name); | 264 | acl, (unsigned long long)acl->node_auth.port_name); |
265 | if (acl->node_auth.port_name == rdata->ids.port_name || | 265 | if (acl->node_auth.port_name == rdata->ids.port_name || |
266 | acl->node_auth.node_name == rdata->ids.node_name) { | 266 | acl->node_auth.node_name == rdata->ids.node_name) { |
267 | FT_CONF_DBG("acl %p port_name %llx matched\n", acl, | 267 | pr_debug("acl %p port_name %llx matched\n", acl, |
268 | (unsigned long long)rdata->ids.port_name); | 268 | (unsigned long long)rdata->ids.port_name); |
269 | found = acl; | 269 | found = acl; |
270 | /* XXX need to hold onto ACL */ | 270 | /* XXX need to hold onto ACL */ |
@@ -280,11 +280,11 @@ struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg) | |||
280 | struct ft_node_acl *acl; | 280 | struct ft_node_acl *acl; |
281 | 281 | ||
282 | acl = kzalloc(sizeof(*acl), GFP_KERNEL); | 282 | acl = kzalloc(sizeof(*acl), GFP_KERNEL); |
283 | if (!(acl)) { | 283 | if (!acl) { |
284 | printk(KERN_ERR "Unable to allocate struct ft_node_acl\n"); | 284 | pr_err("Unable to allocate struct ft_node_acl\n"); |
285 | return NULL; | 285 | return NULL; |
286 | } | 286 | } |
287 | FT_CONF_DBG("acl %p\n", acl); | 287 | pr_debug("acl %p\n", acl); |
288 | return &acl->se_node_acl; | 288 | return &acl->se_node_acl; |
289 | } | 289 | } |
290 | 290 | ||
@@ -294,7 +294,7 @@ static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg, | |||
294 | struct ft_node_acl *acl = container_of(se_acl, | 294 | struct ft_node_acl *acl = container_of(se_acl, |
295 | struct ft_node_acl, se_node_acl); | 295 | struct ft_node_acl, se_node_acl); |
296 | 296 | ||
297 | FT_CONF_DBG(KERN_INFO "acl %p\n", acl); | 297 | pr_debug("acl %p\n", acl); |
298 | kfree(acl); | 298 | kfree(acl); |
299 | } | 299 | } |
300 | 300 | ||
@@ -311,7 +311,7 @@ static struct se_portal_group *ft_add_tpg( | |||
311 | unsigned long index; | 311 | unsigned long index; |
312 | int ret; | 312 | int ret; |
313 | 313 | ||
314 | FT_CONF_DBG("tcm_fc: add tpg %s\n", name); | 314 | pr_debug("tcm_fc: add tpg %s\n", name); |
315 | 315 | ||
316 | /* | 316 | /* |
317 | * Name must be "tpgt_" followed by the index. | 317 | * Name must be "tpgt_" followed by the index. |
@@ -331,7 +331,7 @@ static struct se_portal_group *ft_add_tpg( | |||
331 | transport_init_queue_obj(&tpg->qobj); | 331 | transport_init_queue_obj(&tpg->qobj); |
332 | 332 | ||
333 | ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg, | 333 | ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg, |
334 | (void *)tpg, TRANSPORT_TPG_TYPE_NORMAL); | 334 | tpg, TRANSPORT_TPG_TYPE_NORMAL); |
335 | if (ret < 0) { | 335 | if (ret < 0) { |
336 | kfree(tpg); | 336 | kfree(tpg); |
337 | return NULL; | 337 | return NULL; |
@@ -354,7 +354,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg) | |||
354 | { | 354 | { |
355 | struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg); | 355 | struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg); |
356 | 356 | ||
357 | FT_CONF_DBG("del tpg %s\n", | 357 | pr_debug("del tpg %s\n", |
358 | config_item_name(&tpg->se_tpg.tpg_group.cg_item)); | 358 | config_item_name(&tpg->se_tpg.tpg_group.cg_item)); |
359 | 359 | ||
360 | kthread_stop(tpg->thread); | 360 | kthread_stop(tpg->thread); |
@@ -412,7 +412,7 @@ static struct se_wwn *ft_add_lport( | |||
412 | struct ft_lport_acl *old_lacl; | 412 | struct ft_lport_acl *old_lacl; |
413 | u64 wwpn; | 413 | u64 wwpn; |
414 | 414 | ||
415 | FT_CONF_DBG("add lport %s\n", name); | 415 | pr_debug("add lport %s\n", name); |
416 | if (ft_parse_wwn(name, &wwpn, 1) < 0) | 416 | if (ft_parse_wwn(name, &wwpn, 1) < 0) |
417 | return NULL; | 417 | return NULL; |
418 | lacl = kzalloc(sizeof(*lacl), GFP_KERNEL); | 418 | lacl = kzalloc(sizeof(*lacl), GFP_KERNEL); |
@@ -441,7 +441,7 @@ static void ft_del_lport(struct se_wwn *wwn) | |||
441 | struct ft_lport_acl *lacl = container_of(wwn, | 441 | struct ft_lport_acl *lacl = container_of(wwn, |
442 | struct ft_lport_acl, fc_lport_wwn); | 442 | struct ft_lport_acl, fc_lport_wwn); |
443 | 443 | ||
444 | FT_CONF_DBG("del lport %s\n", | 444 | pr_debug("del lport %s\n", |
445 | config_item_name(&wwn->wwn_group.cg_item)); | 445 | config_item_name(&wwn->wwn_group.cg_item)); |
446 | mutex_lock(&ft_lport_lock); | 446 | mutex_lock(&ft_lport_lock); |
447 | list_del(&lacl->list); | 447 | list_del(&lacl->list); |
@@ -536,8 +536,7 @@ static struct target_core_fabric_ops ft_fabric_ops = { | |||
536 | .tpg_release_fabric_acl = ft_tpg_release_fabric_acl, | 536 | .tpg_release_fabric_acl = ft_tpg_release_fabric_acl, |
537 | .tpg_get_inst_index = ft_tpg_get_inst_index, | 537 | .tpg_get_inst_index = ft_tpg_get_inst_index, |
538 | .check_stop_free = ft_check_stop_free, | 538 | .check_stop_free = ft_check_stop_free, |
539 | .release_cmd_to_pool = ft_release_cmd, | 539 | .release_cmd = ft_release_cmd, |
540 | .release_cmd_direct = ft_release_cmd, | ||
541 | .shutdown_session = ft_sess_shutdown, | 540 | .shutdown_session = ft_sess_shutdown, |
542 | .close_session = ft_sess_close, | 541 | .close_session = ft_sess_close, |
543 | .stop_session = ft_sess_stop, | 542 | .stop_session = ft_sess_stop, |
@@ -550,7 +549,6 @@ static struct target_core_fabric_ops ft_fabric_ops = { | |||
550 | .set_default_node_attributes = ft_set_default_node_attr, | 549 | .set_default_node_attributes = ft_set_default_node_attr, |
551 | .get_task_tag = ft_get_task_tag, | 550 | .get_task_tag = ft_get_task_tag, |
552 | .get_cmd_state = ft_get_cmd_state, | 551 | .get_cmd_state = ft_get_cmd_state, |
553 | .new_cmd_failure = ft_new_cmd_failure, | ||
554 | .queue_data_in = ft_queue_data_in, | 552 | .queue_data_in = ft_queue_data_in, |
555 | .queue_status = ft_queue_status, | 553 | .queue_status = ft_queue_status, |
556 | .queue_tm_rsp = ft_queue_tm_resp, | 554 | .queue_tm_rsp = ft_queue_tm_resp, |
@@ -582,10 +580,10 @@ int ft_register_configfs(void) | |||
582 | * Register the top level struct config_item_type with TCM core | 580 | * Register the top level struct config_item_type with TCM core |
583 | */ | 581 | */ |
584 | fabric = target_fabric_configfs_init(THIS_MODULE, "fc"); | 582 | fabric = target_fabric_configfs_init(THIS_MODULE, "fc"); |
585 | if (!fabric) { | 583 | if (IS_ERR(fabric)) { |
586 | printk(KERN_INFO "%s: target_fabric_configfs_init() failed!\n", | 584 | pr_err("%s: target_fabric_configfs_init() failed!\n", |
587 | __func__); | 585 | __func__); |
588 | return -1; | 586 | return PTR_ERR(fabric); |
589 | } | 587 | } |
590 | fabric->tf_ops = ft_fabric_ops; | 588 | fabric->tf_ops = ft_fabric_ops; |
591 | 589 | ||
@@ -610,11 +608,8 @@ int ft_register_configfs(void) | |||
610 | */ | 608 | */ |
611 | ret = target_fabric_configfs_register(fabric); | 609 | ret = target_fabric_configfs_register(fabric); |
612 | if (ret < 0) { | 610 | if (ret < 0) { |
613 | FT_CONF_DBG("target_fabric_configfs_register() for" | 611 | pr_debug("target_fabric_configfs_register() for" |
614 | " FC Target failed!\n"); | 612 | " FC Target failed!\n"); |
615 | printk(KERN_INFO | ||
616 | "%s: target_fabric_configfs_register() failed!\n", | ||
617 | __func__); | ||
618 | target_fabric_configfs_free(fabric); | 613 | target_fabric_configfs_free(fabric); |
619 | return -1; | 614 | return -1; |
620 | } | 615 | } |
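The ft_register_configfs() hunks track an API change in the core: target_fabric_configfs_init() now reports failure through the ERR_PTR() convention rather than returning NULL, so the caller tests IS_ERR() and propagates the encoded errno with PTR_ERR() instead of inventing -1. The general idiom, sketched with a stand-in allocator name:

    #include <linux/err.h>

    struct widget *w;

    w = alloc_widget();             /* hypothetical ERR_PTR-returning allocator */
    if (IS_ERR(w))
            return PTR_ERR(w);      /* pass -ENOMEM, -EINVAL, ... through unchanged */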
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index 8c4a24077d9d..11e6483fc127 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/configfs.h> | 39 | #include <linux/configfs.h> |
40 | #include <linux/ctype.h> | 40 | #include <linux/ctype.h> |
41 | #include <linux/hash.h> | 41 | #include <linux/hash.h> |
42 | #include <linux/ratelimit.h> | ||
42 | #include <asm/unaligned.h> | 43 | #include <asm/unaligned.h> |
43 | #include <scsi/scsi.h> | 44 | #include <scsi/scsi.h> |
44 | #include <scsi/scsi_host.h> | 45 | #include <scsi/scsi_host.h> |
@@ -65,21 +66,20 @@ | |||
65 | int ft_queue_data_in(struct se_cmd *se_cmd) | 66 | int ft_queue_data_in(struct se_cmd *se_cmd) |
66 | { | 67 | { |
67 | struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); | 68 | struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); |
68 | struct se_transport_task *task; | ||
69 | struct fc_frame *fp = NULL; | 69 | struct fc_frame *fp = NULL; |
70 | struct fc_exch *ep; | 70 | struct fc_exch *ep; |
71 | struct fc_lport *lport; | 71 | struct fc_lport *lport; |
72 | struct se_mem *mem; | 72 | struct scatterlist *sg = NULL; |
73 | size_t remaining; | 73 | size_t remaining; |
74 | u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF; | 74 | u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF; |
75 | u32 mem_off; | 75 | u32 mem_off = 0; |
76 | u32 fh_off = 0; | 76 | u32 fh_off = 0; |
77 | u32 frame_off = 0; | 77 | u32 frame_off = 0; |
78 | size_t frame_len = 0; | 78 | size_t frame_len = 0; |
79 | size_t mem_len; | 79 | size_t mem_len = 0; |
80 | size_t tlen; | 80 | size_t tlen; |
81 | size_t off_in_page; | 81 | size_t off_in_page; |
82 | struct page *page; | 82 | struct page *page = NULL; |
83 | int use_sg; | 83 | int use_sg; |
84 | int error; | 84 | int error; |
85 | void *page_addr; | 85 | void *page_addr; |
@@ -90,24 +90,17 @@ int ft_queue_data_in(struct se_cmd *se_cmd) | |||
90 | lport = ep->lp; | 90 | lport = ep->lp; |
91 | cmd->seq = lport->tt.seq_start_next(cmd->seq); | 91 | cmd->seq = lport->tt.seq_start_next(cmd->seq); |
92 | 92 | ||
93 | task = T_TASK(se_cmd); | ||
94 | BUG_ON(!task); | ||
95 | remaining = se_cmd->data_length; | 93 | remaining = se_cmd->data_length; |
96 | 94 | ||
97 | /* | 95 | /* |
98 | * Set up to use the first mem list entry, if any. | 96 | * Set up to use the first mem list entry, unless there is no data. |
99 | */ | 97 | */ |
100 | if (task->t_tasks_se_num) { | 98 | BUG_ON(remaining && !se_cmd->t_data_sg); |
101 | mem = list_first_entry(task->t_mem_list, | 99 | if (remaining) { |
102 | struct se_mem, se_list); | 100 | sg = se_cmd->t_data_sg; |
103 | mem_len = mem->se_len; | 101 | mem_len = sg->length; |
104 | mem_off = mem->se_off; | 102 | mem_off = sg->offset; |
105 | page = mem->se_page; | 103 | page = sg_page(sg); |
106 | } else { | ||
107 | mem = NULL; | ||
108 | mem_len = remaining; | ||
109 | mem_off = 0; | ||
110 | page = NULL; | ||
111 | } | 104 | } |
112 | 105 | ||
113 | /* no scatter/gather in skb for odd word length due to fc_seq_send() */ | 106 | /* no scatter/gather in skb for odd word length due to fc_seq_send() */ |
@@ -115,12 +108,10 @@ int ft_queue_data_in(struct se_cmd *se_cmd) | |||
115 | 108 | ||
116 | while (remaining) { | 109 | while (remaining) { |
117 | if (!mem_len) { | 110 | if (!mem_len) { |
118 | BUG_ON(!mem); | 111 | sg = sg_next(sg); |
119 | mem = list_entry(mem->se_list.next, | 112 | mem_len = min((size_t)sg->length, remaining); |
120 | struct se_mem, se_list); | 113 | mem_off = sg->offset; |
121 | mem_len = min((size_t)mem->se_len, remaining); | 114 | page = sg_page(sg); |
122 | mem_off = mem->se_off; | ||
123 | page = mem->se_page; | ||
124 | } | 115 | } |
125 | if (!frame_len) { | 116 | if (!frame_len) { |
126 | /* | 117 | /* |
@@ -148,18 +139,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd) | |||
148 | tlen = min(mem_len, frame_len); | 139 | tlen = min(mem_len, frame_len); |
149 | 140 | ||
150 | if (use_sg) { | 141 | if (use_sg) { |
151 | if (!mem) { | 142 | off_in_page = mem_off; |
152 | BUG_ON(!task->t_task_buf); | ||
153 | page_addr = task->t_task_buf + mem_off; | ||
154 | /* | ||
155 | * In this case, offset is 'offset_in_page' of | ||
156 | * (t_task_buf + mem_off) instead of 'mem_off'. | ||
157 | */ | ||
158 | off_in_page = offset_in_page(page_addr); | ||
159 | page = virt_to_page(page_addr); | ||
160 | tlen = min(tlen, PAGE_SIZE - off_in_page); | ||
161 | } else | ||
162 | off_in_page = mem_off; | ||
163 | BUG_ON(!page); | 143 | BUG_ON(!page); |
164 | get_page(page); | 144 | get_page(page); |
165 | skb_fill_page_desc(fp_skb(fp), | 145 | skb_fill_page_desc(fp_skb(fp), |
@@ -169,7 +149,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd) | |||
169 | fp_skb(fp)->data_len += tlen; | 149 | fp_skb(fp)->data_len += tlen; |
170 | fp_skb(fp)->truesize += | 150 | fp_skb(fp)->truesize += |
171 | PAGE_SIZE << compound_order(page); | 151 | PAGE_SIZE << compound_order(page); |
172 | } else if (mem) { | 152 | } else { |
173 | BUG_ON(!page); | 153 | BUG_ON(!page); |
174 | from = kmap_atomic(page + (mem_off >> PAGE_SHIFT), | 154 | from = kmap_atomic(page + (mem_off >> PAGE_SHIFT), |
175 | KM_SOFTIRQ0); | 155 | KM_SOFTIRQ0); |
@@ -180,10 +160,6 @@ int ft_queue_data_in(struct se_cmd *se_cmd) | |||
180 | memcpy(to, from, tlen); | 160 | memcpy(to, from, tlen); |
181 | kunmap_atomic(page_addr, KM_SOFTIRQ0); | 161 | kunmap_atomic(page_addr, KM_SOFTIRQ0); |
182 | to += tlen; | 162 | to += tlen; |
183 | } else { | ||
184 | from = task->t_task_buf + mem_off; | ||
185 | memcpy(to, from, tlen); | ||
186 | to += tlen; | ||
187 | } | 163 | } |
188 | 164 | ||
189 | mem_off += tlen; | 165 | mem_off += tlen; |
@@ -201,8 +177,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd) | |||
201 | error = lport->tt.seq_send(lport, cmd->seq, fp); | 177 | error = lport->tt.seq_send(lport, cmd->seq, fp); |
202 | if (error) { | 178 | if (error) { |
203 | /* XXX For now, initiator will retry */ | 179 | /* XXX For now, initiator will retry */ |
204 | if (printk_ratelimit()) | 180 | pr_err_ratelimited("%s: Failed to send frame %p, " |
205 | printk(KERN_ERR "%s: Failed to send frame %p, " | ||
206 | "xid <0x%x>, remaining %zu, " | 181 | "xid <0x%x>, remaining %zu, " |
207 | "lso_max <0x%x>\n", | 182 | "lso_max <0x%x>\n", |
208 | __func__, fp, ep->xid, | 183 | __func__, fp, ep->xid, |
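This hunk, together with the new linux/ratelimit.h include at the top of tfc_io.c, replaces the open-coded printk_ratelimit() test with pr_err_ratelimited(). The helper keeps its own per-callsite ratelimit state, so one noisy message can no longer starve unrelated ones the way the global printk_ratelimit() state could. Before and after, reduced to essentials:

    #include <linux/ratelimit.h>

    /* old, open-coded form: shares one global ratelimit state */
    if (printk_ratelimit())
            printk(KERN_ERR "send failed\n");

    /* new form: same suppression, but with per-callsite state */
    pr_err_ratelimited("send failed\n");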
@@ -221,24 +196,20 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) | |||
221 | struct fc_seq *seq = cmd->seq; | 196 | struct fc_seq *seq = cmd->seq; |
222 | struct fc_exch *ep; | 197 | struct fc_exch *ep; |
223 | struct fc_lport *lport; | 198 | struct fc_lport *lport; |
224 | struct se_transport_task *task; | ||
225 | struct fc_frame_header *fh; | 199 | struct fc_frame_header *fh; |
226 | struct se_mem *mem; | 200 | struct scatterlist *sg = NULL; |
227 | u32 mem_off; | 201 | u32 mem_off = 0; |
228 | u32 rel_off; | 202 | u32 rel_off; |
229 | size_t frame_len; | 203 | size_t frame_len; |
230 | size_t mem_len; | 204 | size_t mem_len = 0; |
231 | size_t tlen; | 205 | size_t tlen; |
232 | struct page *page; | 206 | struct page *page = NULL; |
233 | void *page_addr; | 207 | void *page_addr; |
234 | void *from; | 208 | void *from; |
235 | void *to; | 209 | void *to; |
236 | u32 f_ctl; | 210 | u32 f_ctl; |
237 | void *buf; | 211 | void *buf; |
238 | 212 | ||
239 | task = T_TASK(se_cmd); | ||
240 | BUG_ON(!task); | ||
241 | |||
242 | fh = fc_frame_header_get(fp); | 213 | fh = fc_frame_header_get(fp); |
243 | if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF)) | 214 | if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF)) |
244 | goto drop; | 215 | goto drop; |
@@ -251,7 +222,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) | |||
251 | */ | 222 | */ |
252 | buf = fc_frame_payload_get(fp, 1); | 223 | buf = fc_frame_payload_get(fp, 1); |
253 | if (cmd->was_ddp_setup && buf) { | 224 | if (cmd->was_ddp_setup && buf) { |
254 | printk(KERN_INFO "%s: When DDP was set up, not expected to " | 225 | pr_debug("%s: When DDP was set up, not expected to " |
255 | "receive frame with payload; payload shall be " | 226 | "receive frame with payload; payload shall be " |
256 | "copied directly to buffer instead of coming " | 227 | "copied directly to buffer instead of coming " |
257 | "via legacy receive queues\n", __func__); | 228 | "via legacy receive queues\n", __func__); |
@@ -289,7 +260,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) | |||
289 | * this point, but just in case it is required in future | 260 | * this point, but just in case it is required in future |
290 | * for debugging or any other purpose | 261 | * for debugging or any other purpose |
291 | */ | 262 | */ |
292 | printk(KERN_ERR "%s: Received frame with TSI bit not" | 263 | pr_err("%s: Received frame with TSI bit not" |
293 | " being SET, dropping the frame, " | 264 | " being SET, dropping the frame, " |
294 | "cmd->sg <%p>, cmd->sg_cnt <0x%x>\n", | 265 | "cmd->sg <%p>, cmd->sg_cnt <0x%x>\n", |
295 | __func__, cmd->sg, cmd->sg_cnt); | 266 | __func__, cmd->sg, cmd->sg_cnt); |
@@ -312,29 +283,22 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) | |||
312 | frame_len = se_cmd->data_length - rel_off; | 283 | frame_len = se_cmd->data_length - rel_off; |
313 | 284 | ||
314 | /* | 285 | /* |
315 | * Set up to use the first mem list entry, if any. | 286 | * Set up to use the first mem list entry, unless there is no data. |
316 | */ | 287 | */ |
317 | if (task->t_tasks_se_num) { | 288 | BUG_ON(frame_len && !se_cmd->t_data_sg); |
318 | mem = list_first_entry(task->t_mem_list, | 289 | if (frame_len) { |
319 | struct se_mem, se_list); | 290 | sg = se_cmd->t_data_sg; |
320 | mem_len = mem->se_len; | 291 | mem_len = sg->length; |
321 | mem_off = mem->se_off; | 292 | mem_off = sg->offset; |
322 | page = mem->se_page; | 293 | page = sg_page(sg); |
323 | } else { | ||
324 | mem = NULL; | ||
325 | page = NULL; | ||
326 | mem_off = 0; | ||
327 | mem_len = frame_len; | ||
328 | } | 294 | } |
329 | 295 | ||
330 | while (frame_len) { | 296 | while (frame_len) { |
331 | if (!mem_len) { | 297 | if (!mem_len) { |
332 | BUG_ON(!mem); | 298 | sg = sg_next(sg); |
333 | mem = list_entry(mem->se_list.next, | 299 | mem_len = sg->length; |
334 | struct se_mem, se_list); | 300 | mem_off = sg->offset; |
335 | mem_len = mem->se_len; | 301 | page = sg_page(sg); |
336 | mem_off = mem->se_off; | ||
337 | page = mem->se_page; | ||
338 | } | 302 | } |
339 | if (rel_off >= mem_len) { | 303 | if (rel_off >= mem_len) { |
340 | rel_off -= mem_len; | 304 | rel_off -= mem_len; |
@@ -347,19 +311,15 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) | |||
347 | 311 | ||
348 | tlen = min(mem_len, frame_len); | 312 | tlen = min(mem_len, frame_len); |
349 | 313 | ||
350 | if (mem) { | 314 | to = kmap_atomic(page + (mem_off >> PAGE_SHIFT), |
351 | to = kmap_atomic(page + (mem_off >> PAGE_SHIFT), | 315 | KM_SOFTIRQ0); |
352 | KM_SOFTIRQ0); | 316 | page_addr = to; |
353 | page_addr = to; | 317 | to += mem_off & ~PAGE_MASK; |
354 | to += mem_off & ~PAGE_MASK; | 318 | tlen = min(tlen, (size_t)(PAGE_SIZE - |
355 | tlen = min(tlen, (size_t)(PAGE_SIZE - | 319 | (mem_off & ~PAGE_MASK))); |
356 | (mem_off & ~PAGE_MASK))); | 320 | memcpy(to, from, tlen); |
357 | memcpy(to, from, tlen); | 321 | kunmap_atomic(page_addr, KM_SOFTIRQ0); |
358 | kunmap_atomic(page_addr, KM_SOFTIRQ0); | 322 | |
359 | } else { | ||
360 | to = task->t_task_buf + mem_off; | ||
361 | memcpy(to, from, tlen); | ||
362 | } | ||
363 | from += tlen; | 323 | from += tlen; |
364 | frame_len -= tlen; | 324 | frame_len -= tlen; |
365 | mem_off += tlen; | 325 | mem_off += tlen; |
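With the struct se_mem list gone, both ft_queue_data_in() and ft_recv_write_data() walk the command's t_data_sg scatterlist directly: sg_page()/sg->offset/sg->length describe each chunk, sg_next() advances, and offsets are clamped so kmap_atomic() never maps across a page boundary. The copy loop these hunks implement, condensed into a sketch (the function name is hypothetical; the KM_SOFTIRQ0 slot matches the era of this code):

    #include <linux/scatterlist.h>
    #include <linux/highmem.h>

    /* sketch: copy 'len' bytes from 'from' into a scatterlist, page by page */
    static void copy_to_sgl(struct scatterlist *sg, void *from, size_t len)
    {
            while (len && sg) {
                    struct page *page = sg_page(sg);
                    u32 off = sg->offset;
                    size_t mem_len = sg->length;
                    size_t tlen;
                    void *page_addr;
                    void *to;

                    while (mem_len && len) {
                            tlen = min(mem_len, len);
                            /* clamp to the page the current offset falls in */
                            tlen = min(tlen, (size_t)(PAGE_SIZE -
                                                      (off & ~PAGE_MASK)));
                            page_addr = kmap_atomic(page + (off >> PAGE_SHIFT),
                                                    KM_SOFTIRQ0);
                            to = page_addr + (off & ~PAGE_MASK);
                            memcpy(to, from, tlen);
                            kunmap_atomic(page_addr, KM_SOFTIRQ0);
                            from += tlen;
                            off += tlen;
                            mem_len -= tlen;
                            len -= tlen;
                    }
                    sg = sg_next(sg);       /* NULL at the end of the list */
            }
    }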
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index 7491e21cc6ae..fbcbb3d1d06b 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c | |||
@@ -198,13 +198,13 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id) | |||
198 | if (sess->port_id == port_id) { | 198 | if (sess->port_id == port_id) { |
199 | kref_get(&sess->kref); | 199 | kref_get(&sess->kref); |
200 | rcu_read_unlock(); | 200 | rcu_read_unlock(); |
201 | FT_SESS_DBG("port_id %x found %p\n", port_id, sess); | 201 | pr_debug("port_id %x found %p\n", port_id, sess); |
202 | return sess; | 202 | return sess; |
203 | } | 203 | } |
204 | } | 204 | } |
205 | out: | 205 | out: |
206 | rcu_read_unlock(); | 206 | rcu_read_unlock(); |
207 | FT_SESS_DBG("port_id %x not found\n", port_id); | 207 | pr_debug("port_id %x not found\n", port_id); |
208 | return NULL; | 208 | return NULL; |
209 | } | 209 | } |
210 | 210 | ||
@@ -240,7 +240,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id, | |||
240 | hlist_add_head_rcu(&sess->hash, head); | 240 | hlist_add_head_rcu(&sess->hash, head); |
241 | tport->sess_count++; | 241 | tport->sess_count++; |
242 | 242 | ||
243 | FT_SESS_DBG("port_id %x sess %p\n", port_id, sess); | 243 | pr_debug("port_id %x sess %p\n", port_id, sess); |
244 | 244 | ||
245 | transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl, | 245 | transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl, |
246 | sess->se_sess, sess); | 246 | sess->se_sess, sess); |
@@ -314,7 +314,7 @@ int ft_sess_shutdown(struct se_session *se_sess) | |||
314 | { | 314 | { |
315 | struct ft_sess *sess = se_sess->fabric_sess_ptr; | 315 | struct ft_sess *sess = se_sess->fabric_sess_ptr; |
316 | 316 | ||
317 | FT_SESS_DBG("port_id %x\n", sess->port_id); | 317 | pr_debug("port_id %x\n", sess->port_id); |
318 | return 1; | 318 | return 1; |
319 | } | 319 | } |
320 | 320 | ||
@@ -335,7 +335,7 @@ void ft_sess_close(struct se_session *se_sess) | |||
335 | mutex_unlock(&ft_lport_lock); | 335 | mutex_unlock(&ft_lport_lock); |
336 | return; | 336 | return; |
337 | } | 337 | } |
338 | FT_SESS_DBG("port_id %x\n", port_id); | 338 | pr_debug("port_id %x\n", port_id); |
339 | ft_sess_unhash(sess); | 339 | ft_sess_unhash(sess); |
340 | mutex_unlock(&ft_lport_lock); | 340 | mutex_unlock(&ft_lport_lock); |
341 | transport_deregister_session_configfs(se_sess); | 341 | transport_deregister_session_configfs(se_sess); |
@@ -348,7 +348,7 @@ void ft_sess_stop(struct se_session *se_sess, int sess_sleep, int conn_sleep) | |||
348 | { | 348 | { |
349 | struct ft_sess *sess = se_sess->fabric_sess_ptr; | 349 | struct ft_sess *sess = se_sess->fabric_sess_ptr; |
350 | 350 | ||
351 | FT_SESS_DBG("port_id %x\n", sess->port_id); | 351 | pr_debug("port_id %x\n", sess->port_id); |
352 | } | 352 | } |
353 | 353 | ||
354 | int ft_sess_logged_in(struct se_session *se_sess) | 354 | int ft_sess_logged_in(struct se_session *se_sess) |
@@ -458,7 +458,7 @@ static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len, | |||
458 | mutex_lock(&ft_lport_lock); | 458 | mutex_lock(&ft_lport_lock); |
459 | ret = ft_prli_locked(rdata, spp_len, rspp, spp); | 459 | ret = ft_prli_locked(rdata, spp_len, rspp, spp); |
460 | mutex_unlock(&ft_lport_lock); | 460 | mutex_unlock(&ft_lport_lock); |
461 | FT_SESS_DBG("port_id %x flags %x ret %x\n", | 461 | pr_debug("port_id %x flags %x ret %x\n", |
462 | rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret); | 462 | rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret); |
463 | return ret; | 463 | return ret; |
464 | } | 464 | } |
@@ -518,11 +518,11 @@ static void ft_recv(struct fc_lport *lport, struct fc_frame *fp) | |||
518 | struct ft_sess *sess; | 518 | struct ft_sess *sess; |
519 | u32 sid = fc_frame_sid(fp); | 519 | u32 sid = fc_frame_sid(fp); |
520 | 520 | ||
521 | FT_SESS_DBG("sid %x\n", sid); | 521 | pr_debug("sid %x\n", sid); |
522 | 522 | ||
523 | sess = ft_sess_get(lport, sid); | 523 | sess = ft_sess_get(lport, sid); |
524 | if (!sess) { | 524 | if (!sess) { |
525 | FT_SESS_DBG("sid %x sess lookup failed\n", sid); | 525 | pr_debug("sid %x sess lookup failed\n", sid); |
526 | /* TBD XXX - if FCP_CMND, send PRLO */ | 526 | /* TBD XXX - if FCP_CMND, send PRLO */ |
527 | fc_frame_free(fp); | 527 | fc_frame_free(fp); |
528 | return; | 528 | return; |
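The tfc_sess.c changes are almost entirely the FT_SESS_DBG to pr_debug() conversion, which puts these messages under the kernel's dynamic debug facility (selectable per file or module when CONFIG_DYNAMIC_DEBUG is set) instead of a module-private macro. The one substantive pattern worth noting is ft_sess_get() in the first hunk: an RCU-protected hash lookup that pins the entry with kref_get() before leaving the read-side critical section, so the session cannot be freed between lookup and use. A condensed sketch with illustrative names:

    #include <linux/rcupdate.h>
    #include <linux/rculist.h>
    #include <linux/kref.h>

    struct sess {
            struct hlist_node hash;
            u32 port_id;
            struct kref kref;
    };

    static struct sess *sess_get(struct hlist_head *head, u32 port_id)
    {
            struct sess *s;
            struct hlist_node *pos;

            rcu_read_lock();
            hlist_for_each_entry_rcu(s, pos, head, hash) {
                    if (s->port_id == port_id) {
                            kref_get(&s->kref);     /* pin before leaving RCU */
                            rcu_read_unlock();
                            return s;
                    }
            }
            rcu_read_unlock();
            return NULL;
    }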
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 561ac99def5a..27040653005e 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h | |||
@@ -9,12 +9,13 @@ | |||
9 | #include <net/sock.h> | 9 | #include <net/sock.h> |
10 | #include <net/tcp.h> | 10 | #include <net/tcp.h> |
11 | 11 | ||
12 | #define TARGET_CORE_MOD_VERSION "v4.0.0-rc7-ml" | 12 | #define TARGET_CORE_MOD_VERSION "v4.1.0-rc1-ml" |
13 | #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT)) | 13 | #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT)) |
14 | 14 | ||
15 | /* Used by transport_generic_allocate_iovecs() */ | 15 | /* Used by transport_generic_allocate_iovecs() */ |
16 | #define TRANSPORT_IOV_DATA_BUFFER 5 | 16 | #define TRANSPORT_IOV_DATA_BUFFER 5 |
17 | /* Maximum Number of LUNs per Target Portal Group */ | 17 | /* Maximum Number of LUNs per Target Portal Group */ |
18 | /* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */ | ||
18 | #define TRANSPORT_MAX_LUNS_PER_TPG 256 | 19 | #define TRANSPORT_MAX_LUNS_PER_TPG 256 |
19 | /* | 20 | /* |
20 | * By default we use 32-byte CDBs in TCM Core and subsystem plugin code. | 21 | * By default we use 32-byte CDBs in TCM Core and subsystem plugin code. |
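The 511 figure in the new comment follows from the SPC REPORT LUNS format: the response carries an 8-byte header followed by one 8-byte descriptor per LUN, so 511 LUNs is the largest count whose response still fits a single 4 KiB page (8 + 511 * 8 = 4096). Expressed as a sketch (the macro name is hypothetical, not part of this header):

    /* REPORT LUNS response: 8-byte header + 8 bytes per LUN descriptor */
    #define REPORT_LUNS_RESP_LEN(nluns)     (8 + 8 * (nluns))
    /* REPORT_LUNS_RESP_LEN(511) == 4096, i.e. exactly one 4 KiB page */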
@@ -99,6 +100,7 @@ enum transport_state_table { | |||
99 | TRANSPORT_FREE = 15, | 100 | TRANSPORT_FREE = 15, |
100 | TRANSPORT_NEW_CMD_MAP = 16, | 101 | TRANSPORT_NEW_CMD_MAP = 16, |
101 | TRANSPORT_FREE_CMD_INTR = 17, | 102 | TRANSPORT_FREE_CMD_INTR = 17, |
103 | TRANSPORT_COMPLETE_QF_WP = 18, | ||
102 | }; | 104 | }; |
103 | 105 | ||
104 | /* Used for struct se_cmd->se_cmd_flags */ | 106 | /* Used for struct se_cmd->se_cmd_flags */ |
@@ -108,27 +110,22 @@ enum se_cmd_flags_table { | |||
108 | SCF_EMULATED_TASK_SENSE = 0x00000004, | 110 | SCF_EMULATED_TASK_SENSE = 0x00000004, |
109 | SCF_SCSI_DATA_SG_IO_CDB = 0x00000008, | 111 | SCF_SCSI_DATA_SG_IO_CDB = 0x00000008, |
110 | SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010, | 112 | SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010, |
111 | SCF_SCSI_CONTROL_NONSG_IO_CDB = 0x00000020, | ||
112 | SCF_SCSI_NON_DATA_CDB = 0x00000040, | 113 | SCF_SCSI_NON_DATA_CDB = 0x00000040, |
113 | SCF_SCSI_CDB_EXCEPTION = 0x00000080, | 114 | SCF_SCSI_CDB_EXCEPTION = 0x00000080, |
114 | SCF_SCSI_RESERVATION_CONFLICT = 0x00000100, | 115 | SCF_SCSI_RESERVATION_CONFLICT = 0x00000100, |
115 | SCF_CMD_PASSTHROUGH_NOALLOC = 0x00000200, | ||
116 | SCF_SE_CMD_FAILED = 0x00000400, | 116 | SCF_SE_CMD_FAILED = 0x00000400, |
117 | SCF_SE_LUN_CMD = 0x00000800, | 117 | SCF_SE_LUN_CMD = 0x00000800, |
118 | SCF_SE_ALLOW_EOO = 0x00001000, | 118 | SCF_SE_ALLOW_EOO = 0x00001000, |
119 | SCF_SE_DISABLE_ONLINE_CHECK = 0x00002000, | ||
120 | SCF_SENT_CHECK_CONDITION = 0x00004000, | 119 | SCF_SENT_CHECK_CONDITION = 0x00004000, |
121 | SCF_OVERFLOW_BIT = 0x00008000, | 120 | SCF_OVERFLOW_BIT = 0x00008000, |
122 | SCF_UNDERFLOW_BIT = 0x00010000, | 121 | SCF_UNDERFLOW_BIT = 0x00010000, |
123 | SCF_SENT_DELAYED_TAS = 0x00020000, | 122 | SCF_SENT_DELAYED_TAS = 0x00020000, |
124 | SCF_ALUA_NON_OPTIMIZED = 0x00040000, | 123 | SCF_ALUA_NON_OPTIMIZED = 0x00040000, |
125 | SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000, | 124 | SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000, |
126 | SCF_PASSTHROUGH_SG_TO_MEM = 0x00100000, | 125 | SCF_UNUSED = 0x00100000, |
127 | SCF_PASSTHROUGH_CONTIG_TO_SG = 0x00200000, | ||
128 | SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000, | 126 | SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000, |
129 | SCF_EMULATE_SYNC_CACHE = 0x00800000, | ||
130 | SCF_EMULATE_CDB_ASYNC = 0x01000000, | 127 | SCF_EMULATE_CDB_ASYNC = 0x01000000, |
131 | SCF_EMULATE_SYNC_UNMAP = 0x02000000 | 128 | SCF_EMULATE_QUEUE_FULL = 0x02000000, |
132 | }; | 129 | }; |
133 | 130 | ||
134 | /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ | 131 | /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ |
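These se_cmd_flags values are single bits OR-ed into and tested against se_cmd->se_cmd_flags, which is why dead entries can simply be dropped (or, for SCF_UNUSED at 0x00100000, parked) without renumbering their neighbours. The usage pattern, as already visible in the ft_send_cmd() hunk earlier in this diff:

    /* set: record how the CDB's data is carried */
    se_cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;

    /* test: e.g. the reservation-conflict branch in ft_send_cmd() */
    if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
            ft_queue_status(se_cmd);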
@@ -205,11 +202,6 @@ typedef enum { | |||
205 | SCSI_INDEX_TYPE_MAX | 202 | SCSI_INDEX_TYPE_MAX |
206 | } scsi_index_t; | 203 | } scsi_index_t; |
207 | 204 | ||
208 | struct scsi_index_table { | ||
209 | spinlock_t lock; | ||
210 | u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; | ||
211 | } ____cacheline_aligned; | ||
212 | |||
213 | struct se_cmd; | 205 | struct se_cmd; |
214 | 206 | ||
215 | struct t10_alua { | 207 | struct t10_alua { |
@@ -235,7 +227,7 @@ struct t10_alua_lu_gp { | |||
235 | atomic_t lu_gp_ref_cnt; | 227 | atomic_t lu_gp_ref_cnt; |
236 | spinlock_t lu_gp_lock; | 228 | spinlock_t lu_gp_lock; |
237 | struct config_group lu_gp_group; | 229 | struct config_group lu_gp_group; |
238 | struct list_head lu_gp_list; | 230 | struct list_head lu_gp_node; |
239 | struct list_head lu_gp_mem_list; | 231 | struct list_head lu_gp_mem_list; |
240 | } ____cacheline_aligned; | 232 | } ____cacheline_aligned; |
241 | 233 | ||
@@ -291,10 +283,10 @@ struct t10_vpd { | |||
291 | } ____cacheline_aligned; | 283 | } ____cacheline_aligned; |
292 | 284 | ||
293 | struct t10_wwn { | 285 | struct t10_wwn { |
294 | unsigned char vendor[8]; | 286 | char vendor[8]; |
295 | unsigned char model[16]; | 287 | char model[16]; |
296 | unsigned char revision[4]; | 288 | char revision[4]; |
297 | unsigned char unit_serial[INQUIRY_VPD_SERIAL_LEN]; | 289 | char unit_serial[INQUIRY_VPD_SERIAL_LEN]; |
298 | spinlock_t t10_vpd_lock; | 290 | spinlock_t t10_vpd_lock; |
299 | struct se_subsystem_dev *t10_sub_dev; | 291 | struct se_subsystem_dev *t10_sub_dev; |
300 | struct config_group t10_wwn_group; | 292 | struct config_group t10_wwn_group; |
@@ -366,13 +358,13 @@ struct t10_reservation_ops { | |||
366 | int (*t10_pr_clear)(struct se_cmd *); | 358 | int (*t10_pr_clear)(struct se_cmd *); |
367 | }; | 359 | }; |
368 | 360 | ||
369 | struct t10_reservation_template { | 361 | struct t10_reservation { |
370 | /* Reservation affects all target ports */ | 362 | /* Reservation affects all target ports */ |
371 | int pr_all_tg_pt; | 363 | int pr_all_tg_pt; |
372 | /* Activate Persistence across Target Power Loss enabled | 364 | /* Activate Persistence across Target Power Loss enabled |
373 | * for SCSI device */ | 365 | * for SCSI device */ |
374 | int pr_aptpl_active; | 366 | int pr_aptpl_active; |
375 | /* Used by struct t10_reservation_template->pr_aptpl_buf_len */ | 367 | /* Used by struct t10_reservation->pr_aptpl_buf_len */ |
376 | #define PR_APTPL_BUF_LEN 8192 | 368 | #define PR_APTPL_BUF_LEN 8192 |
377 | u32 pr_aptpl_buf_len; | 369 | u32 pr_aptpl_buf_len; |
378 | u32 pr_generation; | 370 | u32 pr_generation; |
@@ -397,7 +389,7 @@ struct t10_reservation_template { | |||
397 | 389 | ||
398 | struct se_queue_req { | 390 | struct se_queue_req { |
399 | int state; | 391 | int state; |
400 | void *cmd; | 392 | struct se_cmd *cmd; |
401 | struct list_head qr_list; | 393 | struct list_head qr_list; |
402 | } ____cacheline_aligned; | 394 | } ____cacheline_aligned; |
403 | 395 | ||
@@ -408,64 +400,10 @@ struct se_queue_obj { | |||
408 | wait_queue_head_t thread_wq; | 400 | wait_queue_head_t thread_wq; |
409 | } ____cacheline_aligned; | 401 | } ____cacheline_aligned; |
410 | 402 | ||
411 | /* | ||
412 | * Used one per struct se_cmd to hold all extra struct se_task | ||
413 | * metadata. This structure is setup and allocated in | ||
414 | * drivers/target/target_core_transport.c:__transport_alloc_se_cmd() | ||
415 | */ | ||
416 | struct se_transport_task { | ||
417 | unsigned char *t_task_cdb; | ||
418 | unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; | ||
419 | unsigned long long t_task_lba; | ||
420 | int t_tasks_failed; | ||
421 | int t_tasks_fua; | ||
422 | bool t_tasks_bidi; | ||
423 | u32 t_task_cdbs; | ||
424 | u32 t_tasks_check; | ||
425 | u32 t_tasks_no; | ||
426 | u32 t_tasks_sectors; | ||
427 | u32 t_tasks_se_num; | ||
428 | u32 t_tasks_se_bidi_num; | ||
429 | u32 t_tasks_sg_chained_no; | ||
430 | atomic_t t_fe_count; | ||
431 | atomic_t t_se_count; | ||
432 | atomic_t t_task_cdbs_left; | ||
433 | atomic_t t_task_cdbs_ex_left; | ||
434 | atomic_t t_task_cdbs_timeout_left; | ||
435 | atomic_t t_task_cdbs_sent; | ||
436 | atomic_t t_transport_aborted; | ||
437 | atomic_t t_transport_active; | ||
438 | atomic_t t_transport_complete; | ||
439 | atomic_t t_transport_queue_active; | ||
440 | atomic_t t_transport_sent; | ||
441 | atomic_t t_transport_stop; | ||
442 | atomic_t t_transport_timeout; | ||
443 | atomic_t transport_dev_active; | ||
444 | atomic_t transport_lun_active; | ||
445 | atomic_t transport_lun_fe_stop; | ||
446 | atomic_t transport_lun_stop; | ||
447 | spinlock_t t_state_lock; | ||
448 | struct completion t_transport_stop_comp; | ||
449 | struct completion transport_lun_fe_stop_comp; | ||
450 | struct completion transport_lun_stop_comp; | ||
451 | struct scatterlist *t_tasks_sg_chained; | ||
452 | struct scatterlist t_tasks_sg_bounce; | ||
453 | void *t_task_buf; | ||
454 | /* | ||
455 | * Used for pre-registered fabric SGL passthrough WRITE and READ | ||
456 | * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop | ||
457 | * and other HW target mode fabric modules. | ||
458 | */ | ||
459 | struct scatterlist *t_task_pt_sgl; | ||
460 | struct list_head *t_mem_list; | ||
461 | /* Used for BIDI READ */ | ||
462 | struct list_head *t_mem_bidi_list; | ||
463 | struct list_head t_task_list; | ||
464 | } ____cacheline_aligned; | ||
465 | |||
466 | struct se_task { | 403 | struct se_task { |
467 | unsigned char task_sense; | 404 | unsigned char task_sense; |
468 | struct scatterlist *task_sg; | 405 | struct scatterlist *task_sg; |
406 | u32 task_sg_nents; | ||
469 | struct scatterlist *task_sg_bidi; | 407 | struct scatterlist *task_sg_bidi; |
470 | u8 task_scsi_status; | 408 | u8 task_scsi_status; |
471 | u8 task_flags; | 409 | u8 task_flags; |
@@ -476,8 +414,6 @@ struct se_task { | |||
476 | u32 task_no; | 414 | u32 task_no; |
477 | u32 task_sectors; | 415 | u32 task_sectors; |
478 | u32 task_size; | 416 | u32 task_size; |
479 | u32 task_sg_num; | ||
480 | u32 task_sg_offset; | ||
481 | enum dma_data_direction task_data_direction; | 417 | enum dma_data_direction task_data_direction; |
482 | struct se_cmd *task_se_cmd; | 418 | struct se_cmd *task_se_cmd; |
483 | struct se_device *se_dev; | 419 | struct se_device *se_dev; |
@@ -495,9 +431,6 @@ struct se_task { | |||
495 | struct list_head t_state_list; | 431 | struct list_head t_state_list; |
496 | } ____cacheline_aligned; | 432 | } ____cacheline_aligned; |
497 | 433 | ||
498 | #define TASK_CMD(task) ((task)->task_se_cmd) | ||
499 | #define TASK_DEV(task) ((task)->se_dev) | ||
500 | |||
501 | struct se_cmd { | 434 | struct se_cmd { |
502 | /* SAM response code being sent to initiator */ | 435 | /* SAM response code being sent to initiator */ |
503 | u8 scsi_status; | 436 | u8 scsi_status; |
@@ -531,9 +464,10 @@ struct se_cmd { | |||
531 | atomic_t transport_sent; | 464 | atomic_t transport_sent; |
532 | /* Used for sense data */ | 465 | /* Used for sense data */ |
533 | void *sense_buffer; | 466 | void *sense_buffer; |
534 | struct list_head se_delayed_list; | 467 | struct list_head se_delayed_node; |
535 | struct list_head se_ordered_list; | 468 | struct list_head se_ordered_node; |
536 | struct list_head se_lun_list; | 469 | struct list_head se_lun_node; |
470 | struct list_head se_qf_node; | ||
537 | struct se_device *se_dev; | 471 | struct se_device *se_dev; |
538 | struct se_dev_entry *se_deve; | 472 | struct se_dev_entry *se_deve; |
539 | struct se_device *se_obj_ptr; | 473 | struct se_device *se_obj_ptr; |
@@ -542,18 +476,62 @@ struct se_cmd { | |||
542 | /* Only used for internal passthrough and legacy TCM fabric modules */ | 476 | /* Only used for internal passthrough and legacy TCM fabric modules */ |
543 | struct se_session *se_sess; | 477 | struct se_session *se_sess; |
544 | struct se_tmr_req *se_tmr_req; | 478 | struct se_tmr_req *se_tmr_req; |
545 | /* t_task is setup to t_task_backstore in transport_init_se_cmd() */ | 479 | struct list_head se_queue_node; |
546 | struct se_transport_task *t_task; | ||
547 | struct se_transport_task t_task_backstore; | ||
548 | struct target_core_fabric_ops *se_tfo; | 480 | struct target_core_fabric_ops *se_tfo; |
549 | int (*transport_emulate_cdb)(struct se_cmd *); | 481 | int (*transport_emulate_cdb)(struct se_cmd *); |
550 | void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *); | 482 | void (*transport_split_cdb)(unsigned long long, u32, unsigned char *); |
551 | void (*transport_wait_for_tasks)(struct se_cmd *, int, int); | 483 | void (*transport_wait_for_tasks)(struct se_cmd *, int, int); |
552 | void (*transport_complete_callback)(struct se_cmd *); | 484 | void (*transport_complete_callback)(struct se_cmd *); |
553 | } ____cacheline_aligned; | 485 | int (*transport_qf_callback)(struct se_cmd *); |
554 | 486 | ||
555 | #define T_TASK(cmd) ((cmd)->t_task) | 487 | unsigned char *t_task_cdb; |
556 | #define CMD_TFO(cmd) ((cmd)->se_tfo) | 488 | unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; |
489 | unsigned long long t_task_lba; | ||
490 | int t_tasks_failed; | ||
491 | int t_tasks_fua; | ||
492 | bool t_tasks_bidi; | ||
493 | u32 t_tasks_sg_chained_no; | ||
494 | atomic_t t_fe_count; | ||
495 | atomic_t t_se_count; | ||
496 | atomic_t t_task_cdbs_left; | ||
497 | atomic_t t_task_cdbs_ex_left; | ||
498 | atomic_t t_task_cdbs_timeout_left; | ||
499 | atomic_t t_task_cdbs_sent; | ||
500 | atomic_t t_transport_aborted; | ||
501 | atomic_t t_transport_active; | ||
502 | atomic_t t_transport_complete; | ||
503 | atomic_t t_transport_queue_active; | ||
504 | atomic_t t_transport_sent; | ||
505 | atomic_t t_transport_stop; | ||
506 | atomic_t t_transport_timeout; | ||
507 | atomic_t transport_dev_active; | ||
508 | atomic_t transport_lun_active; | ||
509 | atomic_t transport_lun_fe_stop; | ||
510 | atomic_t transport_lun_stop; | ||
511 | spinlock_t t_state_lock; | ||
512 | struct completion t_transport_stop_comp; | ||
513 | struct completion transport_lun_fe_stop_comp; | ||
514 | struct completion transport_lun_stop_comp; | ||
515 | struct scatterlist *t_tasks_sg_chained; | ||
516 | |||
517 | /* | ||
518 | * Used for pre-registered fabric SGL passthrough WRITE and READ | ||
519 | * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop | ||
520 | * and other HW target mode fabric modules. | ||
521 | */ | ||
522 | struct scatterlist *t_task_pt_sgl; | ||
523 | u32 t_task_pt_sgl_num; | ||
524 | |||
525 | struct scatterlist *t_data_sg; | ||
526 | unsigned int t_data_nents; | ||
527 | struct scatterlist *t_bidi_data_sg; | ||
528 | unsigned int t_bidi_data_nents; | ||
529 | |||
530 | /* Used for BIDI READ */ | ||
531 | struct list_head t_task_list; | ||
532 | u32 t_task_list_num; | ||
533 | |||
534 | } ____cacheline_aligned; | ||
557 | 535 | ||
558 | struct se_tmr_req { | 536 | struct se_tmr_req { |
559 | /* Task Management function to be performed */ | 537 | /* Task Management function to be performed */ |
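This hunk is the heart of the header reshuffle: the separately allocated struct se_transport_task (and its T_TASK() accessor) is folded straight into struct se_cmd, and the se_mem lists become plain scatterlists. Call sites shed one level of indirection; a sketch of the resulting access pattern, with a hypothetical per-entry helper:

    #include <linux/scatterlist.h>

    /* before (removed): mem lists reached through the t_task backstore,
     *   list_for_each_entry(mem, T_TASK(cmd)->t_mem_list, se_list) ...
     */

    /* after: the data scatterlist hangs directly off the command */
    struct scatterlist *sg;
    unsigned int i;

    for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i)
            handle_entry(sg_page(sg), sg->offset, sg->length);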
@@ -617,9 +595,6 @@ struct se_session { | |||
617 | struct list_head sess_acl_list; | 595 | struct list_head sess_acl_list; |
618 | } ____cacheline_aligned; | 596 | } ____cacheline_aligned; |
619 | 597 | ||
620 | #define SE_SESS(cmd) ((cmd)->se_sess) | ||
621 | #define SE_NODE_ACL(sess) ((sess)->se_node_acl) | ||
622 | |||
623 | struct se_device; | 598 | struct se_device; |
624 | struct se_transform_info; | 599 | struct se_transform_info; |
625 | struct scatterlist; | 600 | struct scatterlist; |
@@ -640,8 +615,6 @@ struct se_lun_acl { | |||
640 | struct se_ml_stat_grps ml_stat_grps; | 615 | struct se_ml_stat_grps ml_stat_grps; |
641 | } ____cacheline_aligned; | 616 | } ____cacheline_aligned; |
642 | 617 | ||
643 | #define ML_STAT_GRPS(lacl) (&(lacl)->ml_stat_grps) | ||
644 | |||
645 | struct se_dev_entry { | 618 | struct se_dev_entry { |
646 | bool def_pr_registered; | 619 | bool def_pr_registered; |
647 | /* See transport_lunflags_table */ | 620 | /* See transport_lunflags_table */ |
@@ -688,6 +661,8 @@ struct se_dev_attrib { | |||
688 | int emulate_reservations; | 661 | int emulate_reservations; |
689 | int emulate_alua; | 662 | int emulate_alua; |
690 | int enforce_pr_isids; | 663 | int enforce_pr_isids; |
664 | int is_nonrot; | ||
665 | int emulate_rest_reord; | ||
691 | u32 hw_block_size; | 666 | u32 hw_block_size; |
692 | u32 block_size; | 667 | u32 block_size; |
693 | u32 hw_max_sectors; | 668 | u32 hw_max_sectors; |
@@ -727,10 +702,10 @@ struct se_subsystem_dev { | |||
727 | /* T10 Inquiry and VPD WWN Information */ | 702 | /* T10 Inquiry and VPD WWN Information */ |
728 | struct t10_wwn t10_wwn; | 703 | struct t10_wwn t10_wwn; |
729 | /* T10 SPC-2 + SPC-3 Reservations */ | 704 | /* T10 SPC-2 + SPC-3 Reservations */ |
730 | struct t10_reservation_template t10_reservation; | 705 | struct t10_reservation t10_pr; |
731 | spinlock_t se_dev_lock; | 706 | spinlock_t se_dev_lock; |
732 | void *se_dev_su_ptr; | 707 | void *se_dev_su_ptr; |
733 | struct list_head g_se_dev_list; | 708 | struct list_head se_dev_node; |
734 | struct config_group se_dev_group; | 709 | struct config_group se_dev_group; |
735 | /* For T10 Reservations */ | 710 | /* For T10 Reservations */ |
736 | struct config_group se_dev_pr_group; | 711 | struct config_group se_dev_pr_group; |
@@ -738,11 +713,6 @@ struct se_subsystem_dev { | |||
738 | struct se_dev_stat_grps dev_stat_grps; | 713 | struct se_dev_stat_grps dev_stat_grps; |
739 | } ____cacheline_aligned; | 714 | } ____cacheline_aligned; |
740 | 715 | ||
741 | #define T10_ALUA(su_dev) (&(su_dev)->t10_alua) | ||
742 | #define T10_RES(su_dev) (&(su_dev)->t10_reservation) | ||
743 | #define T10_PR_OPS(su_dev) (&(su_dev)->t10_reservation.pr_ops) | ||
744 | #define DEV_STAT_GRP(dev) (&(dev)->dev_stat_grps) | ||
745 | |||
746 | struct se_device { | 716 | struct se_device { |
747 | /* Set to 1 if thread is NOT sleeping on thread_sem */ | 717 | /* Set to 1 if thread is NOT sleeping on thread_sem */ |
748 | u8 thread_active; | 718 | u8 thread_active; |
@@ -780,11 +750,11 @@ struct se_device { | |||
780 | atomic_t dev_status_thr_count; | 750 | atomic_t dev_status_thr_count; |
781 | atomic_t dev_hoq_count; | 751 | atomic_t dev_hoq_count; |
782 | atomic_t dev_ordered_sync; | 752 | atomic_t dev_ordered_sync; |
753 | atomic_t dev_qf_count; | ||
783 | struct se_obj dev_obj; | 754 | struct se_obj dev_obj; |
784 | struct se_obj dev_access_obj; | 755 | struct se_obj dev_access_obj; |
785 | struct se_obj dev_export_obj; | 756 | struct se_obj dev_export_obj; |
786 | struct se_queue_obj *dev_queue_obj; | 757 | struct se_queue_obj dev_queue_obj; |
787 | struct se_queue_obj *dev_status_queue_obj; | ||
788 | spinlock_t delayed_cmd_lock; | 758 | spinlock_t delayed_cmd_lock; |
789 | spinlock_t ordered_cmd_lock; | 759 | spinlock_t ordered_cmd_lock; |
790 | spinlock_t execute_task_lock; | 760 | spinlock_t execute_task_lock; |
@@ -796,6 +766,7 @@ struct se_device { | |||
796 | spinlock_t dev_status_thr_lock; | 766 | spinlock_t dev_status_thr_lock; |
797 | spinlock_t se_port_lock; | 767 | spinlock_t se_port_lock; |
798 | spinlock_t se_tmr_lock; | 768 | spinlock_t se_tmr_lock; |
769 | spinlock_t qf_cmd_lock; | ||
799 | /* Used for legacy SPC-2 reservations */ | 770 | /* Used for legacy SPC-2 reservations */ |
800 | struct se_node_acl *dev_reserved_node_acl; | 771 | struct se_node_acl *dev_reserved_node_acl; |
801 | /* Used for ALUA Logical Unit Group membership */ | 772 | /* Used for ALUA Logical Unit Group membership */ |
@@ -809,10 +780,12 @@ struct se_device { | |||
809 | struct task_struct *process_thread; | 780 | struct task_struct *process_thread; |
810 | pid_t process_thread_pid; | 781 | pid_t process_thread_pid; |
811 | struct task_struct *dev_mgmt_thread; | 782 | struct task_struct *dev_mgmt_thread; |
783 | struct work_struct qf_work_queue; | ||
812 | struct list_head delayed_cmd_list; | 784 | struct list_head delayed_cmd_list; |
813 | struct list_head ordered_cmd_list; | 785 | struct list_head ordered_cmd_list; |
814 | struct list_head execute_task_list; | 786 | struct list_head execute_task_list; |
815 | struct list_head state_task_list; | 787 | struct list_head state_task_list; |
788 | struct list_head qf_cmd_list; | ||
816 | /* Pointer to associated SE HBA */ | 789 | /* Pointer to associated SE HBA */ |
817 | struct se_hba *se_hba; | 790 | struct se_hba *se_hba; |
818 | struct se_subsystem_dev *se_sub_dev; | 791 | struct se_subsystem_dev *se_sub_dev; |
@@ -824,11 +797,6 @@ struct se_device { | |||
824 | struct list_head g_se_dev_list; | 797 | struct list_head g_se_dev_list; |
825 | } ____cacheline_aligned; | 798 | } ____cacheline_aligned; |
826 | 799 | ||
827 | #define SE_DEV(cmd) ((cmd)->se_lun->lun_se_dev) | ||
828 | #define SU_DEV(dev) ((dev)->se_sub_dev) | ||
829 | #define DEV_ATTRIB(dev) (&(dev)->se_sub_dev->se_dev_attrib) | ||
830 | #define DEV_T10_WWN(dev) (&(dev)->se_sub_dev->t10_wwn) | ||
831 | |||
832 | struct se_hba { | 800 | struct se_hba { |
833 | u16 hba_tpgt; | 801 | u16 hba_tpgt; |
834 | u32 hba_id; | 802 | u32 hba_id; |
@@ -837,24 +805,17 @@ struct se_hba { | |||
837 | /* Virtual iSCSI devices attached. */ | 805 | /* Virtual iSCSI devices attached. */ |
838 | u32 dev_count; | 806 | u32 dev_count; |
839 | u32 hba_index; | 807 | u32 hba_index; |
840 | atomic_t load_balance_queue; | ||
841 | atomic_t left_queue_depth; | ||
842 | /* Maximum queue depth the HBA can handle. */ | ||
843 | atomic_t max_queue_depth; | ||
844 | /* Pointer to transport specific host structure. */ | 808 | /* Pointer to transport specific host structure. */ |
845 | void *hba_ptr; | 809 | void *hba_ptr; |
846 | /* Linked list for struct se_device */ | 810 | /* Linked list for struct se_device */ |
847 | struct list_head hba_dev_list; | 811 | struct list_head hba_dev_list; |
848 | struct list_head hba_list; | 812 | struct list_head hba_node; |
849 | spinlock_t device_lock; | 813 | spinlock_t device_lock; |
850 | spinlock_t hba_queue_lock; | ||
851 | struct config_group hba_group; | 814 | struct config_group hba_group; |
852 | struct mutex hba_access_mutex; | 815 | struct mutex hba_access_mutex; |
853 | struct se_subsystem_api *transport; | 816 | struct se_subsystem_api *transport; |
854 | } ____cacheline_aligned; | 817 | } ____cacheline_aligned; |
855 | 818 | ||
856 | #define SE_HBA(dev) ((dev)->se_hba) | ||
857 | |||
858 | struct se_port_stat_grps { | 819 | struct se_port_stat_grps { |
859 | struct config_group stat_group; | 820 | struct config_group stat_group; |
860 | struct config_group scsi_port_group; | 821 | struct config_group scsi_port_group; |
@@ -881,9 +842,6 @@ struct se_lun { | |||
881 | struct se_port_stat_grps port_stat_grps; | 842 | struct se_port_stat_grps port_stat_grps; |
882 | } ____cacheline_aligned; | 843 | } ____cacheline_aligned; |
883 | 844 | ||
884 | #define SE_LUN(cmd) ((cmd)->se_lun) | ||
885 | #define PORT_STAT_GRP(lun) (&(lun)->port_stat_grps) | ||
886 | |||
887 | struct scsi_port_stats { | 845 | struct scsi_port_stats { |
888 | u64 cmd_pdus; | 846 | u64 cmd_pdus; |
889 | u64 tx_data_octets; | 847 | u64 tx_data_octets; |
@@ -930,7 +888,7 @@ struct se_portal_group { | |||
930 | spinlock_t tpg_lun_lock; | 888 | spinlock_t tpg_lun_lock; |
931 | /* Pointer to $FABRIC_MOD portal group */ | 889 | /* Pointer to $FABRIC_MOD portal group */ |
932 | void *se_tpg_fabric_ptr; | 890 | void *se_tpg_fabric_ptr; |
933 | struct list_head se_tpg_list; | 891 | struct list_head se_tpg_node; |
934 | /* linked list for initiator ACL list */ | 892 | /* linked list for initiator ACL list */ |
935 | struct list_head acl_node_list; | 893 | struct list_head acl_node_list; |
936 | struct se_lun *tpg_lun_list; | 894 | struct se_lun *tpg_lun_list; |
@@ -949,8 +907,6 @@ struct se_portal_group { | |||
949 | struct config_group tpg_param_group; | 907 | struct config_group tpg_param_group; |
950 | } ____cacheline_aligned; | 908 | } ____cacheline_aligned; |
951 | 909 | ||
952 | #define TPG_TFO(se_tpg) ((se_tpg)->se_tpg_tfo) | ||
953 | |||
954 | struct se_wwn { | 910 | struct se_wwn { |
955 | struct target_fabric_configfs *wwn_tf; | 911 | struct target_fabric_configfs *wwn_tf; |
956 | struct config_group wwn_group; | 912 | struct config_group wwn_group; |
@@ -958,28 +914,4 @@ struct se_wwn { | |||
958 | struct config_group fabric_stat_group; | 914 | struct config_group fabric_stat_group; |
959 | } ____cacheline_aligned; | 915 | } ____cacheline_aligned; |
960 | 916 | ||
961 | struct se_global { | ||
962 | u16 alua_lu_gps_counter; | ||
963 | int g_sub_api_initialized; | ||
964 | u32 in_shutdown; | ||
965 | u32 alua_lu_gps_count; | ||
966 | u32 g_hba_id_counter; | ||
967 | struct config_group target_core_hbagroup; | ||
968 | struct config_group alua_group; | ||
969 | struct config_group alua_lu_gps_group; | ||
970 | struct list_head g_lu_gps_list; | ||
971 | struct list_head g_se_tpg_list; | ||
972 | struct list_head g_hba_list; | ||
973 | struct list_head g_se_dev_list; | ||
974 | struct se_hba *g_lun0_hba; | ||
975 | struct se_subsystem_dev *g_lun0_su_dev; | ||
976 | struct se_device *g_lun0_dev; | ||
977 | struct t10_alua_lu_gp *default_lu_gp; | ||
978 | spinlock_t g_device_lock; | ||
979 | spinlock_t hba_lock; | ||
980 | spinlock_t se_tpg_lock; | ||
981 | spinlock_t lu_gps_lock; | ||
982 | spinlock_t plugin_class_lock; | ||
983 | } ____cacheline_aligned; | ||
984 | |||
985 | #endif /* TARGET_CORE_BASE_H */ | 917 | #endif /* TARGET_CORE_BASE_H */ |
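The deleted accessor macros (SE_DEV(), DEV_ATTRIB(), TPG_TFO(), SE_HBA() and friends) were thin wrappers over one or two pointer hops, so every user in this merge is converted to the spelled-out dereference. Using two of the definitions removed above:

    /* was: block_size = DEV_ATTRIB(dev)->block_size; */
    u32 block_size = dev->se_sub_dev->se_dev_attrib.block_size;

    /* was: tfo = TPG_TFO(se_tpg); */
    struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;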
diff --git a/include/target/target_core_device.h b/include/target/target_core_device.h index 52b18a5752c9..46571912086c 100644 --- a/include/target/target_core_device.h +++ b/include/target/target_core_device.h | |||
@@ -1,8 +1,8 @@ | |||
1 | #ifndef TARGET_CORE_DEVICE_H | 1 | #ifndef TARGET_CORE_DEVICE_H |
2 | #define TARGET_CORE_DEVICE_H | 2 | #define TARGET_CORE_DEVICE_H |
3 | 3 | ||
4 | extern int transport_get_lun_for_cmd(struct se_cmd *, unsigned char *, u32); | 4 | extern int transport_lookup_cmd_lun(struct se_cmd *, u32); |
5 | extern int transport_get_lun_for_tmr(struct se_cmd *, u32); | 5 | extern int transport_lookup_tmr_lun(struct se_cmd *, u32); |
6 | extern struct se_dev_entry *core_get_se_deve_from_rtpi( | 6 | extern struct se_dev_entry *core_get_se_deve_from_rtpi( |
7 | struct se_node_acl *, u16); | 7 | struct se_node_acl *, u16); |
8 | extern int core_free_device_list_for_node(struct se_node_acl *, | 8 | extern int core_free_device_list_for_node(struct se_node_acl *, |
@@ -39,6 +39,8 @@ extern int se_dev_set_emulate_tas(struct se_device *, int); | |||
39 | extern int se_dev_set_emulate_tpu(struct se_device *, int); | 39 | extern int se_dev_set_emulate_tpu(struct se_device *, int); |
40 | extern int se_dev_set_emulate_tpws(struct se_device *, int); | 40 | extern int se_dev_set_emulate_tpws(struct se_device *, int); |
41 | extern int se_dev_set_enforce_pr_isids(struct se_device *, int); | 41 | extern int se_dev_set_enforce_pr_isids(struct se_device *, int); |
42 | extern int se_dev_set_is_nonrot(struct se_device *, int); | ||
43 | extern int se_dev_set_emulate_rest_reord(struct se_device *dev, int); | ||
42 | extern int se_dev_set_queue_depth(struct se_device *, u32); | 44 | extern int se_dev_set_queue_depth(struct se_device *, u32); |
43 | extern int se_dev_set_max_sectors(struct se_device *, u32); | 45 | extern int se_dev_set_max_sectors(struct se_device *, u32); |
44 | extern int se_dev_set_optimal_sectors(struct se_device *, u32); | 46 | extern int se_dev_set_optimal_sectors(struct se_device *, u32); |
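With the rename in the first hunk above, the per-command and per-TMR LUN lookups no longer take a CDB pointer. A hedged sketch of a caller under the new signature; the dispatch wrapper and its errno are placeholders, not code from a real fabric driver:

#include <linux/errno.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>

static int example_dispatch_cmd(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	/* was: transport_get_lun_for_cmd(se_cmd, cdb, unpacked_lun) */
	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0)
		return -ENODEV;		/* illustrative error path */
	return 0;
}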
diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h index 747e1404dca0..2de8fe907596 100644 --- a/include/target/target_core_fabric_ops.h +++ b/include/target/target_core_fabric_ops.h | |||
@@ -39,17 +39,11 @@ struct target_core_fabric_ops { | |||
39 | */ | 39 | */ |
40 | int (*new_cmd_map)(struct se_cmd *); | 40 | int (*new_cmd_map)(struct se_cmd *); |
41 | /* | 41 | /* |
42 | * Optional function pointer for TCM fabric modules that use | ||
43 | * Linux/NET sockets to allocate struct iovec array to struct se_cmd | ||
44 | */ | ||
45 | int (*alloc_cmd_iovecs)(struct se_cmd *); | ||
46 | /* | ||
47 | * Optional to release struct se_cmd and fabric dependent allocated | 42 | * Optional to release struct se_cmd and fabric dependent allocated |
48 | * I/O descriptor in transport_cmd_check_stop() | 43 | * I/O descriptor in transport_cmd_check_stop() |
49 | */ | 44 | */ |
50 | void (*check_stop_free)(struct se_cmd *); | 45 | void (*check_stop_free)(struct se_cmd *); |
51 | void (*release_cmd_to_pool)(struct se_cmd *); | 46 | void (*release_cmd)(struct se_cmd *); |
52 | void (*release_cmd_direct)(struct se_cmd *); | ||
53 | /* | 47 | /* |
54 | * Called with spin_lock_bh(struct se_portal_group->session_lock) held. | 48 | * Called with spin_lock_bh(struct se_portal_group->session_lock) held. |
55 | */ | 49 | */ |
@@ -70,7 +64,6 @@ struct target_core_fabric_ops { | |||
70 | void (*set_default_node_attributes)(struct se_node_acl *); | 64 | void (*set_default_node_attributes)(struct se_node_acl *); |
71 | u32 (*get_task_tag)(struct se_cmd *); | 65 | u32 (*get_task_tag)(struct se_cmd *); |
72 | int (*get_cmd_state)(struct se_cmd *); | 66 | int (*get_cmd_state)(struct se_cmd *); |
73 | void (*new_cmd_failure)(struct se_cmd *); | ||
74 | int (*queue_data_in)(struct se_cmd *); | 67 | int (*queue_data_in)(struct se_cmd *); |
75 | int (*queue_status)(struct se_cmd *); | 68 | int (*queue_status)(struct se_cmd *); |
76 | int (*queue_tm_rsp)(struct se_cmd *); | 69 | int (*queue_tm_rsp)(struct se_cmd *); |
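The two target_core_fabric_ops hunks fold ->release_cmd_to_pool() and ->release_cmd_direct() into a single ->release_cmd() and drop the unused ->alloc_cmd_iovecs() and ->new_cmd_failure() slots. A sketch of the slimmer ops table a fabric module would register, with placeholder callbacks:

#include <target/target_core_base.h>
#include <target/target_core_fabric_ops.h>

static void example_release_cmd(struct se_cmd *se_cmd)
{
	/* a real fabric returns its descriptor to a kmem_cache here */
}

static struct target_core_fabric_ops example_fabric_ops = {
	/* was: .release_cmd_to_pool = ..., .release_cmd_direct = ... */
	.release_cmd	= example_release_cmd,
};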
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 24a1c6cb83c3..46aae4f94ede 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h | |||
@@ -101,6 +101,10 @@ | |||
101 | #define DA_ENFORCE_PR_ISIDS 1 | 101 | #define DA_ENFORCE_PR_ISIDS 1 |
102 | #define DA_STATUS_MAX_SECTORS_MIN 16 | 102 | #define DA_STATUS_MAX_SECTORS_MIN 16 |
103 | #define DA_STATUS_MAX_SECTORS_MAX 8192 | 103 | #define DA_STATUS_MAX_SECTORS_MAX 8192 |
104 | /* By default don't report non-rotating (solid state) medium */ | ||
105 | #define DA_IS_NONROT 0 | ||
106 | /* Queue Algorithm Modifier default for restricted reordering in control mode page */ | ||
107 | #define DA_EMULATE_REST_REORD 0 | ||
104 | 108 | ||
105 | #define SE_MODE_PAGE_BUF 512 | 109 | #define SE_MODE_PAGE_BUF 512 |
106 | 110 | ||
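The two new defaults pair with the se_dev_set_is_nonrot() and se_dev_set_emulate_rest_reord() setters added in target_core_device.h above. A sketch of how is_nonrot would plausibly surface in the Block Device Characteristics VPD page mentioned in the merge log, assuming the SBC-3 layout where a MEDIUM ROTATION RATE of 0001h means non-rotating (solid state) medium; this helper is illustrative, not the in-tree emulation code:

/* illustrative B1h page fill under the SBC-3 layout assumption */
static void example_emulate_evpd_b1(int is_nonrot, unsigned char *buf)
{
	buf[1] = 0xb1;		/* page code: Block Device Characteristics */
	buf[3] = 0x3c;		/* page length per SBC-3 */
	if (is_nonrot) {
		buf[4] = 0x00;	/* MEDIUM ROTATION RATE = 0001h: */
		buf[5] = 0x01;	/* non-rotating medium */
	}
}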
@@ -111,9 +115,8 @@ struct se_subsystem_api; | |||
111 | 115 | ||
112 | extern struct kmem_cache *se_mem_cache; | 116 | extern struct kmem_cache *se_mem_cache; |
113 | 117 | ||
114 | extern int init_se_global(void); | 118 | extern int init_se_kmem_caches(void); |
115 | extern void release_se_global(void); | 119 | extern void release_se_kmem_caches(void); |
116 | extern void init_scsi_index_table(void); | ||
117 | extern u32 scsi_get_new_index(scsi_index_t); | 120 | extern u32 scsi_get_new_index(scsi_index_t); |
118 | extern void transport_init_queue_obj(struct se_queue_obj *); | 121 | extern void transport_init_queue_obj(struct se_queue_obj *); |
119 | extern int transport_subsystem_check_init(void); | 122 | extern int transport_subsystem_check_init(void); |
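init_se_global()/release_se_global() give way to cache-only setup once struct se_global disappears. A minimal module init/exit sketch under that assumption; the example_ wrappers are placeholders for the real configfs-driven init path:

#include <linux/init.h>
#include <linux/module.h>
#include <target/target_core_transport.h>

static int __init example_core_init(void)
{
	return init_se_kmem_caches();	/* was init_se_global() */
}

static void __exit example_core_exit(void)
{
	release_se_kmem_caches();	/* was release_se_global() */
}

module_init(example_core_init);
module_exit(example_core_exit);
MODULE_LICENSE("GPL");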
@@ -160,36 +163,38 @@ extern struct se_device *transport_add_device_to_core_hba(struct se_hba *, | |||
160 | struct se_subsystem_dev *, u32, | 163 | struct se_subsystem_dev *, u32, |
161 | void *, struct se_dev_limits *, | 164 | void *, struct se_dev_limits *, |
162 | const char *, const char *); | 165 | const char *, const char *); |
163 | extern void transport_device_setup_cmd(struct se_cmd *); | ||
164 | extern void transport_init_se_cmd(struct se_cmd *, | 166 | extern void transport_init_se_cmd(struct se_cmd *, |
165 | struct target_core_fabric_ops *, | 167 | struct target_core_fabric_ops *, |
166 | struct se_session *, u32, int, int, | 168 | struct se_session *, u32, int, int, |
167 | unsigned char *); | 169 | unsigned char *); |
170 | void *transport_kmap_first_data_page(struct se_cmd *cmd); | ||
171 | void transport_kunmap_first_data_page(struct se_cmd *cmd); | ||
168 | extern void transport_free_se_cmd(struct se_cmd *); | 172 | extern void transport_free_se_cmd(struct se_cmd *); |
169 | extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *); | 173 | extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *); |
170 | extern int transport_generic_handle_cdb(struct se_cmd *); | 174 | extern int transport_generic_handle_cdb(struct se_cmd *); |
175 | extern int transport_handle_cdb_direct(struct se_cmd *); | ||
171 | extern int transport_generic_handle_cdb_map(struct se_cmd *); | 176 | extern int transport_generic_handle_cdb_map(struct se_cmd *); |
172 | extern int transport_generic_handle_data(struct se_cmd *); | 177 | extern int transport_generic_handle_data(struct se_cmd *); |
173 | extern void transport_new_cmd_failure(struct se_cmd *); | 178 | extern void transport_new_cmd_failure(struct se_cmd *); |
174 | extern int transport_generic_handle_tmr(struct se_cmd *); | 179 | extern int transport_generic_handle_tmr(struct se_cmd *); |
175 | extern void transport_generic_free_cmd_intr(struct se_cmd *); | 180 | extern void transport_generic_free_cmd_intr(struct se_cmd *); |
176 | extern void __transport_stop_task_timer(struct se_task *, unsigned long *); | 181 | extern void __transport_stop_task_timer(struct se_task *, unsigned long *); |
177 | extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]); | ||
178 | extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32, | 182 | extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32, |
179 | struct scatterlist *, u32); | 183 | struct scatterlist *, u32); |
180 | extern int transport_clear_lun_from_sessions(struct se_lun *); | 184 | extern int transport_clear_lun_from_sessions(struct se_lun *); |
181 | extern int transport_check_aborted_status(struct se_cmd *, int); | 185 | extern int transport_check_aborted_status(struct se_cmd *, int); |
182 | extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int); | 186 | extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int); |
183 | extern void transport_send_task_abort(struct se_cmd *); | 187 | extern void transport_send_task_abort(struct se_cmd *); |
184 | extern void transport_release_cmd_to_pool(struct se_cmd *); | 188 | extern void transport_release_cmd(struct se_cmd *); |
185 | extern void transport_generic_free_cmd(struct se_cmd *, int, int, int); | 189 | extern void transport_generic_free_cmd(struct se_cmd *, int, int); |
186 | extern void transport_generic_wait_for_cmds(struct se_cmd *, int); | 190 | extern void transport_generic_wait_for_cmds(struct se_cmd *, int); |
187 | extern u32 transport_calc_sg_num(struct se_task *, struct se_mem *, u32); | 191 | extern int transport_init_task_sg(struct se_task *, struct se_mem *, u32); |
188 | extern int transport_map_mem_to_sg(struct se_task *, struct list_head *, | 192 | extern int transport_map_mem_to_sg(struct se_task *, struct list_head *, |
189 | void *, struct se_mem *, | 193 | struct scatterlist *, struct se_mem *, |
190 | struct se_mem **, u32 *, u32 *); | 194 | struct se_mem **, u32 *, u32 *); |
191 | extern void transport_do_task_sg_chain(struct se_cmd *); | 195 | extern void transport_do_task_sg_chain(struct se_cmd *); |
192 | extern void transport_generic_process_write(struct se_cmd *); | 196 | extern void transport_generic_process_write(struct se_cmd *); |
197 | extern int transport_generic_new_cmd(struct se_cmd *); | ||
193 | extern int transport_generic_do_tmr(struct se_cmd *); | 198 | extern int transport_generic_do_tmr(struct se_cmd *); |
194 | /* From target_core_alua.c */ | 199 | /* From target_core_alua.c */ |
195 | extern int core_alua_check_nonop_delay(struct se_cmd *); | 200 | extern int core_alua_check_nonop_delay(struct se_cmd *); |
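Among the exports added above, transport_kmap_first_data_page() and transport_kunmap_first_data_page() support the "make all control CDBs scatter-gather" and "1 page max for control cdb buffer sizes" changes from the merge log: emulation code maps the first data page instead of assuming a linear buffer. A hedged sketch of the usage pattern; the response byte shown is illustrative:

/* illustrative single-page control payload fill */
static int example_fill_control_payload(struct se_cmd *cmd)
{
	unsigned char *buf = transport_kmap_first_data_page(cmd);

	buf[0] = 0x00;	/* e.g. peripheral device type field */
	/* ... fill the rest of the one-page response ... */
	transport_kunmap_first_data_page(cmd);
	return 0;
}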
@@ -235,13 +240,13 @@ struct se_subsystem_api { | |||
235 | */ | 240 | */ |
236 | int (*cdb_none)(struct se_task *); | 241 | int (*cdb_none)(struct se_task *); |
237 | /* | 242 | /* |
238 | * For SCF_SCSI_CONTROL_NONSG_IO_CDB | 243 | * For SCF_SCSI_DATA_SG_IO_CDB |
239 | */ | 244 | */ |
240 | int (*map_task_non_SG)(struct se_task *); | 245 | int (*map_data_SG)(struct se_task *); |
241 | /* | 246 | /* |
242 | * For SCF_SCSI_DATA_SG_IO_CDB and SCF_SCSI_CONTROL_SG_IO_CDB | 247 | * For SCF_SCSI_CONTROL_SG_IO_CDB |
243 | */ | 248 | */ |
244 | int (*map_task_SG)(struct se_task *); | 249 | int (*map_control_SG)(struct se_task *); |
245 | /* | 250 | /* |
246 | * attach_hba(): | 251 | * attach_hba(): |
247 | */ | 252 | */ |
@@ -292,7 +297,7 @@ struct se_subsystem_api { | |||
292 | * drivers. Provided out of convenience. | 297 | * drivers. Provided out of convenience. |
293 | */ | 298 | */ |
294 | int (*transport_complete)(struct se_task *task); | 299 | int (*transport_complete)(struct se_task *task); |
295 | struct se_task *(*alloc_task)(struct se_cmd *); | 300 | struct se_task *(*alloc_task)(unsigned char *cdb); |
296 | /* | 301 | /* |
297 | * do_task(): | 302 | * do_task(): |
298 | */ | 303 | */ |
@@ -342,19 +347,9 @@ struct se_subsystem_api { | |||
342 | */ | 347 | */ |
343 | sector_t (*get_blocks)(struct se_device *); | 348 | sector_t (*get_blocks)(struct se_device *); |
344 | /* | 349 | /* |
345 | * do_se_mem_map(): | ||
346 | */ | ||
347 | int (*do_se_mem_map)(struct se_task *, struct list_head *, void *, | ||
348 | struct se_mem *, struct se_mem **, u32 *, u32 *); | ||
349 | /* | ||
350 | * get_sense_buffer(): | 350 | * get_sense_buffer(): |
351 | */ | 351 | */ |
352 | unsigned char *(*get_sense_buffer)(struct se_task *); | 352 | unsigned char *(*get_sense_buffer)(struct se_task *); |
353 | } ____cacheline_aligned; | 353 | } ____cacheline_aligned; |
354 | 354 | ||
355 | #define TRANSPORT(dev) ((dev)->transport) | ||
356 | #define HBA_TRANSPORT(hba) ((hba)->transport) | ||
357 | |||
358 | extern struct se_global *se_global; | ||
359 | |||
360 | #endif /* TARGET_CORE_TRANSPORT_H */ | 355 | #endif /* TARGET_CORE_TRANSPORT_H */ |
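Taken together, the se_subsystem_api hunks rename the task-mapping hooks by command class (map_data_SG/map_control_SG instead of map_task_SG/map_task_non_SG), hand alloc_task() the CDB rather than the whole se_cmd, and retire do_se_mem_map() along with the TRANSPORT()/HBA_TRANSPORT() macros and the se_global export. A sketch of a backend filling in the reshaped ops; every example_ symbol is a placeholder:

#include <linux/slab.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>

struct example_task {
	struct se_task task;		/* generic task embedded first */
	unsigned long backend_state;	/* illustrative driver data */
};

static struct se_task *example_alloc_task(unsigned char *cdb)
{
	/* was: struct se_task *(*alloc_task)(struct se_cmd *) */
	struct example_task *et = kzalloc(sizeof(*et), GFP_KERNEL);

	return et ? &et->task : NULL;
}

static int example_map_data_SG(struct se_task *task)
{
	/* build backend I/O from the task's scatterlist for data CDBs */
	return 0;
}

static int example_map_control_SG(struct se_task *task)
{
	/* control CDBs are now scatter-gather backed as well */
	return 0;
}

static struct se_subsystem_api example_backend = {
	.alloc_task	= example_alloc_task,
	.map_data_SG	= example_map_data_SG,
	.map_control_SG	= example_map_control_SG,
};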