 drivers/target/target_core_device.c    |   2
 drivers/target/target_core_tmr.c       |  54
 drivers/target/target_core_transport.c | 173
 include/target/target_core_base.h      |  13
 4 files changed, 19 insertions(+), 223 deletions(-)
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 5ad972856a8d..bcef6771bafc 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -715,7 +715,7 @@ void se_release_device_for_hba(struct se_device *dev)
 		se_dev_stop(dev);
 
 	if (dev->dev_ptr) {
-		kthread_stop(dev->process_thread);
+		destroy_workqueue(dev->tmr_wq);
 		if (dev->transport->free_device)
 			dev->transport->free_device(dev->dev_ptr);
 	}
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 4185db109edf..1c59a3c23b2c 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -351,57 +351,6 @@ static void core_tmr_drain_state_list(
 	}
 }
 
-static void core_tmr_drain_cmd_list(
-	struct se_device *dev,
-	struct se_cmd *prout_cmd,
-	struct se_node_acl *tmr_nacl,
-	int tas,
-	struct list_head *preempt_and_abort_list)
-{
-	LIST_HEAD(drain_cmd_list);
-	struct se_queue_obj *qobj = &dev->dev_queue_obj;
-	struct se_cmd *cmd, *tcmd;
-	unsigned long flags;
-
-	/*
-	 * Release all commands remaining in the per-device command queue.
-	 *
-	 * This follows the same logic as above for the state list.
-	 */
-	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-	list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) {
-		/*
-		 * For PREEMPT_AND_ABORT usage, only process commands
-		 * with a matching reservation key.
-		 */
-		if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
-			continue;
-		/*
-		 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
-		 */
-		if (prout_cmd == cmd)
-			continue;
-
-		cmd->transport_state &= ~CMD_T_QUEUED;
-		atomic_dec(&qobj->queue_cnt);
-		list_move_tail(&cmd->se_queue_node, &drain_cmd_list);
-	}
-	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-
-	while (!list_empty(&drain_cmd_list)) {
-		cmd = list_entry(drain_cmd_list.next, struct se_cmd, se_queue_node);
-		list_del_init(&cmd->se_queue_node);
-
-		pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
-			" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
-			"Preempt" : "", cmd, cmd->t_state,
-			atomic_read(&cmd->t_fe_count));
-
-		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
-			atomic_read(&cmd->t_fe_count));
-	}
-}
-
 int core_tmr_lun_reset(
 	struct se_device *dev,
 	struct se_tmr_req *tmr,
@@ -444,8 +393,7 @@ int core_tmr_lun_reset(
 	core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
 	core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
 				preempt_and_abort_list);
-	core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas,
-				preempt_and_abort_list);
+
 	/*
 	 * Clear any legacy SPC-2 reservation when called during
 	 * LOGICAL UNIT RESET
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 9b7bbbe70211..45ed170f9151 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -66,13 +66,11 @@ struct kmem_cache *t10_alua_lu_gp_mem_cache;
 struct kmem_cache *t10_alua_tg_pt_gp_cache;
 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
 
-static int transport_processing_thread(void *param);
 static void transport_complete_task_attr(struct se_cmd *cmd);
 static void transport_handle_queue_full(struct se_cmd *cmd,
 		struct se_device *dev);
 static int transport_generic_get_mem(struct se_cmd *cmd);
 static void transport_put_cmd(struct se_cmd *cmd);
-static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
 static void target_complete_ok_work(struct work_struct *work);
 
@@ -193,14 +191,6 @@ u32 scsi_get_new_index(scsi_index_t type)
 	return new_index;
 }
 
-static void transport_init_queue_obj(struct se_queue_obj *qobj)
-{
-	atomic_set(&qobj->queue_cnt, 0);
-	INIT_LIST_HEAD(&qobj->qobj_list);
-	init_waitqueue_head(&qobj->thread_wq);
-	spin_lock_init(&qobj->cmd_queue_lock);
-}
-
 void transport_subsystem_check_init(void)
 {
 	int ret;
@@ -566,79 +556,8 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 
 	if (transport_cmd_check_stop_to_fabric(cmd))
 		return;
-	if (remove) {
-		transport_remove_cmd_from_queue(cmd);
+	if (remove)
 		transport_put_cmd(cmd);
-	}
-}
-
-static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
-		bool at_head)
-{
-	struct se_device *dev = cmd->se_dev;
-	struct se_queue_obj *qobj = &dev->dev_queue_obj;
-	unsigned long flags;
-
-	if (t_state) {
-		spin_lock_irqsave(&cmd->t_state_lock, flags);
-		cmd->t_state = t_state;
-		cmd->transport_state |= CMD_T_ACTIVE;
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-	}
-
-	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-
-	/* If the cmd is already on the list, remove it before we add it */
-	if (!list_empty(&cmd->se_queue_node))
-		list_del(&cmd->se_queue_node);
-	else
-		atomic_inc(&qobj->queue_cnt);
-
-	if (at_head)
-		list_add(&cmd->se_queue_node, &qobj->qobj_list);
-	else
-		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
-	cmd->transport_state |= CMD_T_QUEUED;
-	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-
-	wake_up_interruptible(&qobj->thread_wq);
-}
-
-static struct se_cmd *
-transport_get_cmd_from_queue(struct se_queue_obj *qobj)
-{
-	struct se_cmd *cmd;
-	unsigned long flags;
-
-	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-	if (list_empty(&qobj->qobj_list)) {
-		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-		return NULL;
-	}
-	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
-
-	cmd->transport_state &= ~CMD_T_QUEUED;
-	list_del_init(&cmd->se_queue_node);
-	atomic_dec(&qobj->queue_cnt);
-	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-
-	return cmd;
-}
-
-static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
-{
-	struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
-	unsigned long flags;
-
-	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-	if (!(cmd->transport_state & CMD_T_QUEUED)) {
-		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-		return;
-	}
-	cmd->transport_state &= ~CMD_T_QUEUED;
-	atomic_dec(&qobj->queue_cnt);
-	list_del_init(&cmd->se_queue_node);
-	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 }
 
 static void target_complete_failure_work(struct work_struct *work)
@@ -1132,7 +1051,6 @@ struct se_device *transport_add_device_to_core_hba(
 		return NULL;
 	}
 
-	transport_init_queue_obj(&dev->dev_queue_obj);
 	dev->dev_flags = device_flags;
 	dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
 	dev->dev_ptr = transport_dev;
@@ -1185,10 +1103,10 @@
 	/*
 	 * Startup the struct se_device processing thread
 	 */
-	dev->process_thread = kthread_run(transport_processing_thread, dev,
-			"LIO_%s", dev->transport->name);
-	if (IS_ERR(dev->process_thread)) {
-		pr_err("Unable to create kthread: LIO_%s\n",
+	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
+			dev->transport->name);
+	if (!dev->tmr_wq) {
+		pr_err("Unable to create tmr workqueue for %s\n",
 			dev->transport->name);
 		goto out;
 	}
@@ -1219,7 +1137,7 @@
 
 	return dev;
 out:
-	kthread_stop(dev->process_thread);
+	destroy_workqueue(dev->tmr_wq);
 
 	spin_lock(&hba->device_lock);
 	list_del(&dev->dev_list);
@@ -1299,7 +1217,6 @@ void transport_init_se_cmd(
 	INIT_LIST_HEAD(&cmd->se_lun_node);
 	INIT_LIST_HEAD(&cmd->se_delayed_node);
 	INIT_LIST_HEAD(&cmd->se_qf_node);
-	INIT_LIST_HEAD(&cmd->se_queue_node);
 	INIT_LIST_HEAD(&cmd->se_cmd_list);
 	INIT_LIST_HEAD(&cmd->state_list);
 	init_completion(&cmd->transport_lun_fe_stop_comp);
@@ -1494,10 +1411,9 @@ int transport_handle_cdb_direct(
 		return -EINVAL;
 	}
 	/*
-	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following
-	 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
-	 * in existing usage to ensure that outstanding descriptors are handled
-	 * correctly during shutdown via transport_wait_for_tasks()
+	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
+	 * outstanding descriptors are handled correctly during shutdown via
+	 * transport_wait_for_tasks()
 	 *
 	 * Also, we don't take cmd->t_state_lock here as we only expect
 	 * this to be called for initial descriptor submission.
@@ -1661,18 +1577,6 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
 }
 EXPORT_SYMBOL(target_submit_tmr);
 
-/*	transport_generic_handle_tmr():
- *
- *
- */
-int transport_generic_handle_tmr(
-	struct se_cmd *cmd)
-{
-	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
-	return 0;
-}
-EXPORT_SYMBOL(transport_generic_handle_tmr);
-
 /*
  * If the cmd is active, request it to be stopped and sleep until it
  * has completed.
@@ -2653,8 +2557,6 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
 	cmd->transport_state |= CMD_T_LUN_FE_STOP;
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
-
 	// XXX: audit task_flags checks.
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if ((cmd->transport_state & CMD_T_BUSY) &&
@@ -2673,7 +2575,6 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
 		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
 			cmd->se_tfo->get_task_tag(cmd));
 	}
-	transport_remove_cmd_from_queue(cmd);
 
 	return 0;
 }
@@ -2872,8 +2773,6 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
 
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
-
 	wait_for_completion(&cmd->t_transport_stop_comp);
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
@@ -3156,8 +3055,9 @@ void transport_send_task_abort(struct se_cmd *cmd)
 	cmd->se_tfo->queue_status(cmd);
 }
 
-static int transport_generic_do_tmr(struct se_cmd *cmd)
+static void target_tmr_work(struct work_struct *work)
 {
+	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
 	struct se_device *dev = cmd->se_dev;
 	struct se_tmr_req *tmr = cmd->se_tmr_req;
 	int ret;
@@ -3193,54 +3093,13 @@ static int transport_generic_do_tmr(struct se_cmd *cmd)
 	cmd->se_tfo->queue_tm_rsp(cmd);
 
 	transport_cmd_check_stop_to_fabric(cmd);
-	return 0;
 }
 
-/*	transport_processing_thread():
- *
- *
- */
-static int transport_processing_thread(void *param)
+int transport_generic_handle_tmr(
+	struct se_cmd *cmd)
 {
-	int ret;
-	struct se_cmd *cmd;
-	struct se_device *dev = param;
-
-	while (!kthread_should_stop()) {
-		ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
-				atomic_read(&dev->dev_queue_obj.queue_cnt) ||
-				kthread_should_stop());
-		if (ret < 0)
-			goto out;
-
-get_cmd:
-		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
-		if (!cmd)
-			continue;
-
-		switch (cmd->t_state) {
-		case TRANSPORT_NEW_CMD:
-			BUG();
-			break;
-		case TRANSPORT_PROCESS_TMR:
-			transport_generic_do_tmr(cmd);
-			break;
-		default:
-			pr_err("Unknown t_state: %d for ITT: 0x%08x "
-				"i_state: %d on SE LUN: %u\n",
-				cmd->t_state,
-				cmd->se_tfo->get_task_tag(cmd),
-				cmd->se_tfo->get_cmd_state(cmd),
-				cmd->se_lun->unpacked_lun);
-			BUG();
-		}
-
-		goto get_cmd;
-	}
-
-out:
-	WARN_ON(!list_empty(&dev->state_list));
-	WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
-	dev->process_thread = NULL;
+	INIT_WORK(&cmd->work, target_tmr_work);
+	queue_work(cmd->se_dev->tmr_wq, &cmd->work);
 	return 0;
 }
+EXPORT_SYMBOL(transport_generic_handle_tmr);
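[Note: the new transport_generic_handle_tmr() above uses the kernel's standard embedded-work-item pattern. A minimal sketch of that pattern follows; demo_cmd, demo_tmr_work and demo_handle_tmr are hypothetical stand-ins for struct se_cmd, target_tmr_work and transport_generic_handle_tmr, illustrating the workqueue API rather than reproducing code from this patch.

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Hypothetical stand-in for struct se_cmd: the only structural
 * requirement is an embedded work_struct. */
struct demo_cmd {
	int tag;
	struct work_struct work;
};

/* Runs later in process context on a workqueue worker thread. */
static void demo_tmr_work(struct work_struct *work)
{
	/* Recover the enclosing command from its embedded work item. */
	struct demo_cmd *cmd = container_of(work, struct demo_cmd, work);

	pr_info("handling TMR for tag %d\n", cmd->tag);
}

/* Submission side, mirroring the shape of the new
 * transport_generic_handle_tmr(): no private list, counter, lock or
 * wakeup is needed. */
static int demo_handle_tmr(struct workqueue_struct *wq, struct demo_cmd *cmd)
{
	INIT_WORK(&cmd->work, demo_tmr_work);
	queue_work(wq, &cmd->work);
	return 0;
}

Because each command carries its own work item, the se_queue_obj bookkeeping the old code maintained by hand (counter, spinlock, list and waitqueue) collapses into the two calls above.]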
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 11052b24ee41..4f4f04219b11 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -147,7 +147,6 @@ enum transport_state_table {
 	TRANSPORT_WRITE_PENDING = 3,
 	TRANSPORT_PROCESSING = 5,
 	TRANSPORT_COMPLETE = 6,
-	TRANSPORT_PROCESS_TMR = 9,
 	TRANSPORT_ISTATE_PROCESSING = 11,
 	TRANSPORT_COMPLETE_QF_WP = 18,
 	TRANSPORT_COMPLETE_QF_OK = 19,
@@ -464,13 +463,6 @@ struct t10_reservation {
 	struct t10_reservation_ops pr_ops;
 };
 
-struct se_queue_obj {
-	atomic_t queue_cnt;
-	spinlock_t cmd_queue_lock;
-	struct list_head qobj_list;
-	wait_queue_head_t thread_wq;
-};
-
 struct se_tmr_req {
 	/* Task Management function to be performed */
 	u8 function;
@@ -527,7 +519,6 @@ struct se_cmd {
 	/* Only used for internal passthrough and legacy TCM fabric modules */
 	struct se_session *se_sess;
 	struct se_tmr_req *se_tmr_req;
-	struct list_head se_queue_node;
 	struct list_head se_cmd_list;
 	struct completion cmd_wait_comp;
 	struct kref cmd_kref;
@@ -774,7 +765,6 @@ struct se_device {
 	struct se_obj dev_obj;
 	struct se_obj dev_access_obj;
 	struct se_obj dev_export_obj;
-	struct se_queue_obj dev_queue_obj;
 	spinlock_t delayed_cmd_lock;
 	spinlock_t execute_task_lock;
 	spinlock_t dev_reservation_lock;
@@ -790,8 +780,7 @@ struct se_device {
 	struct t10_pr_registration *dev_pr_res_holder;
 	struct list_head dev_sep_list;
 	struct list_head dev_tmr_list;
-	/* Pointer to descriptor for processing thread */
-	struct task_struct *process_thread;
+	struct workqueue_struct *tmr_wq;
 	struct work_struct qf_work_queue;
 	struct list_head delayed_cmd_list;
 	struct list_head state_list;
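[Note: the workqueue lifecycle that replaces kthread_run()/kthread_stop() can be sketched as a toy module; the "demo" names and module wrapper here are invented for illustration, and only the alloc_workqueue() flags and destroy_workqueue() teardown mirror the patch. WQ_MEM_RECLAIM gives the queue a rescuer thread so TMR handling can make forward progress under memory pressure (it sits in the I/O path), and max_active = 1 with WQ_UNBOUND preserves the single-threaded, unpinned execution semantics of the old per-device kthread.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_tmr_wq;

static int __init demo_init(void)
{
	/* Same flags as the patch; the format string builds "tmr-demo". */
	demo_tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
			"demo");
	if (!demo_tmr_wq)
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	/* Drains queued work, then frees the queue: the replacement for
	 * kthread_stop() in se_release_device_for_hba(). */
	destroy_workqueue(demo_tmr_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

This also explains why the error check changes shape in transport_add_device_to_core_hba(): kthread_run() returns an ERR_PTR() that must be tested with IS_ERR(), while alloc_workqueue() simply returns NULL on failure.]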