 drivers/target/target_core_transport.c | 194
 include/target/target_core_base.h      |   9
 2 files changed, 188 insertions(+), 15 deletions(-)
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 1ae6eb7a621b..056c4cb4736d 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -204,6 +204,9 @@ static int transport_generic_write_pending(struct se_cmd *);
 static int transport_processing_thread(void *param);
 static int __transport_execute_tasks(struct se_device *dev);
 static void transport_complete_task_attr(struct se_cmd *cmd);
+static int transport_complete_qf(struct se_cmd *cmd);
+static void transport_handle_queue_full(struct se_cmd *cmd,
+                struct se_device *dev, int (*qf_callback)(struct se_cmd *));
 static void transport_direct_request_timeout(struct se_cmd *cmd);
 static void transport_free_dev_tasks(struct se_cmd *cmd);
 static u32 transport_allocate_tasks(struct se_cmd *cmd,
@@ -768,7 +771,11 @@ static void transport_add_cmd_to_queue(
        }
 
        spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-       list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
+       if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) {
+               cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL;
+               list_add(&cmd->se_queue_node, &qobj->qobj_list);
+       } else
+               list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
        atomic_inc(&cmd->t_transport_queue_active);
        spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
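The head-of-queue insertion above is the consumer half of the QUEUE_FULL requeue: a command handed back by the worker still carries SCF_EMULATE_QUEUE_FULL, so it is pushed ahead of newer work with list_add() (and the flag cleared) instead of being appended with list_add_tail(). A minimal userspace sketch of that decision follows; the names (struct cmd, queue_full_flag, cmd_queue) are hypothetical stand-ins, not the TCM API.

```c
/* Sketch only: head-vs-tail insertion keyed on a requeue flag.
 * A singly-linked list stands in for the kernel's list_head queue. */
#include <stdbool.h>
#include <stddef.h>

struct cmd {
        bool queue_full_flag;   /* ~ SCF_EMULATE_QUEUE_FULL */
        struct cmd *next;
};

static struct cmd *cmd_queue;   /* head = next command to dispatch */

static void queue_cmd(struct cmd *c)
{
        if (c->queue_full_flag) {
                /* Requeued command: clear the flag, retry it first. */
                c->queue_full_flag = false;
                c->next = cmd_queue;            /* ~ list_add() */
                cmd_queue = c;
                return;
        }
        /* Normal arrival: append at the tail (~ list_add_tail()). */
        struct cmd **pp = &cmd_queue;
        while (*pp)
                pp = &(*pp)->next;
        c->next = NULL;
        *pp = c;
}
```

Head insertion keeps the dispatch order seen by the initiator intact: a deferred command is retried before anything that arrived after it.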
@@ -1102,6 +1109,40 @@ void transport_remove_task_from_execute_queue(
        spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 }
 
+/*
+ * Handle QUEUE_FULL / -EAGAIN status
+ */
+
+static void target_qf_do_work(struct work_struct *work)
+{
+       struct se_device *dev = container_of(work, struct se_device,
+                               qf_work_queue);
+       struct se_cmd *cmd, *cmd_tmp;
+
+       spin_lock_irq(&dev->qf_cmd_lock);
+       list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) {
+
+               list_del(&cmd->se_qf_node);
+               atomic_dec(&dev->dev_qf_count);
+               smp_mb__after_atomic_dec();
+               spin_unlock_irq(&dev->qf_cmd_lock);
+
+               printk(KERN_INFO "Processing %s cmd: %p QUEUE_FULL in work queue"
+                       " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
+                       (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" :
+                       (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
+                       : "UNKNOWN");
+               /*
+                * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd
+                * has been added to head of queue
+                */
+               transport_add_cmd_to_queue(cmd, cmd->t_state);
+
+               spin_lock_irq(&dev->qf_cmd_lock);
+       }
+       spin_unlock_irq(&dev->qf_cmd_lock);
+}
+
 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
 {
        switch (cmd->data_direction) {
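target_qf_do_work() drains the per-device list while repeatedly dropping qf_cmd_lock, because transport_add_cmd_to_queue() takes its own lock and must not run nested under it. Below is a compilable pthreads sketch of the same drain pattern in the restart-from-head form; all names are hypothetical, not the kernel API.

```c
/* Sketch only: pop one entry, drop the lock to act on it, retake
 * the lock before touching the list again. */
#include <pthread.h>
#include <stddef.h>

struct qf_cmd { struct qf_cmd *next; };

static pthread_mutex_t qf_lock = PTHREAD_MUTEX_INITIALIZER;
static struct qf_cmd *qf_list;          /* ~ dev->qf_cmd_list */

static void requeue(struct qf_cmd *c)   /* ~ transport_add_cmd_to_queue() */
{
        (void)c;
}

static void qf_do_work(void)
{
        pthread_mutex_lock(&qf_lock);
        while (qf_list != NULL) {
                struct qf_cmd *c = qf_list;
                qf_list = c->next;              /* ~ list_del() */
                pthread_mutex_unlock(&qf_lock);

                requeue(c);                     /* may take other locks */

                pthread_mutex_lock(&qf_lock);
        }
        pthread_mutex_unlock(&qf_lock);
}
```

The kernel version walks the list with list_for_each_entry_safe(); the sketch simply restarts from the head after every relock, which also tolerates entries added concurrently.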
@@ -1531,6 +1572,7 @@ struct se_device *transport_add_device_to_core_hba(
        INIT_LIST_HEAD(&dev->delayed_cmd_list);
        INIT_LIST_HEAD(&dev->ordered_cmd_list);
        INIT_LIST_HEAD(&dev->state_task_list);
+       INIT_LIST_HEAD(&dev->qf_cmd_list);
        spin_lock_init(&dev->execute_task_lock);
        spin_lock_init(&dev->delayed_cmd_lock);
        spin_lock_init(&dev->ordered_cmd_lock);
@@ -1541,6 +1583,7 @@ struct se_device *transport_add_device_to_core_hba(
        spin_lock_init(&dev->dev_status_thr_lock);
        spin_lock_init(&dev->se_port_lock);
        spin_lock_init(&dev->se_tmr_lock);
+       spin_lock_init(&dev->qf_cmd_lock);
 
        dev->queue_depth = dev_limits->queue_depth;
        atomic_set(&dev->depth_left, dev->queue_depth);
@@ -1584,7 +1627,10 @@ struct se_device *transport_add_device_to_core_hba(
                        dev->transport->name);
                goto out;
        }
-
+       /*
+        * Setup work_queue for QUEUE_FULL
+        */
+       INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
        /*
         * Preload the initial INQUIRY const values if we are doing
         * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
@@ -1697,6 +1743,7 @@ void transport_init_se_cmd(
        INIT_LIST_HEAD(&cmd->se_lun_node);
        INIT_LIST_HEAD(&cmd->se_delayed_node);
        INIT_LIST_HEAD(&cmd->se_ordered_node);
+       INIT_LIST_HEAD(&cmd->se_qf_node);
 
        INIT_LIST_HEAD(&cmd->t_mem_list);
        INIT_LIST_HEAD(&cmd->t_mem_bidi_list);
@@ -2019,6 +2066,8 @@ static void transport_generic_request_failure(
        int complete,
        int sc)
 {
+       int ret = 0;
+
        DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
                " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
                cmd->t_task_cdb[0]);
@@ -2109,7 +2158,9 @@ static void transport_generic_request_failure(
                        cmd->orig_fe_lun, 0x2C,
                        ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
 
-               cmd->se_tfo->queue_status(cmd);
+               ret = cmd->se_tfo->queue_status(cmd);
+               if (ret == -EAGAIN)
+                       goto queue_full;
                goto check_stop;
        case PYX_TRANSPORT_USE_SENSE_REASON:
                /*
@@ -2126,13 +2177,22 @@ static void transport_generic_request_failure(
 
        if (!sc)
                transport_new_cmd_failure(cmd);
-       else
-               transport_send_check_condition_and_sense(cmd,
+       else {
+               ret = transport_send_check_condition_and_sense(cmd,
                        cmd->scsi_sense_reason, 0);
+               if (ret == -EAGAIN)
+                       goto queue_full;
+       }
+
 check_stop:
        transport_lun_remove_cmd(cmd);
        if (!(transport_cmd_check_stop_to_fabric(cmd)))
                ;
+       return;
+
+queue_full:
+       cmd->t_state = TRANSPORT_COMPLETE_OK;
+       transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
 }
 
 static void transport_direct_request_timeout(struct se_cmd *cmd)
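With this hunk transport_generic_request_failure() gains a second exit: a fabric callout that returns -EAGAIN no longer falls through to check_stop, but resets t_state to TRANSPORT_COMPLETE_OK and parks the command for a later retry. A compilable sketch of that two-exit shape; queue_status(), defer_for_retry() and the state constant are hypothetical stand-ins.

```c
/* Sketch only: -EAGAIN diverts to a queue-full exit; every other
 * outcome keeps the original completion path. */
#include <errno.h>

struct cmd { int t_state; };
enum { STATE_COMPLETE_OK = 1 };

static int queue_status(struct cmd *c)      { (void)c; return -EAGAIN; }
static void defer_for_retry(struct cmd *c)  { (void)c; }
static void check_stop(struct cmd *c)       { (void)c; }

static void request_failure(struct cmd *c)
{
        int ret = queue_status(c);
        if (ret == -EAGAIN)
                goto queue_full;

        check_stop(c);          /* the pre-existing exit */
        return;

queue_full:
        /* Replay completion later, as if the callout had succeeded. */
        c->t_state = STATE_COMPLETE_OK;
        defer_for_retry(c);
}
```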
@@ -3637,9 +3697,53 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
        wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
 }
 
+static int transport_complete_qf(struct se_cmd *cmd)
+{
+       int ret = 0;
+
+       if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
+               return cmd->se_tfo->queue_status(cmd);
+
+       switch (cmd->data_direction) {
+       case DMA_FROM_DEVICE:
+               ret = cmd->se_tfo->queue_data_in(cmd);
+               break;
+       case DMA_TO_DEVICE:
+               if (!list_empty(&cmd->t_mem_bidi_list)) {
+                       ret = cmd->se_tfo->queue_data_in(cmd);
+                       if (ret < 0)
+                               return ret;
+               }
+               /* Fall through for DMA_TO_DEVICE */
+       case DMA_NONE:
+               ret = cmd->se_tfo->queue_status(cmd);
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+static void transport_handle_queue_full(
+       struct se_cmd *cmd,
+       struct se_device *dev,
+       int (*qf_callback)(struct se_cmd *))
+{
+       spin_lock_irq(&dev->qf_cmd_lock);
+       cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL;
+       cmd->transport_qf_callback = qf_callback;
+       list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
+       atomic_inc(&dev->dev_qf_count);
+       smp_mb__after_atomic_inc();
+       spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
+
+       schedule_work(&cmd->se_dev->qf_work_queue);
+}
+
 static void transport_generic_complete_ok(struct se_cmd *cmd)
 {
-       int reason = 0;
+       int reason = 0, ret;
        /*
         * Check if we need to move delayed/dormant tasks from cmds on the
         * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
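transport_handle_queue_full() is the producer half of the mechanism: it flags the command, records which completion step to re-run in transport_qf_callback, appends the command to the per-device qf_cmd_list under qf_cmd_lock, bumps dev_qf_count, and kicks the work item. A single-threaded userspace model of that hand-off, with all names hypothetical:

```c
/* Sketch only: flag + retry callback + per-device deferral list.
 * The kernel takes qf_cmd_lock around the list manipulation. */
#include <stdbool.h>
#include <stddef.h>

struct dev;

struct cmd {
        bool qf_flagged;                 /* ~ SCF_EMULATE_QUEUE_FULL */
        int (*qf_retry)(struct cmd *);   /* ~ transport_qf_callback */
        struct cmd *qf_next;             /* ~ se_qf_node */
        struct dev *dev;
};

struct dev {
        struct cmd *qf_head;             /* ~ qf_cmd_list */
        int qf_count;                    /* ~ dev_qf_count */
};

static void kick_worker(struct dev *d) { (void)d; /* ~ schedule_work() */ }

static void handle_queue_full(struct cmd *c, int (*retry)(struct cmd *))
{
        struct dev *d = c->dev;

        c->qf_flagged = true;
        c->qf_retry = retry;
        c->qf_next = d->qf_head;   /* kernel uses list_add_tail(); a
                                    * stack push keeps the sketch short */
        d->qf_head = c;
        d->qf_count++;

        kick_worker(d);
}
```

Note that schedule_work() is safe to call redundantly: if the work item is already pending it is not queued a second time.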
@@ -3648,6 +3752,21 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
        if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
                transport_complete_task_attr(cmd);
        /*
+        * Check to schedule QUEUE_FULL work, or execute an existing
+        * cmd->transport_qf_callback()
+        */
+       if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
+               schedule_work(&cmd->se_dev->qf_work_queue);
+
+       if (cmd->transport_qf_callback) {
+               ret = cmd->transport_qf_callback(cmd);
+               if (ret < 0)
+                       goto queue_full;
+
+               cmd->transport_qf_callback = NULL;
+               goto done;
+       }
+       /*
         * Check if we need to retrieve a sense buffer from
         * the struct se_cmd in question.
         */
@@ -3660,8 +3779,11 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
         * a non GOOD status.
         */
        if (cmd->scsi_status) {
-               transport_send_check_condition_and_sense(
+               ret = transport_send_check_condition_and_sense(
                        cmd, reason, 1);
+               if (ret == -EAGAIN)
+                       goto queue_full;
+
                transport_lun_remove_cmd(cmd);
                transport_cmd_check_stop_to_fabric(cmd);
                return;
@@ -3693,7 +3815,9 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
                                cmd->t_task_buf,
                                cmd->data_length);
 
-               cmd->se_tfo->queue_data_in(cmd);
+               ret = cmd->se_tfo->queue_data_in(cmd);
+               if (ret == -EAGAIN)
+                       goto queue_full;
                break;
        case DMA_TO_DEVICE:
                spin_lock(&cmd->se_lun->lun_sep_lock);
@@ -3712,19 +3836,30 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
                                        cmd->data_length;
                        }
                        spin_unlock(&cmd->se_lun->lun_sep_lock);
-                       cmd->se_tfo->queue_data_in(cmd);
+                       ret = cmd->se_tfo->queue_data_in(cmd);
+                       if (ret == -EAGAIN)
+                               goto queue_full;
                        break;
                }
                /* Fall through for DMA_TO_DEVICE */
        case DMA_NONE:
-               cmd->se_tfo->queue_status(cmd);
+               ret = cmd->se_tfo->queue_status(cmd);
+               if (ret == -EAGAIN)
+                       goto queue_full;
                break;
        default:
                break;
        }
 
+done:
        transport_lun_remove_cmd(cmd);
        transport_cmd_check_stop_to_fabric(cmd);
+       return;
+
+queue_full:
+       printk(KERN_INFO "Handling complete_ok QUEUE_FULL: se_cmd: %p,"
+               " data_direction: %d\n", cmd, cmd->data_direction);
+       transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
 }
 
 static void transport_free_dev_tasks(struct se_cmd *cmd)
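Every fabric callout in transport_generic_complete_ok() now funnels -EAGAIN to a single queue_full exit, with a done label for the shared teardown. A sketch of the direction dispatch with that shared exit, again with hypothetical names:

```c
/* Sketch only: completion dispatch by data direction, sharing one
 * queue-full exit as in the hunks above. */
#include <errno.h>

enum dir { FROM_DEVICE, TO_DEVICE, NONE };

struct cmd { enum dir dir; };

static int queue_data_in(struct cmd *c)     { (void)c; return 0; }
static int queue_status(struct cmd *c)      { (void)c; return 0; }
static void defer_for_retry(struct cmd *c)  { (void)c; }
static void finish(struct cmd *c)           { (void)c; }

static void complete_ok(struct cmd *c)
{
        int ret = 0;

        switch (c->dir) {
        case FROM_DEVICE:
                ret = queue_data_in(c); /* READ payload to the initiator */
                break;
        case TO_DEVICE:                 /* plain WRITE: status only */
        case NONE:
                ret = queue_status(c);
                break;
        }
        if (ret == -EAGAIN)
                goto queue_full;

        finish(c);              /* ~ the done label */
        return;

queue_full:
        defer_for_retry(c);     /* ~ transport_handle_queue_full() */
}
```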
@@ -4866,6 +5001,11 @@ void transport_generic_process_write(struct se_cmd *cmd)
 }
 EXPORT_SYMBOL(transport_generic_process_write);
 
+static int transport_write_pending_qf(struct se_cmd *cmd)
+{
+       return cmd->se_tfo->write_pending(cmd);
+}
+
 /* transport_generic_write_pending():
  *
  *
@@ -4878,6 +5018,17 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        cmd->t_state = TRANSPORT_WRITE_PENDING;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+       if (cmd->transport_qf_callback) {
+               ret = cmd->transport_qf_callback(cmd);
+               if (ret == -EAGAIN)
+                       goto queue_full;
+               else if (ret < 0)
+                       return ret;
+
+               cmd->transport_qf_callback = NULL;
+               return 0;
+       }
        /*
         * For the TCM control CDBs using a contiguous buffer, do the memcpy
         * from the passed Linux/SCSI struct scatterlist located at
@@ -4903,10 +5054,19 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
         * frontend know that WRITE buffers are ready.
         */
        ret = cmd->se_tfo->write_pending(cmd);
-       if (ret < 0)
+       if (ret == -EAGAIN)
+               goto queue_full;
+       else if (ret < 0)
                return ret;
 
        return PYX_TRANSPORT_WRITE_PENDING;
+
+queue_full:
+       printk(KERN_INFO "Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
+       cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
+       transport_handle_queue_full(cmd, cmd->se_dev,
+                       transport_write_pending_qf);
+       return ret;
 }
 
 void transport_release_cmd(struct se_cmd *cmd)
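On the WRITE side, the fabric's write_pending() callout can itself hit queue-full; the new TRANSPORT_COMPLETE_QF_WP state remembers that the command must re-drive write_pending() (via transport_write_pending_qf) rather than replay completion. A sketch of that retry shape, with write_pending(), defer_for_retry() and the state names as hypothetical stand-ins:

```c
/* Sketch only: queue-full on the WRITE buffer hand-off defers the
 * command with a callback that re-runs the same callout. */
#include <errno.h>

struct cmd {
        int t_state;
        int (*qf_retry)(struct cmd *);
};
enum { STATE_WRITE_PENDING = 1, STATE_QF_WP = 2 };

static int write_pending(struct cmd *c) { (void)c; return -EAGAIN; }

static void defer_for_retry(struct cmd *c, int (*retry)(struct cmd *))
{
        c->qf_retry = retry;    /* ~ transport_qf_callback */
}

static int write_pending_qf(struct cmd *c)      /* re-run the callout */
{
        return write_pending(c);
}

static int generic_write_pending(struct cmd *c)
{
        int ret;

        c->t_state = STATE_WRITE_PENDING;
        ret = write_pending(c);
        if (ret == -EAGAIN)
                goto queue_full;
        return ret;

queue_full:
        c->t_state = STATE_QF_WP;   /* worker requeues with this state */
        defer_for_retry(c, write_pending_qf);
        return ret;
}
```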
@@ -5410,8 +5570,7 @@ int transport_send_check_condition_and_sense(
        cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER + offset;
 
 after_reason:
-       cmd->se_tfo->queue_status(cmd);
-       return 0;
+       return cmd->se_tfo->queue_status(cmd);
 }
 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
 
@@ -5733,7 +5892,9 @@ get_cmd:
                /* Fall through */
                case TRANSPORT_NEW_CMD:
                        ret = transport_generic_new_cmd(cmd);
-                       if (ret < 0) {
+                       if (ret == -EAGAIN)
+                               break;
+                       else if (ret < 0) {
                                cmd->transport_error_status = ret;
                                transport_generic_request_failure(cmd, NULL,
                                        0, (cmd->data_direction !=
@@ -5763,6 +5924,9 @@ get_cmd:
                        transport_stop_all_task_timers(cmd);
                        transport_generic_request_timeout(cmd);
                        break;
+               case TRANSPORT_COMPLETE_QF_WP:
+                       transport_generic_write_pending(cmd);
+                       break;
                default:
                        printk(KERN_ERR "Unknown t_state: %d deferred_t_state:"
                                " %d for ITT: 0x%08x i_state: %d on SE LUN:"
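The processing thread closes the loop: when the worker requeues a deferred WRITE, the command comes back with t_state == TRANSPORT_COMPLETE_QF_WP and the thread simply calls transport_generic_write_pending() again, while a new command that hits -EAGAIN just breaks out, since it is already parked on the qf list. A minimal dispatch sketch modeled on the hunk above, hypothetical names throughout:

```c
/* Sketch only: state dispatch in a processing-thread loop. */
#include <errno.h>

struct cmd { int t_state; };
enum { T_NEW_CMD = 1, T_COMPLETE_QF_WP = 2 };

static int new_cmd(struct cmd *c)               { (void)c; return 0; }
static void request_failure(struct cmd *c)      { (void)c; }
static void write_pending_again(struct cmd *c)  { (void)c; }

static void dispatch(struct cmd *c)
{
        int ret;

        switch (c->t_state) {
        case T_NEW_CMD:
                ret = new_cmd(c);
                if (ret == -EAGAIN)
                        break;  /* already parked; retried later */
                if (ret < 0)
                        request_failure(c);
                break;
        case T_COMPLETE_QF_WP:
                write_pending_again(c); /* re-drive the WRITE hand-off */
                break;
        default:
                break;
        }
}
```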
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 71abc4c5e2b4..cd163dd94cd4 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -99,6 +99,7 @@ enum transport_state_table {
        TRANSPORT_FREE = 15,
        TRANSPORT_NEW_CMD_MAP = 16,
        TRANSPORT_FREE_CMD_INTR = 17,
+       TRANSPORT_COMPLETE_QF_WP = 18,
 };
 
 /* Used for struct se_cmd->se_cmd_flags */
@@ -125,6 +126,7 @@ enum se_cmd_flags_table {
        SCF_PASSTHROUGH_CONTIG_TO_SG = 0x00200000,
        SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000,
        SCF_EMULATE_CDB_ASYNC = 0x01000000,
+       SCF_EMULATE_QUEUE_FULL = 0x02000000,
 };
 
 /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
@@ -466,6 +468,7 @@ struct se_cmd {
        struct list_head        se_delayed_node;
        struct list_head        se_ordered_node;
        struct list_head        se_lun_node;
+       struct list_head        se_qf_node;
        struct se_device        *se_dev;
        struct se_dev_entry     *se_deve;
        struct se_device        *se_obj_ptr;
@@ -480,6 +483,8 @@ struct se_cmd {
        void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *);
        void (*transport_wait_for_tasks)(struct se_cmd *, int, int);
        void (*transport_complete_callback)(struct se_cmd *);
+       int (*transport_qf_callback)(struct se_cmd *);
+
        unsigned char   *t_task_cdb;
        unsigned char   __t_task_cdb[TCM_MAX_COMMAND_SIZE];
        unsigned long long      t_task_lba;
@@ -743,6 +748,7 @@ struct se_device {
        atomic_t                dev_status_thr_count;
        atomic_t                dev_hoq_count;
        atomic_t                dev_ordered_sync;
+       atomic_t                dev_qf_count;
        struct se_obj           dev_obj;
        struct se_obj           dev_access_obj;
        struct se_obj           dev_export_obj;
@@ -758,6 +764,7 @@ struct se_device {
        spinlock_t              dev_status_thr_lock;
        spinlock_t              se_port_lock;
        spinlock_t              se_tmr_lock;
+       spinlock_t              qf_cmd_lock;
        /* Used for legacy SPC-2 reservationsa */
        struct se_node_acl      *dev_reserved_node_acl;
        /* Used for ALUA Logical Unit Group membership */
@@ -771,10 +778,12 @@ struct se_device {
        struct task_struct      *process_thread;
        pid_t                   process_thread_pid;
        struct task_struct      *dev_mgmt_thread;
+       struct work_struct      qf_work_queue;
        struct list_head        delayed_cmd_list;
        struct list_head        ordered_cmd_list;
        struct list_head        execute_task_list;
        struct list_head        state_task_list;
+       struct list_head        qf_cmd_list;
        /* Pointer to associated SE HBA */
        struct se_hba           *se_hba;
        struct se_subsystem_dev *se_sub_dev;
