path: root/include/target
author     Nicholas Bellinger <nab@linux-iscsi.org>    2011-06-13 17:46:09 -0400
committer  Nicholas Bellinger <nab@linux-iscsi.org>    2011-07-22 05:37:45 -0400
commit     07bde79a5c355dbca66ca4318645aa17b4c0d859 (patch)
tree       e602132193959df436a2f5d325cd3da9984ada4f /include/target
parent     695434e1cbd57f404110bf4ab187a5127ffd79bb (diff)
target: Add SCF_EMULATE_QUEUE_FULL -> transport_handle_queue_full
This patch adds SCF_EMULATE_QUEUE_FULL support using -EAGAIN failures via transport_handle_queue_full() to signal queue full in the completion path TFO->queue_data_in() and TFO->queue_status() callbacks.

This is done using a new se_cmd->transport_qf_callback() to handle the following queue full exception cases within target core:

*) TRANSPORT_COMPLETE_OK (for completion path queue full)

*) TRANSPORT_COMPLETE_QF_WP (for TRANSPORT_WRITE_PENDING queue full)

*) transport_send_check_condition_and_sense() failure paths in transport_generic_request_failure() and transport_generic_complete_ok()

All logic is driven using se_device->qf_work_queue -> target_qf_do_work() to requeue outstanding se_cmd at the head of se_dev->queue_obj->qobj_list for transport_processing_thread() execution.

Tested using tcm_qla2xxx with MAX_OUTSTANDING_COMMANDS=128 for FCP READ to trigger the TRANSPORT_COMPLETE_OK queue full cases, and a simulated TFO->write_pending() -EAGAIN failure to trigger TRANSPORT_COMPLETE_QF_WP.

Reported-by: Roland Dreier <roland@purestorage.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
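For context, the sketch below approximates the queue-full flow the commit message describes: a fabric callback returning -EAGAIN parks the command on the device's queue-full list, and the qf_work_queue worker later requeues it for the processing thread. This is an illustrative sketch only, not the code this patch adds to target_core_transport.c; transport_handle_queue_full() and target_qf_do_work() are named in the commit message, the se_cmd/se_device fields appear in the hunks below, and transport_add_cmd_to_queue() is assumed here to be the existing internal helper that feeds transport_processing_thread().

/*
 * Illustrative sketch of the queue-full path; simplified, not the
 * actual implementation in drivers/target/target_core_transport.c.
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <target/target_core_base.h>

/* Assumed existing helper that hands a command back to
 * transport_processing_thread() at the given t_state. */
extern void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state);

/* Called when a TFO callback (e.g. queue_data_in()/queue_status())
 * returns -EAGAIN: park the command on the device queue-full list
 * and kick the qf_work_queue worker. */
static void transport_handle_queue_full(struct se_cmd *cmd,
					struct se_device *dev,
					int (*qf_callback)(struct se_cmd *))
{
	spin_lock_irq(&dev->qf_cmd_lock);
	cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL;
	cmd->transport_qf_callback = qf_callback;
	list_add_tail(&cmd->se_qf_node, &dev->qf_cmd_list);
	atomic_inc(&dev->dev_qf_count);
	spin_unlock_irq(&dev->qf_cmd_lock);

	schedule_work(&dev->qf_work_queue);
}

/* Worker behind se_device->qf_work_queue: drain qf_cmd_list and
 * requeue each command so its completion can be retried. */
static void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					     qf_work_queue);
	struct se_cmd *cmd, *cmd_tmp;
	LIST_HEAD(qf_list);

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec(&dev->dev_qf_count);
		transport_add_cmd_to_queue(cmd, cmd->t_state);
	}
}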
Diffstat (limited to 'include/target')
-rw-r--r--    include/target/target_core_base.h    9
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 71abc4c5e2b4..cd163dd94cd4 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -99,6 +99,7 @@ enum transport_state_table {
 	TRANSPORT_FREE		= 15,
 	TRANSPORT_NEW_CMD_MAP	= 16,
 	TRANSPORT_FREE_CMD_INTR	= 17,
+	TRANSPORT_COMPLETE_QF_WP = 18,
 };
 
 /* Used for struct se_cmd->se_cmd_flags */
@@ -125,6 +126,7 @@ enum se_cmd_flags_table {
 	SCF_PASSTHROUGH_CONTIG_TO_SG	= 0x00200000,
 	SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000,
 	SCF_EMULATE_CDB_ASYNC		= 0x01000000,
+	SCF_EMULATE_QUEUE_FULL		= 0x02000000,
 };
 
 /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
@@ -466,6 +468,7 @@ struct se_cmd {
 	struct list_head	se_delayed_node;
 	struct list_head	se_ordered_node;
 	struct list_head	se_lun_node;
+	struct list_head	se_qf_node;
 	struct se_device	*se_dev;
 	struct se_dev_entry	*se_deve;
 	struct se_device	*se_obj_ptr;
@@ -480,6 +483,8 @@ struct se_cmd {
 	void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *);
 	void (*transport_wait_for_tasks)(struct se_cmd *, int, int);
 	void (*transport_complete_callback)(struct se_cmd *);
+	int (*transport_qf_callback)(struct se_cmd *);
+
 	unsigned char		*t_task_cdb;
 	unsigned char		__t_task_cdb[TCM_MAX_COMMAND_SIZE];
 	unsigned long long	t_task_lba;
@@ -743,6 +748,7 @@ struct se_device {
 	atomic_t		dev_status_thr_count;
 	atomic_t		dev_hoq_count;
 	atomic_t		dev_ordered_sync;
+	atomic_t		dev_qf_count;
 	struct se_obj		dev_obj;
 	struct se_obj		dev_access_obj;
 	struct se_obj		dev_export_obj;
@@ -758,6 +764,7 @@ struct se_device {
 	spinlock_t		dev_status_thr_lock;
 	spinlock_t		se_port_lock;
 	spinlock_t		se_tmr_lock;
+	spinlock_t		qf_cmd_lock;
 	/* Used for legacy SPC-2 reservationsa */
 	struct se_node_acl	*dev_reserved_node_acl;
 	/* Used for ALUA Logical Unit Group membership */
@@ -771,10 +778,12 @@ struct se_device {
 	struct task_struct	*process_thread;
 	pid_t			process_thread_pid;
 	struct task_struct	*dev_mgmt_thread;
+	struct work_struct	qf_work_queue;
 	struct list_head	delayed_cmd_list;
 	struct list_head	ordered_cmd_list;
 	struct list_head	execute_task_list;
 	struct list_head	state_task_list;
+	struct list_head	qf_cmd_list;
 	/* Pointer to associated SE HBA */
 	struct se_hba		*se_hba;
 	struct se_subsystem_dev	*se_sub_dev;
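As a usage note on the new int (*transport_qf_callback)(struct se_cmd *) member added above: a retry callback of roughly the following shape could be installed before calling transport_handle_queue_full(), so that the qf_work_queue worker can re-attempt whichever fabric callback returned -EAGAIN. The function name transport_complete_qf() and its body are illustrative assumptions for this sketch, not lifted from the patch.

/*
 * Hedged sketch: a completion-retry callback matching the new
 * transport_qf_callback signature. Assumes the existing se_cmd
 * fields se_tfo, se_cmd_flags and data_direction.
 */
#include <target/target_core_base.h>
#include <target/target_core_fabric_ops.h>

static int transport_complete_qf(struct se_cmd *cmd)
{
	/* If sense data was pending, retry the status callback. */
	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
		return cmd->se_tfo->queue_status(cmd);

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		/* READ-type payload: retry queue_data_in(). */
		return cmd->se_tfo->queue_data_in(cmd);
	case DMA_TO_DEVICE:
	case DMA_NONE:
		/* WRITE or no-data command: retry queue_status(). */
		return cmd->se_tfo->queue_status(cmd);
	default:
		return 0;
	}
}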