author    Nicholas Bellinger <nab@linux-iscsi.org>  2011-10-23 21:46:36 -0400
committer Nicholas Bellinger <nab@linux-iscsi.org>  2011-10-23 23:22:08 -0400
commit    2e982ab92dff057c639d4a43ccfa275be62f5e59 (patch)
tree      8c8467e06736efa042006b11210281894c75bd95
parent    415a090ade7e674018e3fa4255938e4c312339b3 (diff)
target: Remove legacy se_task->task_timer and associated logic
This patch removes the legacy usage of se_task->task_timer and the
associated infrastructure, which was originally added to help manage
buggy backend SCSI LLDs that in certain cases would never return an
outstanding task. This includes the removal of
target_complete_timeout_work(), the timeout logic in
transport_complete_task(), transport_task_timeout_handler(),
transport_start_task_timer(), the per-device task_timeout configfs
attribute, and all task_timeout-related structure members and defines
in target_core_base.h.

This is being removed in preparation for making
transport_complete_task() run in lock-less mode.

Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
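For context, the infrastructure removed below follows the classic per-object
kernel watchdog pattern of that era: init_timer() at allocation, arm the
timer with a .data cookie when the task is dispatched, flag a timeout from
(soft)irq context in the handler, and tear down with del_timer_sync() so a
handler still running on another CPU is guaranteed to have finished. A
minimal sketch of that pattern, using the old timer_list API this 2011 tree
provides (the demo_* names and struct are hypothetical, not part of the
target code):

    #include <linux/timer.h>
    #include <linux/jiffies.h>
    #include <linux/bitops.h>

    /* Hypothetical object guarded by a watchdog timer, standing in for
     * se_task and its task_timer member in the code removed below. */
    struct demo_task {
            struct timer_list timer;
            unsigned long timed_out;
    };

    /* Runs in (soft)irq context once the timeout expires. */
    static void demo_timeout_handler(unsigned long data)
    {
            struct demo_task *t = (struct demo_task *)data;

            set_bit(0, &t->timed_out);  /* atomic: safe vs. the submit path */
    }

    static void demo_arm(struct demo_task *t, unsigned int secs)
    {
            if (!secs)                  /* timeout of 0 means disabled */
                    return;

            init_timer(&t->timer);
            t->timer.expires = jiffies + secs * HZ;
            t->timer.data = (unsigned long)t;
            t->timer.function = demo_timeout_handler;
            add_timer(&t->timer);
    }

    static void demo_teardown(struct demo_task *t)
    {
            /* del_timer_sync(), unlike plain del_timer(), also waits for a
             * handler already executing on another CPU -- the subtlety the
             * removed comment in transport_free_dev_tasks() points out. */
            del_timer_sync(&t->timer);
    }

Dropping this timer, the TF_TIMEOUT flag, and the t_task_cdbs_timeout_left
counter means transport_complete_task() no longer has to coordinate with a
handler that can fire concurrently, which is what clears the way for the
lock-less conversion mentioned above.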
 drivers/target/target_core_configfs.c  |   4 -
 drivers/target/target_core_device.c    |  15 -
 drivers/target/target_core_transport.c | 156 -
 include/target/target_core_base.h      |   7 +-
 include/target/target_core_transport.h |  16 +-
 5 files changed, 4 insertions(+), 194 deletions(-)
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 1511a2ff86d8..e0c1e8a8dd4e 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -716,9 +716,6 @@ SE_DEV_ATTR_RO(hw_queue_depth);
 DEF_DEV_ATTRIB(queue_depth);
 SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR);
 
-DEF_DEV_ATTRIB(task_timeout);
-SE_DEV_ATTR(task_timeout, S_IRUGO | S_IWUSR);
-
 DEF_DEV_ATTRIB(max_unmap_lba_count);
 SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR);
 
@@ -752,7 +749,6 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
 	&target_core_dev_attrib_optimal_sectors.attr,
 	&target_core_dev_attrib_hw_queue_depth.attr,
 	&target_core_dev_attrib_queue_depth.attr,
-	&target_core_dev_attrib_task_timeout.attr,
 	&target_core_dev_attrib_max_unmap_lba_count.attr,
 	&target_core_dev_attrib_max_unmap_block_desc_count.attr,
 	&target_core_dev_attrib_unmap_granularity.attr,
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 81352b7f9130..f870c3bcfd82 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -914,21 +914,6 @@ void se_dev_set_default_attribs(
 	dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
 }
 
-int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
-{
-	if (task_timeout > DA_TASK_TIMEOUT_MAX) {
-		pr_err("dev[%p]: Passed task_timeout: %u larger then"
-			" DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
-		return -EINVAL;
-	} else {
-		dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout;
-		pr_debug("dev[%p]: Set SE Device task_timeout: %u\n",
-			dev, task_timeout);
-	}
-
-	return 0;
-}
-
 int se_dev_set_max_unmap_lba_count(
 	struct se_device *dev,
 	u32 max_unmap_lba_count)
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 5027619552f0..d75255804481 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -75,7 +75,6 @@ static int __transport_execute_tasks(struct se_device *dev);
 static void transport_complete_task_attr(struct se_cmd *cmd);
 static void transport_handle_queue_full(struct se_cmd *cmd,
 		struct se_device *dev);
-static void transport_direct_request_timeout(struct se_cmd *cmd);
 static void transport_free_dev_tasks(struct se_cmd *cmd);
 static int transport_generic_get_mem(struct se_cmd *cmd);
 static void transport_put_cmd(struct se_cmd *cmd);
@@ -682,26 +681,6 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)
 }
 EXPORT_SYMBOL(transport_complete_sync_cache);
 
-static void target_complete_timeout_work(struct work_struct *work)
-{
-	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
-	unsigned long flags;
-
-	/*
-	 * Reset cmd->t_se_count to allow transport_put_cmd()
-	 * to allow last call to free memory resources.
-	 */
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (atomic_read(&cmd->t_transport_timeout) > 1) {
-		int tmp = (atomic_read(&cmd->t_transport_timeout) - 1);
-
-		atomic_sub(tmp, &cmd->t_se_count);
-	}
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-	transport_put_cmd(cmd);
-}
-
 static void target_complete_failure_work(struct work_struct *work)
 {
 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -726,8 +705,6 @@ void transport_complete_task(struct se_task *task, int success)
 	if (dev)
 		atomic_inc(&dev->depth_left);
 
-	del_timer(&task->task_timer);
-
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	task->task_flags &= ~TF_ACTIVE;
 
@@ -749,35 +726,11 @@ void transport_complete_task(struct se_task *task, int success)
 	 * to complete for an exception condition
 	 */
 	if (task->task_flags & TF_REQUEST_STOP) {
-		/*
-		 * Decrement cmd->t_se_count if this task had
-		 * previously thrown its timeout exception handler.
-		 */
-		if (task->task_flags & TF_TIMEOUT) {
-			atomic_dec(&cmd->t_se_count);
-			task->task_flags &= ~TF_TIMEOUT;
-		}
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
 		complete(&task->task_stop_comp);
 		return;
 	}
 	/*
-	 * If the task's timeout handler has fired, use the t_task_cdbs_timeout
-	 * left counter to determine when the struct se_cmd is ready to be queued to
-	 * the processing thread.
-	 */
-	if (task->task_flags & TF_TIMEOUT) {
-		if (!atomic_dec_and_test(&cmd->t_task_cdbs_timeout_left)) {
-			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-			return;
-		}
-		INIT_WORK(&cmd->work, target_complete_timeout_work);
-		goto out_queue;
-	}
-	atomic_dec(&cmd->t_task_cdbs_timeout_left);
-
-	/*
 	 * Decrement the outstanding t_task_cdbs_left count. The last
 	 * struct se_task from struct se_cmd will complete itself into the
 	 * device queue depending upon int success.
@@ -800,7 +753,6 @@ void transport_complete_task(struct se_task *task, int success)
 		INIT_WORK(&cmd->work, target_complete_ok_work);
 	}
 
-out_queue:
 	cmd->t_state = TRANSPORT_COMPLETE;
 	atomic_set(&cmd->t_transport_active, 1);
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -1519,7 +1471,6 @@ transport_generic_get_task(struct se_cmd *cmd,
 	INIT_LIST_HEAD(&task->t_list);
 	INIT_LIST_HEAD(&task->t_execute_list);
 	INIT_LIST_HEAD(&task->t_state_list);
-	init_timer(&task->task_timer);
 	init_completion(&task->task_stop_comp);
 	task->task_se_cmd = cmd;
 	task->task_data_direction = data_direction;
@@ -1787,7 +1738,6 @@ bool target_stop_task(struct se_task *task, unsigned long *flags)
 	spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
 
 	pr_debug("Task %p waiting to complete\n", task);
-	del_timer_sync(&task->task_timer);
 	wait_for_completion(&task->task_stop_comp);
 	pr_debug("Task %p stopped successfully\n", task);
 
@@ -1876,7 +1826,6 @@ static void transport_generic_request_failure(
 	transport_complete_task_attr(cmd);
 
 	if (complete) {
-		transport_direct_request_timeout(cmd);
 		cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
 	}
 
@@ -1979,25 +1928,6 @@ queue_full:
 	transport_handle_queue_full(cmd, cmd->se_dev);
 }
 
-static void transport_direct_request_timeout(struct se_cmd *cmd)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (!atomic_read(&cmd->t_transport_timeout)) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		return;
-	}
-	if (atomic_read(&cmd->t_task_cdbs_timeout_left)) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		return;
-	}
-
-	atomic_sub(atomic_read(&cmd->t_transport_timeout),
-		&cmd->t_se_count);
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-}
-
 static inline u32 transport_lba_21(unsigned char *cdb)
 {
 	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
@@ -2040,80 +1970,6 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
 	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 }
 
-/*
- * Called from interrupt context.
- */
-static void transport_task_timeout_handler(unsigned long data)
-{
-	struct se_task *task = (struct se_task *)data;
-	struct se_cmd *cmd = task->task_se_cmd;
-	unsigned long flags;
-
-	pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
-
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-
-	/*
-	 * Determine if transport_complete_task() has already been called.
-	 */
-	if (!(task->task_flags & TF_ACTIVE)) {
-		pr_debug("transport task: %p cmd: %p timeout !TF_ACTIVE\n",
-			task, cmd);
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		return;
-	}
-
-	atomic_inc(&cmd->t_se_count);
-	atomic_inc(&cmd->t_transport_timeout);
-	cmd->t_tasks_failed = 1;
-
-	task->task_flags |= TF_TIMEOUT;
-	task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
-	task->task_scsi_status = 1;
-
-	if (task->task_flags & TF_REQUEST_STOP) {
-		pr_debug("transport task: %p cmd: %p timeout TF_REQUEST_STOP"
-			" == 1\n", task, cmd);
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		complete(&task->task_stop_comp);
-		return;
-	}
-
-	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
-		pr_debug("transport task: %p cmd: %p timeout non zero"
-			" t_task_cdbs_left\n", task, cmd);
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		return;
-	}
-	pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
-		task, cmd);
-
-	INIT_WORK(&cmd->work, target_complete_failure_work);
-	cmd->t_state = TRANSPORT_COMPLETE;
-	atomic_set(&cmd->t_transport_active, 1);
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-	queue_work(target_completion_wq, &cmd->work);
-}
-
-static void transport_start_task_timer(struct se_task *task)
-{
-	struct se_device *dev = task->task_se_cmd->se_dev;
-	int timeout;
-
-	/*
-	 * If the task_timeout is disabled, exit now.
-	 */
-	timeout = dev->se_sub_dev->se_dev_attrib.task_timeout;
-	if (!timeout)
-		return;
-
-	task->task_timer.expires = (get_jiffies_64() + timeout * HZ);
-	task->task_timer.data = (unsigned long) task;
-	task->task_timer.function = transport_task_timeout_handler;
-	add_timer(&task->task_timer);
-}
-
 static inline int transport_tcq_window_closed(struct se_device *dev)
 {
 	if (dev->dev_tcq_window_closed++ <
@@ -2296,7 +2152,6 @@ check_depth:
 	    cmd->t_task_list_num)
 		atomic_set(&cmd->t_transport_sent, 1);
 
-	transport_start_task_timer(task);
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 	/*
 	 * The struct se_cmd->transport_emulate_cdb() function pointer is used
@@ -2310,7 +2165,6 @@ check_depth:
 		spin_lock_irqsave(&cmd->t_state_lock, flags);
 		task->task_flags &= ~TF_ACTIVE;
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		del_timer_sync(&task->task_timer);
 		atomic_set(&cmd->t_transport_sent, 0);
 		transport_stop_tasks_for_cmd(cmd);
 		atomic_inc(&dev->depth_left);
@@ -2350,7 +2204,6 @@ check_depth:
 		spin_lock_irqsave(&cmd->t_state_lock, flags);
 		task->task_flags &= ~TF_ACTIVE;
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		del_timer_sync(&task->task_timer);
 		atomic_set(&cmd->t_transport_sent, 0);
 		transport_stop_tasks_for_cmd(cmd);
 		atomic_inc(&dev->depth_left);
@@ -3543,14 +3396,6 @@ static void transport_free_dev_tasks(struct se_cmd *cmd)
 	while (!list_empty(&dispose_list)) {
 		task = list_first_entry(&dispose_list, struct se_task, t_list);
 
-		/*
-		 * We already cancelled all pending timers in
-		 * transport_complete_task, but that was just a pure del_timer,
-		 * so do a full del_timer_sync here to make sure any handler
-		 * that was running at that point has finished execution.
-		 */
-		del_timer_sync(&task->task_timer);
-
 		if (task->task_sg != cmd->t_data_sg &&
 		    task->task_sg != cmd->t_bidi_data_sg)
 			kfree(task->task_sg);
@@ -4007,7 +3852,6 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
 	cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
 	atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
 	atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);
-	atomic_set(&cmd->t_task_cdbs_timeout_left, cmd->t_task_list_num);
 
 	/*
 	 * For WRITEs, let the fabric know its buffer is ready..
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index d210f1fe9962..35aa786f93da 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -75,8 +75,7 @@ enum transport_tpg_type_table {
 enum se_task_flags {
 	TF_ACTIVE		= (1 << 0),
 	TF_SENT			= (1 << 1),
-	TF_TIMEOUT		= (1 << 2),
-	TF_REQUEST_STOP		= (1 << 3),
+	TF_REQUEST_STOP		= (1 << 2),
 };
 
 /* Special transport agnostic struct se_cmd->t_states */
@@ -404,7 +403,6 @@ struct se_task {
 	int			task_error_status;
 	enum dma_data_direction	task_data_direction;
 	atomic_t		task_state_active;
-	struct timer_list	task_timer;
 	struct list_head	t_list;
 	struct list_head	t_execute_list;
 	struct list_head	t_state_list;
@@ -469,7 +467,6 @@ struct se_cmd {
 	atomic_t		t_se_count;
 	atomic_t		t_task_cdbs_left;
 	atomic_t		t_task_cdbs_ex_left;
-	atomic_t		t_task_cdbs_timeout_left;
 	atomic_t		t_task_cdbs_sent;
 	atomic_t		t_transport_aborted;
 	atomic_t		t_transport_active;
@@ -477,7 +474,6 @@ struct se_cmd {
 	atomic_t		t_transport_queue_active;
 	atomic_t		t_transport_sent;
 	atomic_t		t_transport_stop;
-	atomic_t		t_transport_timeout;
 	atomic_t		transport_dev_active;
 	atomic_t		transport_lun_active;
 	atomic_t		transport_lun_fe_stop;
@@ -646,7 +642,6 @@ struct se_dev_attrib {
 	u32		optimal_sectors;
 	u32		hw_queue_depth;
 	u32		queue_depth;
-	u32		task_timeout;
 	u32		max_unmap_lba_count;
 	u32		max_unmap_block_desc_count;
 	u32		unmap_granularity;
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index 32c586346c0e..a037a1a6fbba 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -22,10 +22,9 @@
 #define PYX_TRANSPORT_LU_COMM_FAILURE		-7
 #define PYX_TRANSPORT_UNKNOWN_MODE_PAGE		-8
 #define PYX_TRANSPORT_WRITE_PROTECTED		-9
-#define PYX_TRANSPORT_TASK_TIMEOUT		-10
-#define PYX_TRANSPORT_RESERVATION_CONFLICT	-11
-#define PYX_TRANSPORT_ILLEGAL_REQUEST		-12
-#define PYX_TRANSPORT_USE_SENSE_REASON		-13
+#define PYX_TRANSPORT_RESERVATION_CONFLICT	-10
+#define PYX_TRANSPORT_ILLEGAL_REQUEST		-11
+#define PYX_TRANSPORT_USE_SENSE_REASON		-12
 
 #ifndef SAM_STAT_RESERVATION_CONFLICT
 #define SAM_STAT_RESERVATION_CONFLICT		0x18
@@ -38,13 +37,6 @@
 #define TRANSPORT_PLUGIN_VHBA_PDEV		2
 #define TRANSPORT_PLUGIN_VHBA_VDEV		3
 
-/* For SE OBJ Plugins, in seconds */
-#define TRANSPORT_TIMEOUT_TUR			10
-#define TRANSPORT_TIMEOUT_TYPE_DISK		60
-#define TRANSPORT_TIMEOUT_TYPE_ROM		120
-#define TRANSPORT_TIMEOUT_TYPE_TAPE		600
-#define TRANSPORT_TIMEOUT_TYPE_OTHER		300
-
 /*
  * struct se_subsystem_dev->su_dev_flags
 */
@@ -61,8 +53,6 @@
 #define DF_SPC2_RESERVATIONS_WITH_ISID		0x00000004
 
 /* struct se_dev_attrib sanity values */
-/* 10 Minutes */
-#define DA_TASK_TIMEOUT_MAX			600
 /* Default max_unmap_lba_count */
 #define DA_MAX_UNMAP_LBA_COUNT			0
 /* Default max_unmap_block_desc_count */