author     Andy Grover <agrover@redhat.com>            2011-07-19 04:55:10 -0400
committer  Nicholas Bellinger <nab@linux-iscsi.org>    2011-07-22 05:37:43 -0400
commit     e3d6f909ed803d92a5ac9b4a2c087e0eae9b90d0 (patch)
tree       2eb65e958a2cc35c896a0e184ec09edcb9076b3b /drivers/target
parent     a8c6da90b823fb94ca76ca0df6bb44e6e205dc87 (diff)
target: Core cleanups from AGrover (round 1)
This patch contains the squashed version of a number of cleanups and
minor fixes from Andy's initial series (round 1) for target core this
past spring. The condensed log looks like:
target: use errno values instead of returning -1 for everything
target: Rename transport_calc_sg_num to transport_init_task_sg
target: Fix leak in error path in transport_init_task_sg
target/pscsi: Remove pscsi_get_sh() usage
target: Make two runtime checks into WARN_ONs
target: Remove hba queue depth and convert to spin_lock_irq usage
target: dev->dev_status_queue_obj is unused
target: Make struct se_queue_req.cmd type struct se_cmd *
target: Remove __transport_get_qr_from_queue()
target: Rename se_dev->g_se_dev_list to se_dev_node
target: Remove struct se_global
target: Simplify scsi mib index table code
target: Make dev_queue_obj a member of se_device instead of a pointer
target: remove extraneous returns at end of void functions
target: Ensure transport_dump_vpd_ident_type returns null-terminated str
target: Function pointers don't need to use '&' to be assigned
target: Fix comment in __transport_execute_tasks()
target: Misc style cleanups
target: rename struct pr_reservation_template to pr_reservation
target: Remove #defines that just perform indirection
target: Inline transport_get_task_from_execute_queue()
target: Minor header comment fixes
Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Diffstat (limited to 'drivers/target')
25 files changed, 1687 insertions(+), 1855 deletions(-)
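Two conversion patterns account for most of the churn in the diff below. First, functions that used to return a bare -1 on any failure now return a distinct negative errno value, so callers can propagate a meaningful error code. A minimal sketch of that convention, with hypothetical struct and function names (not from this patch):

	#include <linux/types.h>
	#include <linux/errno.h>

	struct example_group {
		u16 id;
		int valid_id;
		u32 count;
	};

	/* Hypothetical sketch: distinct failures return distinct errno
	 * values instead of an undifferentiated -1.
	 */
	static int example_set_group_id(struct example_group *grp, u16 id)
	{
		if (grp->valid_id)
			return -EINVAL;	/* caller passed a duplicate ID */
		if (grp->count == 0x0000ffff)
			return -ENOSPC;	/* 16-bit ID space exhausted */

		grp->id = id;
		grp->valid_id = 1;
		grp->count++;
		return 0;
	}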
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 2d0f22a91f67..2f19e1926493 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -118,7 +118,7 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
 	 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
 	 */
 	if (scsi_bidi_cmnd(sc))
-		T_TASK(se_cmd)->t_tasks_bidi = 1;
+		se_cmd->t_task->t_tasks_bidi = 1;
 	/*
 	 * Locate the struct se_lun pointer and attach it to struct se_cmd
 	 */
@@ -176,7 +176,7 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 	 * For BIDI commands, pass in the extra READ buffer
 	 * to transport_generic_map_mem_to_cmd() below..
 	 */
-	if (T_TASK(se_cmd)->t_tasks_bidi) {
+	if (se_cmd->t_task->t_tasks_bidi) {
 		struct scsi_data_buffer *sdb = scsi_in(sc);
 
 		mem_bidi_ptr = (void *)sdb->table.sgl;
@@ -1402,9 +1402,9 @@ static int tcm_loop_register_configfs(void)
 	 * Register the top level struct config_item_type with TCM core
 	 */
 	fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
-	if (!fabric) {
+	if (IS_ERR(fabric)) {
 		printk(KERN_ERR "tcm_loop_register_configfs() failed!\n");
-		return -1;
+		return PTR_ERR(fabric);
 	}
 	/*
 	 * Setup the fabric API of function pointers used by target_core_mod
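The hunk above depends on the second pattern: target_fabric_configfs_init() reports failure as an ERR_PTR-encoded pointer rather than NULL, so the caller tests with IS_ERR() and recovers the errno with PTR_ERR(). A minimal sketch of that <linux/err.h> idiom, using a hypothetical allocator (the struct and function names below are illustrative, not part of the patch):

	#include <linux/err.h>
	#include <linux/slab.h>

	struct example_fabric {
		int dummy;
	};

	/* Hypothetical allocator: the errno is encoded into the pointer. */
	static struct example_fabric *example_fabric_init(void)
	{
		struct example_fabric *fabric;

		fabric = kzalloc(sizeof(*fabric), GFP_KERNEL);
		if (!fabric)
			return ERR_PTR(-ENOMEM);
		return fabric;
	}

	static int example_register(void)
	{
		struct example_fabric *fabric = example_fabric_init();

		/* IS_ERR() detects an encoded errno; PTR_ERR() recovers it. */
		if (IS_ERR(fabric))
			return PTR_ERR(fabric);
		return 0;
	}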
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 47abb42d9c36..bfc42adea510 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -46,6 +46,14 @@ static int core_alua_set_tg_pt_secondary_state(
 		struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
 		struct se_port *port, int explict, int offline);
 
+static u16 alua_lu_gps_counter;
+static u32 alua_lu_gps_count;
+
+static DEFINE_SPINLOCK(lu_gps_lock);
+static LIST_HEAD(lu_gps_list);
+
+struct t10_alua_lu_gp *default_lu_gp;
+
 /*
  * REPORT_TARGET_PORT_GROUPS
  *
@@ -53,16 +61,16 @@ static int core_alua_set_tg_pt_secondary_state(
  */
 int core_emulate_report_target_port_groups(struct se_cmd *cmd)
 {
-	struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
+	struct se_subsystem_dev *su_dev = cmd->se_lun->lun_se_dev->se_sub_dev;
 	struct se_port *port;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
-	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf;
 	u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
 				    Target port group descriptor */
 
-	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
-	list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
 			tg_pt_gp_list) {
 		/*
 		 * PREF: Preferred target port bit, determine if this
@@ -124,7 +132,7 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
 		}
 		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 	}
-	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 	/*
 	 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
 	 */
@@ -143,13 +151,13 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
  */
 int core_emulate_set_target_port_groups(struct se_cmd *cmd)
 {
-	struct se_device *dev = SE_DEV(cmd);
-	struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
-	struct se_port *port, *l_port = SE_LUN(cmd)->lun_sep;
-	struct se_node_acl *nacl = SE_SESS(cmd)->se_node_acl;
+	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	struct se_port *port, *l_port = cmd->se_lun->lun_sep;
+	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
 	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
-	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf;
 	unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */
 	u32 len = 4; /* Skip over RESERVED area in header */
 	int alua_access_state, primary = 0, rc;
@@ -224,9 +232,9 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
 			 * Locate the matching target port group ID from
 			 * the global tg_pt_gp list
 			 */
-			spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 			list_for_each_entry(tg_pt_gp,
-					&T10_ALUA(su_dev)->tg_pt_gps_list,
+					&su_dev->t10_alua.tg_pt_gps_list,
 					tg_pt_gp_list) {
 				if (!(tg_pt_gp->tg_pt_gp_valid_id))
 					continue;
@@ -236,18 +244,18 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
 
 				atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
 				smp_mb__after_atomic_inc();
-				spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+				spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 
 				rc = core_alua_do_port_transition(tg_pt_gp,
 						dev, l_port, nacl,
 						alua_access_state, 1);
 
-				spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+				spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 				atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
 				smp_mb__after_atomic_dec();
 				break;
 			}
-			spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 			/*
 			 * If not matching target port group ID can be located
 			 * throw an exception with ASCQ: INVALID_PARAMETER_LIST
@@ -464,7 +472,7 @@ static int core_alua_state_check(
 	unsigned char *cdb,
 	u8 *alua_ascq)
 {
-	struct se_lun *lun = SE_LUN(cmd);
+	struct se_lun *lun = cmd->se_lun;
 	struct se_port *port = lun->lun_sep;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
@@ -522,7 +530,7 @@ static int core_alua_state_check(
 	default:
 		printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n",
 				out_alua_state);
-		return -1;
+		return -EINVAL;
 	}
 
 	return 0;
@@ -553,7 +561,7 @@ static int core_alua_check_transition(int state, int *primary)
 		break;
 	default:
 		printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", state);
-		return -1;
+		return -EINVAL;
 	}
 
 	return 0;
@@ -866,9 +874,9 @@ int core_alua_do_port_transition(
 		smp_mb__after_atomic_inc();
 		spin_unlock(&lu_gp->lu_gp_lock);
 
-		spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 		list_for_each_entry(tg_pt_gp,
-				&T10_ALUA(su_dev)->tg_pt_gps_list,
+				&su_dev->t10_alua.tg_pt_gps_list,
 				tg_pt_gp_list) {
 
 			if (!(tg_pt_gp->tg_pt_gp_valid_id))
@@ -893,7 +901,7 @@ int core_alua_do_port_transition(
 			}
 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			smp_mb__after_atomic_inc();
-			spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 			/*
 			 * core_alua_do_transition_tg_pt() will always return
 			 * success.
@@ -901,11 +909,11 @@ int core_alua_do_port_transition(
 			core_alua_do_transition_tg_pt(tg_pt_gp, port,
 					nacl, md_buf, new_state, explict);
 
-			spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			smp_mb__after_atomic_dec();
 		}
-		spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 
 		spin_lock(&lu_gp->lu_gp_lock);
 		atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
@@ -942,11 +950,11 @@ static int core_alua_update_tpg_secondary_metadata(
 	memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
 
 	len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
-			TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg));
+			se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
 
-	if (TPG_TFO(se_tpg)->tpg_get_tag != NULL)
+	if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
 		snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
-				TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
+				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
 
 	len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
 		"alua_tg_pt_status=0x%02x\n",
@@ -954,7 +962,7 @@ static int core_alua_update_tpg_secondary_metadata(
 		port->sep_tg_pt_secondary_stat);
 
 	snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
-		TPG_TFO(se_tpg)->get_fabric_name(), wwn,
+		se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
 		port->sep_lun->unpacked_lun);
 
 	return core_alua_write_tpg_metadata(path, md_buf, len);
@@ -977,7 +985,7 @@ static int core_alua_set_tg_pt_secondary_state(
 		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 		printk(KERN_ERR "Unable to complete secondary state"
 				" transition\n");
-		return -1;
+		return -EINVAL;
 	}
 	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
 	/*
@@ -1015,7 +1023,7 @@ static int core_alua_set_tg_pt_secondary_state(
 	if (!(md_buf)) {
 		printk(KERN_ERR "Unable to allocate md_buf for"
 				" secondary ALUA access metadata\n");
-		return -1;
+		return -ENOMEM;
 	}
 	mutex_lock(&port->sep_tg_pt_md_mutex);
 	core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
@@ -1038,15 +1046,15 @@ core_alua_allocate_lu_gp(const char *name, int def_group)
 		printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp\n");
 		return ERR_PTR(-ENOMEM);
 	}
-	INIT_LIST_HEAD(&lu_gp->lu_gp_list);
+	INIT_LIST_HEAD(&lu_gp->lu_gp_node);
 	INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
 	spin_lock_init(&lu_gp->lu_gp_lock);
 	atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
 
 	if (def_group) {
-		lu_gp->lu_gp_id = se_global->alua_lu_gps_counter++;
+		lu_gp->lu_gp_id = alua_lu_gps_counter++;
 		lu_gp->lu_gp_valid_id = 1;
-		se_global->alua_lu_gps_count++;
+		alua_lu_gps_count++;
 	}
 
 	return lu_gp;
@@ -1062,22 +1070,22 @@ int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
 	if (lu_gp->lu_gp_valid_id) {
 		printk(KERN_WARNING "ALUA LU Group already has a valid ID,"
 			" ignoring request\n");
-		return -1;
+		return -EINVAL;
 	}
 
-	spin_lock(&se_global->lu_gps_lock);
-	if (se_global->alua_lu_gps_count == 0x0000ffff) {
-		printk(KERN_ERR "Maximum ALUA se_global->alua_lu_gps_count:"
+	spin_lock(&lu_gps_lock);
+	if (alua_lu_gps_count == 0x0000ffff) {
+		printk(KERN_ERR "Maximum ALUA alua_lu_gps_count:"
 			" 0x0000ffff reached\n");
-		spin_unlock(&se_global->lu_gps_lock);
+		spin_unlock(&lu_gps_lock);
 		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
-		return -1;
+		return -ENOSPC;
 	}
 again:
 	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
-				se_global->alua_lu_gps_counter++;
+				alua_lu_gps_counter++;
 
-	list_for_each_entry(lu_gp_tmp, &se_global->g_lu_gps_list, lu_gp_list) {
+	list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
 		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
 			if (!(lu_gp_id))
 				goto again;
@@ -1085,16 +1093,16 @@ again:
 			printk(KERN_WARNING "ALUA Logical Unit Group ID: %hu"
 				" already exists, ignoring request\n",
 				lu_gp_id);
-			spin_unlock(&se_global->lu_gps_lock);
-			return -1;
+			spin_unlock(&lu_gps_lock);
+			return -EINVAL;
 		}
 	}
 
 	lu_gp->lu_gp_id = lu_gp_id_tmp;
 	lu_gp->lu_gp_valid_id = 1;
-	list_add_tail(&lu_gp->lu_gp_list, &se_global->g_lu_gps_list);
-	se_global->alua_lu_gps_count++;
-	spin_unlock(&se_global->lu_gps_lock);
+	list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
+	alua_lu_gps_count++;
+	spin_unlock(&lu_gps_lock);
 
 	return 0;
 }
@@ -1130,11 +1138,11 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
 	 * no associations can be made while we are releasing
 	 * struct t10_alua_lu_gp.
 	 */
-	spin_lock(&se_global->lu_gps_lock);
+	spin_lock(&lu_gps_lock);
 	atomic_set(&lu_gp->lu_gp_shutdown, 1);
-	list_del(&lu_gp->lu_gp_list);
-	se_global->alua_lu_gps_count--;
-	spin_unlock(&se_global->lu_gps_lock);
+	list_del(&lu_gp->lu_gp_node);
+	alua_lu_gps_count--;
+	spin_unlock(&lu_gps_lock);
 	/*
 	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
 	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
@@ -1165,9 +1173,9 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
 	 * we want to re-assocate a given lu_gp_mem with default_lu_gp.
 	 */
 	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
-	if (lu_gp != se_global->default_lu_gp)
+	if (lu_gp != default_lu_gp)
 		__core_alua_attach_lu_gp_mem(lu_gp_mem,
-				se_global->default_lu_gp);
+				default_lu_gp);
 	else
 		lu_gp_mem->lu_gp = NULL;
 	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
@@ -1182,7 +1190,7 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
 void core_alua_free_lu_gp_mem(struct se_device *dev)
 {
 	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
-	struct t10_alua *alua = T10_ALUA(su_dev);
+	struct t10_alua *alua = &su_dev->t10_alua;
 	struct t10_alua_lu_gp *lu_gp;
 	struct t10_alua_lu_gp_member *lu_gp_mem;
 
@@ -1218,27 +1226,27 @@ struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
 	struct t10_alua_lu_gp *lu_gp;
 	struct config_item *ci;
 
-	spin_lock(&se_global->lu_gps_lock);
-	list_for_each_entry(lu_gp, &se_global->g_lu_gps_list, lu_gp_list) {
+	spin_lock(&lu_gps_lock);
+	list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
 		if (!(lu_gp->lu_gp_valid_id))
 			continue;
 		ci = &lu_gp->lu_gp_group.cg_item;
 		if (!(strcmp(config_item_name(ci), name))) {
 			atomic_inc(&lu_gp->lu_gp_ref_cnt);
-			spin_unlock(&se_global->lu_gps_lock);
+			spin_unlock(&lu_gps_lock);
 			return lu_gp;
 		}
 	}
-	spin_unlock(&se_global->lu_gps_lock);
+	spin_unlock(&lu_gps_lock);
 
 	return NULL;
 }
 
 void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
 {
-	spin_lock(&se_global->lu_gps_lock);
+	spin_lock(&lu_gps_lock);
 	atomic_dec(&lu_gp->lu_gp_ref_cnt);
-	spin_unlock(&se_global->lu_gps_lock);
+	spin_unlock(&lu_gps_lock);
 }
 
 /*
@@ -1304,14 +1312,14 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
 	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
 
 	if (def_group) {
-		spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 		tg_pt_gp->tg_pt_gp_id =
-				T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
+				su_dev->t10_alua.alua_tg_pt_gps_counter++;
 		tg_pt_gp->tg_pt_gp_valid_id = 1;
-		T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
+		su_dev->t10_alua.alua_tg_pt_gps_count++;
 		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
-			&T10_ALUA(su_dev)->tg_pt_gps_list);
-		spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			&su_dev->t10_alua.tg_pt_gps_list);
+		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 	}
 
 	return tg_pt_gp;
@@ -1330,22 +1338,22 @@ int core_alua_set_tg_pt_gp_id(
 	if (tg_pt_gp->tg_pt_gp_valid_id) {
 		printk(KERN_WARNING "ALUA TG PT Group already has a valid ID,"
 			" ignoring request\n");
-		return -1;
+		return -EINVAL;
 	}
 
-	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
-	if (T10_ALUA(su_dev)->alua_tg_pt_gps_count == 0x0000ffff) {
+	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+	if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
 		printk(KERN_ERR "Maximum ALUA alua_tg_pt_gps_count:"
 			" 0x0000ffff reached\n");
-		spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
-		return -1;
+		return -ENOSPC;
 	}
 again:
 	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
-			T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
+			su_dev->t10_alua.alua_tg_pt_gps_counter++;
 
-	list_for_each_entry(tg_pt_gp_tmp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+	list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list,
 			tg_pt_gp_list) {
 		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
 			if (!(tg_pt_gp_id))
@@ -1353,17 +1361,17 @@ again:
 
 			printk(KERN_ERR "ALUA Target Port Group ID: %hu already"
 				" exists, ignoring request\n", tg_pt_gp_id);
-			spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
-			return -1;
+			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+			return -EINVAL;
 		}
 	}
 
 	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
 	tg_pt_gp->tg_pt_gp_valid_id = 1;
 	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
-			&T10_ALUA(su_dev)->tg_pt_gps_list);
-	T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
-	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			&su_dev->t10_alua.tg_pt_gps_list);
+	su_dev->t10_alua.alua_tg_pt_gps_count++;
+	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 
 	return 0;
 }
@@ -1403,10 +1411,10 @@ void core_alua_free_tg_pt_gp(
 	 * no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS
 	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
 	 */
-	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 	list_del(&tg_pt_gp->tg_pt_gp_list);
-	T10_ALUA(su_dev)->alua_tg_pt_gps_counter--;
-	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	su_dev->t10_alua.alua_tg_pt_gps_counter--;
+	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 	/*
 	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
 	 * core_alua_get_tg_pt_gp_by_name() in
@@ -1438,9 +1446,9 @@ void core_alua_free_tg_pt_gp(
 	 * default_tg_pt_gp.
 	 */
 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-	if (tg_pt_gp != T10_ALUA(su_dev)->default_tg_pt_gp) {
+	if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) {
 		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
-			T10_ALUA(su_dev)->default_tg_pt_gp);
+			su_dev->t10_alua.default_tg_pt_gp);
 	} else
 		tg_pt_gp_mem->tg_pt_gp = NULL;
 	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1455,7 +1463,7 @@ void core_alua_free_tg_pt_gp(
 void core_alua_free_tg_pt_gp_mem(struct se_port *port)
 {
 	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
-	struct t10_alua *alua = T10_ALUA(su_dev);
+	struct t10_alua *alua = &su_dev->t10_alua;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
 
@@ -1493,19 +1501,19 @@ static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct config_item *ci;
 
-	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
-	list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
 			tg_pt_gp_list) {
 		if (!(tg_pt_gp->tg_pt_gp_valid_id))
 			continue;
 		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
 		if (!(strcmp(config_item_name(ci), name))) {
 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-			spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 			return tg_pt_gp;
 		}
 	}
-	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 
 	return NULL;
 }
@@ -1515,9 +1523,9 @@ static void core_alua_put_tg_pt_gp_from_name(
 {
 	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
 
-	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 }
 
 /*
@@ -1555,7 +1563,7 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
 {
 	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
 	struct config_item *tg_pt_ci;
-	struct t10_alua *alua = T10_ALUA(su_dev);
+	struct t10_alua *alua = &su_dev->t10_alua;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
 	ssize_t len = 0;
@@ -1605,10 +1613,10 @@ ssize_t core_alua_store_tg_pt_gp_info(
 	tpg = port->sep_tpg;
 	lun = port->sep_lun;
 
-	if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
+	if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
 		printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for"
-			" %s/tpgt_%hu/%s\n", TPG_TFO(tpg)->tpg_get_wwn(tpg),
-			TPG_TFO(tpg)->tpg_get_tag(tpg),
+			" %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+			tpg->se_tpg_tfo->tpg_get_tag(tpg),
 			config_item_name(&lun->lun_group.cg_item));
 		return -EINVAL;
 	}
@@ -1654,8 +1662,8 @@ ssize_t core_alua_store_tg_pt_gp_info(
 			" %s/tpgt_%hu/%s from ALUA Target Port Group:"
 			" alua/%s, ID: %hu back to"
 			" default_tg_pt_gp\n",
-			TPG_TFO(tpg)->tpg_get_wwn(tpg),
-			TPG_TFO(tpg)->tpg_get_tag(tpg),
+			tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+			tpg->se_tpg_tfo->tpg_get_tag(tpg),
 			config_item_name(&lun->lun_group.cg_item),
 			config_item_name(
 			&tg_pt_gp->tg_pt_gp_group.cg_item),
@@ -1663,7 +1671,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
 
 		__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
 		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
-			T10_ALUA(su_dev)->default_tg_pt_gp);
+			su_dev->t10_alua.default_tg_pt_gp);
 		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 
 		return count;
@@ -1681,8 +1689,8 @@ ssize_t core_alua_store_tg_pt_gp_info(
 	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 	printk(KERN_INFO "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
 		" Target Port Group: alua/%s, ID: %hu\n", (move) ?
-		"Moving" : "Adding", TPG_TFO(tpg)->tpg_get_wwn(tpg),
-		TPG_TFO(tpg)->tpg_get_tag(tpg),
+		"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+		tpg->se_tpg_tfo->tpg_get_tag(tpg),
 		config_item_name(&lun->lun_group.cg_item),
 		config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
 		tg_pt_gp_new->tg_pt_gp_id);
@@ -1939,7 +1947,7 @@ ssize_t core_alua_store_secondary_write_metadata(
 int core_setup_alua(struct se_device *dev, int force_pt)
 {
 	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
-	struct t10_alua *alua = T10_ALUA(su_dev);
+	struct t10_alua *alua = &su_dev->t10_alua;
 	struct t10_alua_lu_gp_member *lu_gp_mem;
 	/*
 	 * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
@@ -1947,44 +1955,44 @@ int core_setup_alua(struct se_device *dev, int force_pt)
 	 * cause a problem because libata and some SATA RAID HBAs appear
 	 * under Linux/SCSI, but emulate SCSI logic themselves.
 	 */
-	if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
-	    !(DEV_ATTRIB(dev)->emulate_alua)) || force_pt) {
+	if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
+	    !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
 		alua->alua_type = SPC_ALUA_PASSTHROUGH;
 		alua->alua_state_check = &core_alua_state_check_nop;
 		printk(KERN_INFO "%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
-			" emulation\n", TRANSPORT(dev)->name);
+			" emulation\n", dev->transport->name);
 		return 0;
 	}
 	/*
 	 * If SPC-3 or above is reported by real or emulated struct se_device,
 	 * use emulated ALUA.
 	 */
-	if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) {
+	if (dev->transport->get_device_rev(dev) >= SCSI_3) {
 		printk(KERN_INFO "%s: Enabling ALUA Emulation for SPC-3"
-			" device\n", TRANSPORT(dev)->name);
+			" device\n", dev->transport->name);
 		/*
 		 * Associate this struct se_device with the default ALUA
 		 * LUN Group.
 		 */
 		lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
-		if (IS_ERR(lu_gp_mem) || !lu_gp_mem)
-			return -1;
+		if (IS_ERR(lu_gp_mem))
+			return PTR_ERR(lu_gp_mem);
 
 		alua->alua_type = SPC3_ALUA_EMULATED;
 		alua->alua_state_check = &core_alua_state_check;
 		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
 		__core_alua_attach_lu_gp_mem(lu_gp_mem,
-				se_global->default_lu_gp);
+				default_lu_gp);
 		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
 
 		printk(KERN_INFO "%s: Adding to default ALUA LU Group:"
 			" core/alua/lu_gps/default_lu_gp\n",
-			TRANSPORT(dev)->name);
+			dev->transport->name);
 	} else {
 		alua->alua_type = SPC2_ALUA_DISABLED;
 		alua->alua_state_check = &core_alua_state_check_nop;
 		printk(KERN_INFO "%s: Disabling ALUA Emulation for SPC-2"
-			" device\n", TRANSPORT(dev)->name);
+			" device\n", dev->transport->name);
 	}
 
 	return 0;
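The target_core_alua.c changes above also show the "Remove #defines that just perform indirection" item in practice: accessor macros such as T10_ALUA(), SE_DEV(), DEV_ATTRIB(), TRANSPORT() and TPG_TFO() are expanded into the member accesses they hid. A sketch of the before/after shape; the macro body here is reconstructed from the substitutions visible in the diff, so treat it as an assumption:

	/* Before: the macro obscures a plain member access. */
	#define T10_ALUA(su_dev)	(&(su_dev)->t10_alua)

	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);

	/* After: the access is spelled out at the call site. */
	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);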
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 7f19c8b7b84c..7d9ccf3aa9c3 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -64,8 +64,8 @@ target_fill_alua_data(struct se_port *port, unsigned char *buf)
 static int
 target_emulate_inquiry_std(struct se_cmd *cmd)
 {
-	struct se_lun *lun = SE_LUN(cmd);
-	struct se_device *dev = SE_DEV(cmd);
+	struct se_lun *lun = cmd->se_lun;
+	struct se_device *dev = cmd->se_lun->lun_se_dev;
 	unsigned char *buf = cmd->t_task->t_task_buf;
 
 	/*
@@ -75,7 +75,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
 	if (cmd->data_length < 6) {
 		printk(KERN_ERR "SCSI Inquiry payload length: %u"
 			" too small for EVPD=0\n", cmd->data_length);
-		return -1;
+		return -EINVAL;
 	}
 
 	buf[0] = dev->transport->get_device_type(dev);
@@ -86,7 +86,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
 	/*
 	 * Enable SCCS and TPGS fields for Emulated ALUA
 	 */
-	if (T10_ALUA(dev->se_sub_dev)->alua_type == SPC3_ALUA_EMULATED)
+	if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
 		target_fill_alua_data(lun->lun_sep, buf);
 
 	if (cmd->data_length < 8) {
@@ -107,9 +107,9 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
 
 	snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
 	snprintf((unsigned char *)&buf[16], 16, "%s",
-			&DEV_T10_WWN(dev)->model[0]);
+			&dev->se_sub_dev->t10_wwn.model[0]);
 	snprintf((unsigned char *)&buf[32], 4, "%s",
-			&DEV_T10_WWN(dev)->revision[0]);
+			&dev->se_sub_dev->t10_wwn.revision[0]);
 	buf[4] = 31; /* Set additional length to 31 */
 	return 0;
 }
@@ -128,7 +128,7 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
 	 * Registered Extended LUN WWN has been set via ConfigFS
 	 * during device creation/restart.
 	 */
-	if (SE_DEV(cmd)->se_sub_dev->su_dev_flags &
+	if (cmd->se_lun->lun_se_dev->se_sub_dev->su_dev_flags &
 			SDF_EMULATED_VPD_UNIT_SERIAL) {
 		buf[3] = 3;
 		buf[5] = 0x80;
@@ -143,7 +143,7 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
 static int
 target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
 {
-	struct se_device *dev = SE_DEV(cmd);
+	struct se_device *dev = cmd->se_lun->lun_se_dev;
 	u16 len = 0;
 
 	buf[1] = 0x80;
@@ -152,7 +152,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
 		u32 unit_serial_len;
 
 		unit_serial_len =
-			strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
+			strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
 		unit_serial_len++; /* For NULL Terminator */
 
 		if (((len + 4) + unit_serial_len) > cmd->data_length) {
@@ -162,7 +162,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
 			return 0;
 		}
 		len += sprintf((unsigned char *)&buf[4], "%s",
-			&DEV_T10_WWN(dev)->unit_serial[0]);
+			&dev->se_sub_dev->t10_wwn.unit_serial[0]);
 		len++; /* Extra Byte for NULL Terminator */
 		buf[3] = len;
 	}
@@ -176,15 +176,15 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
 static int
 target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
 {
-	struct se_device *dev = SE_DEV(cmd);
-	struct se_lun *lun = SE_LUN(cmd);
+	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_lun *lun = cmd->se_lun;
 	struct se_port *port = NULL;
 	struct se_portal_group *tpg = NULL;
 	struct t10_alua_lu_gp_member *lu_gp_mem;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
 	unsigned char binary, binary_new;
-	unsigned char *prod = &DEV_T10_WWN(dev)->model[0];
+	unsigned char *prod = &dev->se_sub_dev->t10_wwn.model[0];
 	u32 prod_len;
 	u32 unit_serial_len, off = 0;
 	int i;
@@ -238,11 +238,11 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
 		 * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
 		 */
 		binary = transport_asciihex_to_binaryhex(
-				&DEV_T10_WWN(dev)->unit_serial[0]);
+				&dev->se_sub_dev->t10_wwn.unit_serial[0]);
 		buf[off++] |= (binary & 0xf0) >> 4;
 		for (i = 0; i < 24; i += 2) {
 			binary_new = transport_asciihex_to_binaryhex(
-				&DEV_T10_WWN(dev)->unit_serial[i+2]);
+				&dev->se_sub_dev->t10_wwn.unit_serial[i+2]);
 			buf[off] = (binary & 0x0f) << 4;
 			buf[off++] |= (binary_new & 0xf0) >> 4;
 			binary = binary_new;
@@ -263,7 +263,7 @@ check_t10_vend_desc:
 		if (dev->se_sub_dev->su_dev_flags &
 				SDF_EMULATED_VPD_UNIT_SERIAL) {
 			unit_serial_len =
-				strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
+				strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
 			unit_serial_len++; /* For NULL Terminator */
 
 			if ((len + (id_len + 4) +
@@ -274,7 +274,7 @@ check_t10_vend_desc:
 			}
 			id_len += sprintf((unsigned char *)&buf[off+12],
 					"%s:%s", prod,
-					&DEV_T10_WWN(dev)->unit_serial[0]);
+					&dev->se_sub_dev->t10_wwn.unit_serial[0]);
 		}
 		buf[off] = 0x2; /* ASCII */
 		buf[off+1] = 0x1; /* T10 Vendor ID */
@@ -312,7 +312,7 @@ check_port:
 			goto check_tpgi;
 		}
 		buf[off] =
-			(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
 		buf[off++] |= 0x1; /* CODE SET == Binary */
 		buf[off] = 0x80; /* Set PIV=1 */
 		/* Set ASSOICATION == target port: 01b */
@@ -335,7 +335,7 @@ check_port:
 		 * section 7.5.1 Table 362
 		 */
 check_tpgi:
-		if (T10_ALUA(dev->se_sub_dev)->alua_type !=
+		if (dev->se_sub_dev->t10_alua.alua_type !=
 				SPC3_ALUA_EMULATED)
 			goto check_scsi_name;
 
@@ -357,7 +357,7 @@ check_tpgi:
 		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 
 		buf[off] =
-			(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
 		buf[off++] |= 0x1; /* CODE SET == Binary */
 		buf[off] = 0x80; /* Set PIV=1 */
 		/* Set ASSOICATION == target port: 01b */
@@ -409,7 +409,7 @@ check_lu_gp:
 	 * section 7.5.1 Table 362
 	 */
 check_scsi_name:
-	scsi_name_len = strlen(TPG_TFO(tpg)->tpg_get_wwn(tpg));
+	scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg));
 	/* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
 	scsi_name_len += 10;
 	/* Check for 4-byte padding */
@@ -424,7 +424,7 @@ check_scsi_name:
 		goto set_len;
 	}
 	buf[off] =
-		(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+		(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
 	buf[off++] |= 0x3; /* CODE SET == UTF-8 */
 	buf[off] = 0x80; /* Set PIV=1 */
 	/* Set ASSOICATION == target port: 01b */
@@ -438,9 +438,9 @@ check_scsi_name:
 	 * Target Port, this means "<iSCSI name>,t,0x<TPGT> in
 	 * UTF-8 encoding.
 	 */
-	tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
+	tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
 	scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
-			TPG_TFO(tpg)->tpg_get_wwn(tpg), tpgt);
+			tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
 	scsi_name_len += 1 /* Include NULL terminator */;
 	/*
 	 * The null-terminated, null-padded (see 4.4.2) SCSI
@@ -477,7 +477,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
 	buf[5] = 0x07;
 
 	/* If WriteCache emulation is enabled, set V_SUP */
-	if (DEV_ATTRIB(SE_DEV(cmd))->emulate_write_cache > 0)
+	if (cmd->se_lun->lun_se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
 		buf[6] = 0x01;
 	return 0;
 }
@@ -486,7 +486,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
 static int
 target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 {
-	struct se_device *dev = SE_DEV(cmd);
+	struct se_device *dev = cmd->se_lun->lun_se_dev;
 	int have_tp = 0;
 
 	/*
@@ -494,14 +494,14 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 	 * emulate_tpu=1 or emulate_tpws=1 we will be expect a
 	 * different page length for Thin Provisioning.
 	 */
-	if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+	if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
 		have_tp = 1;
 
 	if (cmd->data_length < (0x10 + 4)) {
 		printk(KERN_INFO "Received data_length: %u"
 			" too small for EVPD 0xb0\n",
 			cmd->data_length);
-		return -1;
+		return -EINVAL;
 	}
 
 	if (have_tp && cmd->data_length < (0x3c + 4)) {
@@ -523,12 +523,12 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) | |||
523 | /* | 523 | /* |
524 | * Set MAXIMUM TRANSFER LENGTH | 524 | * Set MAXIMUM TRANSFER LENGTH |
525 | */ | 525 | */ |
526 | put_unaligned_be32(DEV_ATTRIB(dev)->max_sectors, &buf[8]); | 526 | put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_sectors, &buf[8]); |
527 | 527 | ||
528 | /* | 528 | /* |
529 | * Set OPTIMAL TRANSFER LENGTH | 529 | * Set OPTIMAL TRANSFER LENGTH |
530 | */ | 530 | */ |
531 | put_unaligned_be32(DEV_ATTRIB(dev)->optimal_sectors, &buf[12]); | 531 | put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]); |
532 | 532 | ||
533 | /* | 533 | /* |
534 | * Exit now if we don't support TP or the initiator sent a too | 534 | * Exit now if we don't support TP or the initiator sent a too |
@@ -540,25 +540,25 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) | |||
540 | /* | 540 | /* |
541 | * Set MAXIMUM UNMAP LBA COUNT | 541 | * Set MAXIMUM UNMAP LBA COUNT |
542 | */ | 542 | */ |
543 | put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_lba_count, &buf[20]); | 543 | put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count, &buf[20]); |
544 | 544 | ||
545 | /* | 545 | /* |
546 | * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT | 546 | * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT |
547 | */ | 547 | */ |
548 | put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_block_desc_count, | 548 | put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count, |
549 | &buf[24]); | 549 | &buf[24]); |
550 | 550 | ||
551 | /* | 551 | /* |
552 | * Set OPTIMAL UNMAP GRANULARITY | 552 | * Set OPTIMAL UNMAP GRANULARITY |
553 | */ | 553 | */ |
554 | put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity, &buf[28]); | 554 | put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity, &buf[28]); |
555 | 555 | ||
556 | /* | 556 | /* |
557 | * UNMAP GRANULARITY ALIGNMENT | 557 | * UNMAP GRANULARITY ALIGNMENT |
558 | */ | 558 | */ |
559 | put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity_alignment, | 559 | put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment, |
560 | &buf[32]); | 560 | &buf[32]); |
561 | if (DEV_ATTRIB(dev)->unmap_granularity_alignment != 0) | 561 | if (dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment != 0) |
562 | buf[32] |= 0x80; /* Set the UGAVALID bit */ | 562 | buf[32] |= 0x80; /* Set the UGAVALID bit */ |
563 | 563 | ||
564 | return 0; | 564 | return 0; |
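
For orientation, the put_unaligned_be32() calls above write the SBC-3 Block Limits fields at fixed offsets. The sketch below mirrors the hunk with a userspace stand-in for the big-endian store; field names follow SBC-3, and the layout is exactly what the code above emits:

#include <stdint.h>

/* Userspace stand-in for the kernel's put_unaligned_be32(). */
static void store_be32(uint32_t v, uint8_t *p)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

static void fill_block_limits(uint8_t *buf, uint32_t max_sectors,
			      uint32_t opt_sectors, uint32_t max_unmap_lbas,
			      uint32_t max_unmap_descs, uint32_t gran,
			      uint32_t gran_align)
{
	store_be32(max_sectors, &buf[8]);	/* MAXIMUM TRANSFER LENGTH */
	store_be32(opt_sectors, &buf[12]);	/* OPTIMAL TRANSFER LENGTH */
	store_be32(max_unmap_lbas, &buf[20]);	/* MAXIMUM UNMAP LBA COUNT */
	store_be32(max_unmap_descs, &buf[24]);	/* MAX UNMAP BLOCK DESC COUNT */
	store_be32(gran, &buf[28]);		/* OPTIMAL UNMAP GRANULARITY */
	store_be32(gran_align, &buf[32]);	/* UNMAP GRANULARITY ALIGNMENT */
	if (gran_align != 0)
		buf[32] |= 0x80;		/* UGAVALID bit */
}
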
@@ -568,7 +568,7 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) | |||
568 | static int | 568 | static int |
569 | target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) | 569 | target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) |
570 | { | 570 | { |
571 | struct se_device *dev = SE_DEV(cmd); | 571 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
572 | 572 | ||
573 | /* | 573 | /* |
574 | * From sbc3r22 section 6.5.4 Thin Provisioning VPD page: | 574 | * From sbc3r22 section 6.5.4 Thin Provisioning VPD page: |
@@ -602,7 +602,7 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) | |||
602 | * the UNMAP command (see 5.25). A TPU bit set to zero indicates | 602 | * the UNMAP command (see 5.25). A TPU bit set to zero indicates |
603 | * that the device server does not support the UNMAP command. | 603 | * that the device server does not support the UNMAP command. |
604 | */ | 604 | */ |
605 | if (DEV_ATTRIB(dev)->emulate_tpu != 0) | 605 | if (dev->se_sub_dev->se_dev_attrib.emulate_tpu != 0) |
606 | buf[5] = 0x80; | 606 | buf[5] = 0x80; |
607 | 607 | ||
608 | /* | 608 | /* |
@@ -611,7 +611,7 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) | |||
611 | * A TPWS bit set to zero indicates that the device server does not | 611 | * A TPWS bit set to zero indicates that the device server does not |
612 | * support the use of the WRITE SAME (16) command to unmap LBAs. | 612 | * support the use of the WRITE SAME (16) command to unmap LBAs. |
613 | */ | 613 | */ |
614 | if (DEV_ATTRIB(dev)->emulate_tpws != 0) | 614 | if (dev->se_sub_dev->se_dev_attrib.emulate_tpws != 0) |
615 | buf[5] |= 0x40; | 615 | buf[5] |= 0x40; |
616 | 616 | ||
617 | return 0; | 617 | return 0; |
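
The two thin provisioning flags above reduce to bit 7 (TPU) and bit 6 (TPWS) of byte 5 in the 0xb2 payload; a one-function sketch, with the bit values taken straight from the hunk:

/* Byte 5 of the Thin Provisioning VPD (0xb2) payload. */
static unsigned char tp_byte5(int emulate_tpu, int emulate_tpws)
{
	return (emulate_tpu ? 0x80 : 0x00) | (emulate_tpws ? 0x40 : 0x00);
}
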
@@ -620,7 +620,7 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) | |||
620 | static int | 620 | static int |
621 | target_emulate_inquiry(struct se_cmd *cmd) | 621 | target_emulate_inquiry(struct se_cmd *cmd) |
622 | { | 622 | { |
623 | struct se_device *dev = SE_DEV(cmd); | 623 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
624 | unsigned char *buf = cmd->t_task->t_task_buf; | 624 | unsigned char *buf = cmd->t_task->t_task_buf; |
625 | unsigned char *cdb = cmd->t_task->t_task_cdb; | 625 | unsigned char *cdb = cmd->t_task->t_task_cdb; |
626 | 626 | ||
@@ -637,7 +637,7 @@ target_emulate_inquiry(struct se_cmd *cmd) | |||
637 | if (cmd->data_length < 4) { | 637 | if (cmd->data_length < 4) { |
638 | printk(KERN_ERR "SCSI Inquiry payload length: %u" | 638 | printk(KERN_ERR "SCSI Inquiry payload length: %u" |
639 | " too small for EVPD=1\n", cmd->data_length); | 639 | " too small for EVPD=1\n", cmd->data_length); |
640 | return -1; | 640 | return -EINVAL; |
641 | } | 641 | } |
642 | buf[0] = dev->transport->get_device_type(dev); | 642 | buf[0] = dev->transport->get_device_type(dev); |
643 | 643 | ||
@@ -656,7 +656,7 @@ target_emulate_inquiry(struct se_cmd *cmd) | |||
656 | return target_emulate_evpd_b2(cmd, buf); | 656 | return target_emulate_evpd_b2(cmd, buf); |
657 | default: | 657 | default: |
658 | printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]); | 658 | printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]); |
659 | return -1; | 659 | return -EINVAL; |
660 | } | 660 | } |
661 | 661 | ||
662 | return 0; | 662 | return 0; |
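
The -1 to -EINVAL conversions in this hunk are the series' errno cleanup in action: callers still test for a negative return, but the value now names the failure. A minimal sketch of the convention:

#include <errno.h>

static int check_evpd_payload(int evpd, unsigned int data_length)
{
	if (evpd && data_length < 4)
		return -EINVAL;	/* named reason instead of a bare -1 */
	return 0;		/* success stays zero */
}
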
@@ -665,7 +665,7 @@ target_emulate_inquiry(struct se_cmd *cmd) | |||
665 | static int | 665 | static int |
666 | target_emulate_readcapacity(struct se_cmd *cmd) | 666 | target_emulate_readcapacity(struct se_cmd *cmd) |
667 | { | 667 | { |
668 | struct se_device *dev = SE_DEV(cmd); | 668 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
669 | unsigned char *buf = cmd->t_task->t_task_buf; | 669 | unsigned char *buf = cmd->t_task->t_task_buf; |
670 | unsigned long long blocks_long = dev->transport->get_blocks(dev); | 670 | unsigned long long blocks_long = dev->transport->get_blocks(dev); |
671 | u32 blocks; | 671 | u32 blocks; |
@@ -679,14 +679,14 @@ target_emulate_readcapacity(struct se_cmd *cmd) | |||
679 | buf[1] = (blocks >> 16) & 0xff; | 679 | buf[1] = (blocks >> 16) & 0xff; |
680 | buf[2] = (blocks >> 8) & 0xff; | 680 | buf[2] = (blocks >> 8) & 0xff; |
681 | buf[3] = blocks & 0xff; | 681 | buf[3] = blocks & 0xff; |
682 | buf[4] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff; | 682 | buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; |
683 | buf[5] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff; | 683 | buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; |
684 | buf[6] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff; | 684 | buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; |
685 | buf[7] = DEV_ATTRIB(dev)->block_size & 0xff; | 685 | buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; |
686 | /* | 686 | /* |
687 | * Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16 | 687 | * Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16 |
688 | */ | 688 | */ |
689 | if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws) | 689 | if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) |
690 | put_unaligned_be32(0xFFFFFFFF, &buf[0]); | 690 | put_unaligned_be32(0xFFFFFFFF, &buf[0]); |
691 | 691 | ||
692 | return 0; | 692 | return 0; |
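
The shift-and-mask stores into buf[4..7] above are an open-coded big-endian write, byte-for-byte what put_unaligned_be32() produces elsewhere in this file. A self-checking userspace sketch of the equivalence:

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint32_t bs = 512;	/* illustrative block size */
	uint8_t a[4], b[4];

	a[0] = (bs >> 24) & 0xff;	/* open-coded, as in the hunk */
	a[1] = (bs >> 16) & 0xff;
	a[2] = (bs >> 8) & 0xff;
	a[3] = bs & 0xff;

	b[0] = bs >> 24;		/* what put_unaligned_be32() emits */
	b[1] = bs >> 16;
	b[2] = bs >> 8;
	b[3] = bs;

	assert(memcmp(a, b, 4) == 0);
	return 0;
}
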
@@ -695,7 +695,7 @@ target_emulate_readcapacity(struct se_cmd *cmd) | |||
695 | static int | 695 | static int |
696 | target_emulate_readcapacity_16(struct se_cmd *cmd) | 696 | target_emulate_readcapacity_16(struct se_cmd *cmd) |
697 | { | 697 | { |
698 | struct se_device *dev = SE_DEV(cmd); | 698 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
699 | unsigned char *buf = cmd->t_task->t_task_buf; | 699 | unsigned char *buf = cmd->t_task->t_task_buf; |
700 | unsigned long long blocks = dev->transport->get_blocks(dev); | 700 | unsigned long long blocks = dev->transport->get_blocks(dev); |
701 | 701 | ||
@@ -707,15 +707,15 @@ target_emulate_readcapacity_16(struct se_cmd *cmd) | |||
707 | buf[5] = (blocks >> 16) & 0xff; | 707 | buf[5] = (blocks >> 16) & 0xff; |
708 | buf[6] = (blocks >> 8) & 0xff; | 708 | buf[6] = (blocks >> 8) & 0xff; |
709 | buf[7] = blocks & 0xff; | 709 | buf[7] = blocks & 0xff; |
710 | buf[8] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff; | 710 | buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; |
711 | buf[9] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff; | 711 | buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; |
712 | buf[10] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff; | 712 | buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; |
713 | buf[11] = DEV_ATTRIB(dev)->block_size & 0xff; | 713 | buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; |
714 | /* | 714 | /* |
715 | * Set Thin Provisioning Enable bit following sbc3r22 in section | 715 | * Set Thin Provisioning Enable bit following sbc3r22 in section |
716 | * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. | 716 | * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. |
717 | */ | 717 | */ |
718 | if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws) | 718 | if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) |
719 | buf[14] = 0x80; | 719 | buf[14] = 0x80; |
720 | 720 | ||
721 | return 0; | 721 | return 0; |
@@ -765,8 +765,8 @@ target_modesense_control(struct se_device *dev, unsigned char *p) | |||
765 | * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless | 765 | * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless |
766 | * of the number of commands completed with one of those status codes. | 766 | * of the number of commands completed with one of those status codes. |
767 | */ | 767 | */ |
768 | p[4] = (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 2) ? 0x30 : | 768 | p[4] = (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 : |
769 | (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00; | 769 | (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00; |
770 | /* | 770 | /* |
771 | * From spc4r17, section 7.4.6 Control mode Page | 771 | * From spc4r17, section 7.4.6 Control mode Page |
772 | * | 772 | * |
@@ -779,7 +779,7 @@ target_modesense_control(struct se_device *dev, unsigned char *p) | |||
779 | * which the command was received shall be completed with TASK ABORTED | 779 | * which the command was received shall be completed with TASK ABORTED |
780 | * status (see SAM-4). | 780 | * status (see SAM-4). |
781 | */ | 781 | */ |
782 | p[5] = (DEV_ATTRIB(dev)->emulate_tas) ? 0x40 : 0x00; | 782 | p[5] = (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? 0x40 : 0x00; |
783 | p[8] = 0xff; | 783 | p[8] = 0xff; |
784 | p[9] = 0xff; | 784 | p[9] = 0xff; |
785 | p[11] = 30; | 785 | p[11] = 30; |
@@ -792,7 +792,7 @@ target_modesense_caching(struct se_device *dev, unsigned char *p) | |||
792 | { | 792 | { |
793 | p[0] = 0x08; | 793 | p[0] = 0x08; |
794 | p[1] = 0x12; | 794 | p[1] = 0x12; |
795 | if (DEV_ATTRIB(dev)->emulate_write_cache > 0) | 795 | if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) |
796 | p[2] = 0x04; /* Write Cache Enable */ | 796 | p[2] = 0x04; /* Write Cache Enable */ |
797 | p[12] = 0x20; /* Disabled Read Ahead */ | 797 | p[12] = 0x20; /* Disabled Read Ahead */ |
798 | 798 | ||
@@ -830,7 +830,7 @@ target_modesense_dpofua(unsigned char *buf, int type) | |||
830 | static int | 830 | static int |
831 | target_emulate_modesense(struct se_cmd *cmd, int ten) | 831 | target_emulate_modesense(struct se_cmd *cmd, int ten) |
832 | { | 832 | { |
833 | struct se_device *dev = SE_DEV(cmd); | 833 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
834 | char *cdb = cmd->t_task->t_task_cdb; | 834 | char *cdb = cmd->t_task->t_task_cdb; |
835 | unsigned char *rbuf = cmd->t_task->t_task_buf; | 835 | unsigned char *rbuf = cmd->t_task->t_task_buf; |
836 | int type = dev->transport->get_device_type(dev); | 836 | int type = dev->transport->get_device_type(dev); |
@@ -867,13 +867,13 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) | |||
867 | buf[0] = (offset >> 8) & 0xff; | 867 | buf[0] = (offset >> 8) & 0xff; |
868 | buf[1] = offset & 0xff; | 868 | buf[1] = offset & 0xff; |
869 | 869 | ||
870 | if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || | 870 | if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || |
871 | (cmd->se_deve && | 871 | (cmd->se_deve && |
872 | (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) | 872 | (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) |
873 | target_modesense_write_protect(&buf[3], type); | 873 | target_modesense_write_protect(&buf[3], type); |
874 | 874 | ||
875 | if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) && | 875 | if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) && |
876 | (DEV_ATTRIB(dev)->emulate_fua_write > 0)) | 876 | (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0)) |
877 | target_modesense_dpofua(&buf[3], type); | 877 | target_modesense_dpofua(&buf[3], type); |
878 | 878 | ||
879 | if ((offset + 2) > cmd->data_length) | 879 | if ((offset + 2) > cmd->data_length) |
@@ -883,13 +883,13 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) | |||
883 | offset -= 1; | 883 | offset -= 1; |
884 | buf[0] = offset & 0xff; | 884 | buf[0] = offset & 0xff; |
885 | 885 | ||
886 | if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || | 886 | if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || |
887 | (cmd->se_deve && | 887 | (cmd->se_deve && |
888 | (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) | 888 | (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) |
889 | target_modesense_write_protect(&buf[2], type); | 889 | target_modesense_write_protect(&buf[2], type); |
890 | 890 | ||
891 | if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) && | 891 | if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) && |
892 | (DEV_ATTRIB(dev)->emulate_fua_write > 0)) | 892 | (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0)) |
893 | target_modesense_dpofua(&buf[2], type); | 893 | target_modesense_dpofua(&buf[2], type); |
894 | 894 | ||
895 | if ((offset + 1) > cmd->data_length) | 895 | if ((offset + 1) > cmd->data_length) |
@@ -963,8 +963,8 @@ target_emulate_request_sense(struct se_cmd *cmd) | |||
963 | static int | 963 | static int |
964 | target_emulate_unmap(struct se_task *task) | 964 | target_emulate_unmap(struct se_task *task) |
965 | { | 965 | { |
966 | struct se_cmd *cmd = TASK_CMD(task); | 966 | struct se_cmd *cmd = task->task_se_cmd; |
967 | struct se_device *dev = SE_DEV(cmd); | 967 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
968 | unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL; | 968 | unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL; |
969 | unsigned char *cdb = &cmd->t_task->t_task_cdb[0]; | 969 | unsigned char *cdb = &cmd->t_task->t_task_cdb[0]; |
970 | sector_t lba; | 970 | sector_t lba; |
@@ -991,7 +991,7 @@ target_emulate_unmap(struct se_task *task) | |||
991 | if (ret < 0) { | 991 | if (ret < 0) { |
992 | printk(KERN_ERR "blkdev_issue_discard() failed: %d\n", | 992 | printk(KERN_ERR "blkdev_issue_discard() failed: %d\n", |
993 | ret); | 993 | ret); |
994 | return -1; | 994 | return ret; |
995 | } | 995 | } |
996 | 996 | ||
997 | ptr += 16; | 997 | ptr += 16; |
@@ -1010,13 +1010,13 @@ target_emulate_unmap(struct se_task *task) | |||
1010 | static int | 1010 | static int |
1011 | target_emulate_write_same(struct se_task *task) | 1011 | target_emulate_write_same(struct se_task *task) |
1012 | { | 1012 | { |
1013 | struct se_cmd *cmd = TASK_CMD(task); | 1013 | struct se_cmd *cmd = task->task_se_cmd; |
1014 | struct se_device *dev = SE_DEV(cmd); | 1014 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
1015 | sector_t lba = cmd->t_task->t_task_lba; | 1015 | sector_t lba = cmd->t_task->t_task_lba; |
1016 | unsigned int range; | 1016 | unsigned int range; |
1017 | int ret; | 1017 | int ret; |
1018 | 1018 | ||
1019 | range = (cmd->data_length / DEV_ATTRIB(dev)->block_size); | 1019 | range = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); |
1020 | 1020 | ||
1021 | printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %u\n", | 1021 | printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %u\n", |
1022 | (unsigned long long)lba, range); | 1022 | (unsigned long long)lba, range); |
@@ -1024,7 +1024,7 @@ target_emulate_write_same(struct se_task *task) | |||
1024 | ret = dev->transport->do_discard(dev, lba, range); | 1024 | ret = dev->transport->do_discard(dev, lba, range); |
1025 | if (ret < 0) { | 1025 | if (ret < 0) { |
1026 | printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n"); | 1026 | printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n"); |
1027 | return -1; | 1027 | return ret; |
1028 | } | 1028 | } |
1029 | 1029 | ||
1030 | task->task_scsi_status = GOOD; | 1030 | task->task_scsi_status = GOOD; |
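
The range handed to ->do_discard() above is just the payload size divided by the logical block size; sketched as a helper (function name hypothetical):

/* cmd->data_length is in bytes; discard ranges are in logical blocks. */
static unsigned int bytes_to_blocks(unsigned int data_length,
				    unsigned int block_size)
{
	return data_length / block_size;
}
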
@@ -1035,8 +1035,8 @@ target_emulate_write_same(struct se_task *task) | |||
1035 | int | 1035 | int |
1036 | transport_emulate_control_cdb(struct se_task *task) | 1036 | transport_emulate_control_cdb(struct se_task *task) |
1037 | { | 1037 | { |
1038 | struct se_cmd *cmd = TASK_CMD(task); | 1038 | struct se_cmd *cmd = task->task_se_cmd; |
1039 | struct se_device *dev = SE_DEV(cmd); | 1039 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
1040 | unsigned short service_action; | 1040 | unsigned short service_action; |
1041 | int ret = 0; | 1041 | int ret = 0; |
1042 | 1042 | ||
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index c6140004307b..64418efa671b 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/parser.h> | 37 | #include <linux/parser.h> |
38 | #include <linux/syscalls.h> | 38 | #include <linux/syscalls.h> |
39 | #include <linux/configfs.h> | 39 | #include <linux/configfs.h> |
40 | #include <linux/spinlock.h> | ||
40 | 41 | ||
41 | #include <target/target_core_base.h> | 42 | #include <target/target_core_base.h> |
42 | #include <target/target_core_device.h> | 43 | #include <target/target_core_device.h> |
@@ -52,6 +53,8 @@ | |||
52 | #include "target_core_rd.h" | 53 | #include "target_core_rd.h" |
53 | #include "target_core_stat.h" | 54 | #include "target_core_stat.h" |
54 | 55 | ||
56 | extern struct t10_alua_lu_gp *default_lu_gp; | ||
57 | |||
55 | static struct list_head g_tf_list; | 58 | static struct list_head g_tf_list; |
56 | static struct mutex g_tf_lock; | 59 | static struct mutex g_tf_lock; |
57 | 60 | ||
@@ -61,6 +64,13 @@ struct target_core_configfs_attribute { | |||
61 | ssize_t (*store)(void *, const char *, size_t); | 64 | ssize_t (*store)(void *, const char *, size_t); |
62 | }; | 65 | }; |
63 | 66 | ||
67 | static struct config_group target_core_hbagroup; | ||
68 | static struct config_group alua_group; | ||
69 | static struct config_group alua_lu_gps_group; | ||
70 | |||
71 | static DEFINE_SPINLOCK(se_device_lock); | ||
72 | static LIST_HEAD(se_dev_list); | ||
73 | |||
64 | static inline struct se_hba * | 74 | static inline struct se_hba * |
65 | item_to_hba(struct config_item *item) | 75 | item_to_hba(struct config_item *item) |
66 | { | 76 | { |
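
These new file-scope statics are what the rest of the diff substitutes for the old se_global fields: a lock/list pair owned by the translation unit. A reduced sketch of the idiom as used by target_core_make_subdev() and target_core_drop_subdev() later in the diff (element type hypothetical; assumes kernel headers):

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical element type for illustration. */
struct my_dev { struct list_head node; };

static DEFINE_SPINLOCK(my_dev_lock);
static LIST_HEAD(my_dev_list);

static void my_dev_register(struct my_dev *d)
{
	spin_lock(&my_dev_lock);
	list_add_tail(&d->node, &my_dev_list);
	spin_unlock(&my_dev_lock);
}

static void my_dev_unregister(struct my_dev *d)
{
	spin_lock(&my_dev_lock);
	list_del(&d->node);
	spin_unlock(&my_dev_lock);
}
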
@@ -298,21 +308,21 @@ struct target_fabric_configfs *target_fabric_configfs_init( | |||
298 | 308 | ||
299 | if (!(fabric_mod)) { | 309 | if (!(fabric_mod)) { |
300 | printk(KERN_ERR "Missing struct module *fabric_mod pointer\n"); | 310 | printk(KERN_ERR "Missing struct module *fabric_mod pointer\n"); |
301 | return NULL; | 311 | return ERR_PTR(-EINVAL); |
302 | } | 312 | } |
303 | if (!(name)) { | 313 | if (!(name)) { |
304 | printk(KERN_ERR "Unable to locate passed fabric name\n"); | 314 | printk(KERN_ERR "Unable to locate passed fabric name\n"); |
305 | return NULL; | 315 | return ERR_PTR(-EINVAL); |
306 | } | 316 | } |
307 | if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) { | 317 | if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) { |
308 | printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC" | 318 | printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC" |
309 | "_NAME_SIZE\n", name); | 319 | "_NAME_SIZE\n", name); |
310 | return NULL; | 320 | return ERR_PTR(-EINVAL); |
311 | } | 321 | } |
312 | 322 | ||
313 | tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); | 323 | tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); |
314 | if (!(tf)) | 324 | if (!(tf)) |
315 | return NULL; | 325 | return ERR_PTR(-ENOMEM); |
316 | 326 | ||
317 | INIT_LIST_HEAD(&tf->tf_list); | 327 | INIT_LIST_HEAD(&tf->tf_list); |
318 | atomic_set(&tf->tf_access_cnt, 0); | 328 | atomic_set(&tf->tf_access_cnt, 0); |
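
With target_fabric_configfs_init() now returning ERR_PTR(-EINVAL) or ERR_PTR(-ENOMEM) instead of a bare NULL, callers can recover the specific errno. A sketch of the standard linux/err.h caller pattern; the call site here is hypothetical:

#include <linux/err.h>
/* struct target_fabric_configfs and the init call are declared in the
 * target core headers; this caller is illustrative only. */

static int example_register_fabric(struct module *mod, const char *name)
{
	struct target_fabric_configfs *tf;

	tf = target_fabric_configfs_init(mod, name);
	if (IS_ERR(tf))
		return PTR_ERR(tf);	/* -EINVAL or -ENOMEM from above */
	/* ... populate tf and register it ... */
	return 0;
}
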
@@ -591,7 +601,6 @@ void target_fabric_configfs_deregister( | |||
591 | 601 | ||
592 | printk("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>" | 602 | printk("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>" |
593 | ">>>>>\n"); | 603 | ">>>>>\n"); |
594 | return; | ||
595 | } | 604 | } |
596 | EXPORT_SYMBOL(target_fabric_configfs_deregister); | 605 | EXPORT_SYMBOL(target_fabric_configfs_deregister); |
597 | 606 | ||
@@ -616,7 +625,8 @@ static ssize_t target_core_dev_show_attr_##_name( \ | |||
616 | spin_unlock(&se_dev->se_dev_lock); \ | 625 | spin_unlock(&se_dev->se_dev_lock); \ |
617 | return -ENODEV; \ | 626 | return -ENODEV; \ |
618 | } \ | 627 | } \ |
619 | rb = snprintf(page, PAGE_SIZE, "%u\n", (u32)DEV_ATTRIB(dev)->_name); \ | 628 | rb = snprintf(page, PAGE_SIZE, "%u\n", \ |
629 | (u32)dev->se_sub_dev->se_dev_attrib._name); \ | ||
620 | spin_unlock(&se_dev->se_dev_lock); \ | 630 | spin_unlock(&se_dev->se_dev_lock); \ |
621 | \ | 631 | \ |
622 | return rb; \ | 632 | return rb; \ |
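
The show routine above is stamped out by a token-pasting macro, one function per attribute name. A reduced userspace sketch of that ##_name generator pattern (struct and names hypothetical; the real macro additionally takes se_dev_lock and fails with -ENODEV when se_dev_ptr is unset, as shown above):

#include <stdio.h>

struct attrs { unsigned int block_size, max_sectors; };

/* One getter per field, mirroring the target_core_dev_show_attr_##_name
 * scheme in the hunk above. */
#define DEF_SHOW_ATTR(_name)						\
static int show_attr_##_name(struct attrs *a, char *page, size_t n)	\
{									\
	return snprintf(page, n, "%u\n", a->_name);			\
}

DEF_SHOW_ATTR(block_size)
DEF_SHOW_ATTR(max_sectors)
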
@@ -1078,7 +1088,7 @@ static ssize_t target_core_dev_pr_show_spc3_res( | |||
1078 | PR_REG_ISID_ID_LEN); | 1088 | PR_REG_ISID_ID_LEN); |
1079 | 1089 | ||
1080 | *len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n", | 1090 | *len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n", |
1081 | TPG_TFO(se_nacl->se_tpg)->get_fabric_name(), | 1091 | se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), |
1082 | se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : ""); | 1092 | se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : ""); |
1083 | spin_unlock(&dev->dev_reservation_lock); | 1093 | spin_unlock(&dev->dev_reservation_lock); |
1084 | 1094 | ||
@@ -1100,7 +1110,7 @@ static ssize_t target_core_dev_pr_show_spc2_res( | |||
1100 | return *len; | 1110 | return *len; |
1101 | } | 1111 | } |
1102 | *len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n", | 1112 | *len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n", |
1103 | TPG_TFO(se_nacl->se_tpg)->get_fabric_name(), | 1113 | se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), |
1104 | se_nacl->initiatorname); | 1114 | se_nacl->initiatorname); |
1105 | spin_unlock(&dev->dev_reservation_lock); | 1115 | spin_unlock(&dev->dev_reservation_lock); |
1106 | 1116 | ||
@@ -1116,7 +1126,7 @@ static ssize_t target_core_dev_pr_show_attr_res_holder( | |||
1116 | if (!(su_dev->se_dev_ptr)) | 1126 | if (!(su_dev->se_dev_ptr)) |
1117 | return -ENODEV; | 1127 | return -ENODEV; |
1118 | 1128 | ||
1119 | switch (T10_RES(su_dev)->res_type) { | 1129 | switch (su_dev->t10_pr.res_type) { |
1120 | case SPC3_PERSISTENT_RESERVATIONS: | 1130 | case SPC3_PERSISTENT_RESERVATIONS: |
1121 | target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr, | 1131 | target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr, |
1122 | page, &len); | 1132 | page, &len); |
@@ -1153,7 +1163,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts( | |||
1153 | if (!(dev)) | 1163 | if (!(dev)) |
1154 | return -ENODEV; | 1164 | return -ENODEV; |
1155 | 1165 | ||
1156 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | 1166 | if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) |
1157 | return len; | 1167 | return len; |
1158 | 1168 | ||
1159 | spin_lock(&dev->dev_reservation_lock); | 1169 | spin_lock(&dev->dev_reservation_lock); |
@@ -1190,10 +1200,10 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_generation( | |||
1190 | if (!(su_dev->se_dev_ptr)) | 1200 | if (!(su_dev->se_dev_ptr)) |
1191 | return -ENODEV; | 1201 | return -ENODEV; |
1192 | 1202 | ||
1193 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | 1203 | if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) |
1194 | return 0; | 1204 | return 0; |
1195 | 1205 | ||
1196 | return sprintf(page, "0x%08x\n", T10_RES(su_dev)->pr_generation); | 1206 | return sprintf(page, "0x%08x\n", su_dev->t10_pr.pr_generation); |
1197 | } | 1207 | } |
1198 | 1208 | ||
1199 | SE_DEV_PR_ATTR_RO(res_pr_generation); | 1209 | SE_DEV_PR_ATTR_RO(res_pr_generation); |
@@ -1217,7 +1227,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port( | |||
1217 | if (!(dev)) | 1227 | if (!(dev)) |
1218 | return -ENODEV; | 1228 | return -ENODEV; |
1219 | 1229 | ||
1220 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | 1230 | if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) |
1221 | return len; | 1231 | return len; |
1222 | 1232 | ||
1223 | spin_lock(&dev->dev_reservation_lock); | 1233 | spin_lock(&dev->dev_reservation_lock); |
@@ -1230,7 +1240,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port( | |||
1230 | se_nacl = pr_reg->pr_reg_nacl; | 1240 | se_nacl = pr_reg->pr_reg_nacl; |
1231 | se_tpg = se_nacl->se_tpg; | 1241 | se_tpg = se_nacl->se_tpg; |
1232 | lun = pr_reg->pr_reg_tg_pt_lun; | 1242 | lun = pr_reg->pr_reg_tg_pt_lun; |
1233 | tfo = TPG_TFO(se_tpg); | 1243 | tfo = se_tpg->se_tpg_tfo; |
1234 | 1244 | ||
1235 | len += sprintf(page+len, "SPC-3 Reservation: %s" | 1245 | len += sprintf(page+len, "SPC-3 Reservation: %s" |
1236 | " Target Node Endpoint: %s\n", tfo->get_fabric_name(), | 1246 | " Target Node Endpoint: %s\n", tfo->get_fabric_name(), |
@@ -1264,13 +1274,13 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( | |||
1264 | if (!(su_dev->se_dev_ptr)) | 1274 | if (!(su_dev->se_dev_ptr)) |
1265 | return -ENODEV; | 1275 | return -ENODEV; |
1266 | 1276 | ||
1267 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | 1277 | if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) |
1268 | return len; | 1278 | return len; |
1269 | 1279 | ||
1270 | len += sprintf(page+len, "SPC-3 PR Registrations:\n"); | 1280 | len += sprintf(page+len, "SPC-3 PR Registrations:\n"); |
1271 | 1281 | ||
1272 | spin_lock(&T10_RES(su_dev)->registration_lock); | 1282 | spin_lock(&su_dev->t10_pr.registration_lock); |
1273 | list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list, | 1283 | list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list, |
1274 | pr_reg_list) { | 1284 | pr_reg_list) { |
1275 | 1285 | ||
1276 | memset(buf, 0, 384); | 1286 | memset(buf, 0, 384); |
@@ -1290,7 +1300,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( | |||
1290 | len += sprintf(page+len, "%s", buf); | 1300 | len += sprintf(page+len, "%s", buf); |
1291 | reg_count++; | 1301 | reg_count++; |
1292 | } | 1302 | } |
1293 | spin_unlock(&T10_RES(su_dev)->registration_lock); | 1303 | spin_unlock(&su_dev->t10_pr.registration_lock); |
1294 | 1304 | ||
1295 | if (!(reg_count)) | 1305 | if (!(reg_count)) |
1296 | len += sprintf(page+len, "None\n"); | 1306 | len += sprintf(page+len, "None\n"); |
@@ -1315,7 +1325,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_type( | |||
1315 | if (!(dev)) | 1325 | if (!(dev)) |
1316 | return -ENODEV; | 1326 | return -ENODEV; |
1317 | 1327 | ||
1318 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | 1328 | if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) |
1319 | return len; | 1329 | return len; |
1320 | 1330 | ||
1321 | spin_lock(&dev->dev_reservation_lock); | 1331 | spin_lock(&dev->dev_reservation_lock); |
@@ -1346,7 +1356,7 @@ static ssize_t target_core_dev_pr_show_attr_res_type( | |||
1346 | if (!(su_dev->se_dev_ptr)) | 1356 | if (!(su_dev->se_dev_ptr)) |
1347 | return -ENODEV; | 1357 | return -ENODEV; |
1348 | 1358 | ||
1349 | switch (T10_RES(su_dev)->res_type) { | 1359 | switch (su_dev->t10_pr.res_type) { |
1350 | case SPC3_PERSISTENT_RESERVATIONS: | 1360 | case SPC3_PERSISTENT_RESERVATIONS: |
1351 | len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n"); | 1361 | len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n"); |
1352 | break; | 1362 | break; |
@@ -1377,11 +1387,11 @@ static ssize_t target_core_dev_pr_show_attr_res_aptpl_active( | |||
1377 | if (!(su_dev->se_dev_ptr)) | 1387 | if (!(su_dev->se_dev_ptr)) |
1378 | return -ENODEV; | 1388 | return -ENODEV; |
1379 | 1389 | ||
1380 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | 1390 | if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) |
1381 | return 0; | 1391 | return 0; |
1382 | 1392 | ||
1383 | return sprintf(page, "APTPL Bit Status: %s\n", | 1393 | return sprintf(page, "APTPL Bit Status: %s\n", |
1384 | (T10_RES(su_dev)->pr_aptpl_active) ? "Activated" : "Disabled"); | 1394 | (su_dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled"); |
1385 | } | 1395 | } |
1386 | 1396 | ||
1387 | SE_DEV_PR_ATTR_RO(res_aptpl_active); | 1397 | SE_DEV_PR_ATTR_RO(res_aptpl_active); |
@@ -1396,7 +1406,7 @@ static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata( | |||
1396 | if (!(su_dev->se_dev_ptr)) | 1406 | if (!(su_dev->se_dev_ptr)) |
1397 | return -ENODEV; | 1407 | return -ENODEV; |
1398 | 1408 | ||
1399 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | 1409 | if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) |
1400 | return 0; | 1410 | return 0; |
1401 | 1411 | ||
1402 | return sprintf(page, "Ready to process PR APTPL metadata..\n"); | 1412 | return sprintf(page, "Ready to process PR APTPL metadata..\n"); |
@@ -1448,7 +1458,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1448 | if (!(dev)) | 1458 | if (!(dev)) |
1449 | return -ENODEV; | 1459 | return -ENODEV; |
1450 | 1460 | ||
1451 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | 1461 | if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) |
1452 | return 0; | 1462 | return 0; |
1453 | 1463 | ||
1454 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 1464 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
@@ -1594,7 +1604,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1594 | goto out; | 1604 | goto out; |
1595 | } | 1605 | } |
1596 | 1606 | ||
1597 | ret = core_scsi3_alloc_aptpl_registration(T10_RES(su_dev), sa_res_key, | 1607 | ret = core_scsi3_alloc_aptpl_registration(&su_dev->t10_pr, sa_res_key, |
1598 | i_port, isid, mapped_lun, t_port, tpgt, target_lun, | 1608 | i_port, isid, mapped_lun, t_port, tpgt, target_lun, |
1599 | res_holder, all_tg_pt, type); | 1609 | res_holder, all_tg_pt, type); |
1600 | out: | 1610 | out: |
@@ -1842,7 +1852,7 @@ static ssize_t target_core_show_alua_lu_gp(void *p, char *page) | |||
1842 | if (!(dev)) | 1852 | if (!(dev)) |
1843 | return -ENODEV; | 1853 | return -ENODEV; |
1844 | 1854 | ||
1845 | if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) | 1855 | if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) |
1846 | return len; | 1856 | return len; |
1847 | 1857 | ||
1848 | lu_gp_mem = dev->dev_alua_lu_gp_mem; | 1858 | lu_gp_mem = dev->dev_alua_lu_gp_mem; |
@@ -1881,7 +1891,7 @@ static ssize_t target_core_store_alua_lu_gp( | |||
1881 | if (!(dev)) | 1891 | if (!(dev)) |
1882 | return -ENODEV; | 1892 | return -ENODEV; |
1883 | 1893 | ||
1884 | if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) { | 1894 | if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) { |
1885 | printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for %s/%s\n", | 1895 | printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for %s/%s\n", |
1886 | config_item_name(&hba->hba_group.cg_item), | 1896 | config_item_name(&hba->hba_group.cg_item), |
1887 | config_item_name(&su_dev->se_dev_group.cg_item)); | 1897 | config_item_name(&su_dev->se_dev_group.cg_item)); |
@@ -2557,9 +2567,9 @@ static ssize_t target_core_alua_tg_pt_gp_show_attr_members( | |||
2557 | lun = port->sep_lun; | 2567 | lun = port->sep_lun; |
2558 | 2568 | ||
2559 | cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu" | 2569 | cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu" |
2560 | "/%s\n", TPG_TFO(tpg)->get_fabric_name(), | 2570 | "/%s\n", tpg->se_tpg_tfo->get_fabric_name(), |
2561 | TPG_TFO(tpg)->tpg_get_wwn(tpg), | 2571 | tpg->se_tpg_tfo->tpg_get_wwn(tpg), |
2562 | TPG_TFO(tpg)->tpg_get_tag(tpg), | 2572 | tpg->se_tpg_tfo->tpg_get_tag(tpg), |
2563 | config_item_name(&lun->lun_group.cg_item)); | 2573 | config_item_name(&lun->lun_group.cg_item)); |
2564 | cur_len++; /* Extra byte for NULL terminator */ | 2574 | cur_len++; /* Extra byte for NULL terminator */ |
2565 | 2575 | ||
@@ -2748,17 +2758,17 @@ static struct config_group *target_core_make_subdev( | |||
2748 | " struct se_subsystem_dev\n"); | 2758 | " struct se_subsystem_dev\n"); |
2749 | goto unlock; | 2759 | goto unlock; |
2750 | } | 2760 | } |
2751 | INIT_LIST_HEAD(&se_dev->g_se_dev_list); | 2761 | INIT_LIST_HEAD(&se_dev->se_dev_node); |
2752 | INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); | 2762 | INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); |
2753 | spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); | 2763 | spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); |
2754 | INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list); | 2764 | INIT_LIST_HEAD(&se_dev->t10_pr.registration_list); |
2755 | INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list); | 2765 | INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list); |
2756 | spin_lock_init(&se_dev->t10_reservation.registration_lock); | 2766 | spin_lock_init(&se_dev->t10_pr.registration_lock); |
2757 | spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock); | 2767 | spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock); |
2758 | INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); | 2768 | INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); |
2759 | spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); | 2769 | spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); |
2760 | spin_lock_init(&se_dev->se_dev_lock); | 2770 | spin_lock_init(&se_dev->se_dev_lock); |
2761 | se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; | 2771 | se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; |
2762 | se_dev->t10_wwn.t10_sub_dev = se_dev; | 2772 | se_dev->t10_wwn.t10_sub_dev = se_dev; |
2763 | se_dev->t10_alua.t10_sub_dev = se_dev; | 2773 | se_dev->t10_alua.t10_sub_dev = se_dev; |
2764 | se_dev->se_dev_attrib.da_sub_dev = se_dev; | 2774 | se_dev->se_dev_attrib.da_sub_dev = se_dev; |
@@ -2784,9 +2794,9 @@ static struct config_group *target_core_make_subdev( | |||
2784 | " from allocate_virtdevice()\n"); | 2794 | " from allocate_virtdevice()\n"); |
2785 | goto out; | 2795 | goto out; |
2786 | } | 2796 | } |
2787 | spin_lock(&se_global->g_device_lock); | 2797 | spin_lock(&se_device_lock); |
2788 | list_add_tail(&se_dev->g_se_dev_list, &se_global->g_se_dev_list); | 2798 | list_add_tail(&se_dev->se_dev_node, &se_dev_list); |
2789 | spin_unlock(&se_global->g_device_lock); | 2799 | spin_unlock(&se_device_lock); |
2790 | 2800 | ||
2791 | config_group_init_type_name(&se_dev->se_dev_group, name, | 2801 | config_group_init_type_name(&se_dev->se_dev_group, name, |
2792 | &target_core_dev_cit); | 2802 | &target_core_dev_cit); |
@@ -2814,7 +2824,7 @@ static struct config_group *target_core_make_subdev( | |||
2814 | if (!(tg_pt_gp)) | 2824 | if (!(tg_pt_gp)) |
2815 | goto out; | 2825 | goto out; |
2816 | 2826 | ||
2817 | tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group; | 2827 | tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group; |
2818 | tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, | 2828 | tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, |
2819 | GFP_KERNEL); | 2829 | GFP_KERNEL); |
2820 | if (!(tg_pt_gp_cg->default_groups)) { | 2830 | if (!(tg_pt_gp_cg->default_groups)) { |
@@ -2827,11 +2837,11 @@ static struct config_group *target_core_make_subdev( | |||
2827 | "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit); | 2837 | "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit); |
2828 | tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group; | 2838 | tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group; |
2829 | tg_pt_gp_cg->default_groups[1] = NULL; | 2839 | tg_pt_gp_cg->default_groups[1] = NULL; |
2830 | T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp; | 2840 | se_dev->t10_alua.default_tg_pt_gp = tg_pt_gp; |
2831 | /* | 2841 | /* |
2832 | * Add core/$HBA/$DEV/statistics/ default groups | 2842 | * Add core/$HBA/$DEV/statistics/ default groups |
2833 | */ | 2843 | */ |
2834 | dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group; | 2844 | dev_stat_grp = &se_dev->dev_stat_grps.stat_group; |
2835 | dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4, | 2845 | dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4, |
2836 | GFP_KERNEL); | 2846 | GFP_KERNEL); |
2837 | if (!dev_stat_grp->default_groups) { | 2847 | if (!dev_stat_grp->default_groups) { |
@@ -2846,9 +2856,9 @@ static struct config_group *target_core_make_subdev( | |||
2846 | mutex_unlock(&hba->hba_access_mutex); | 2856 | mutex_unlock(&hba->hba_access_mutex); |
2847 | return &se_dev->se_dev_group; | 2857 | return &se_dev->se_dev_group; |
2848 | out: | 2858 | out: |
2849 | if (T10_ALUA(se_dev)->default_tg_pt_gp) { | 2859 | if (se_dev->t10_alua.default_tg_pt_gp) { |
2850 | core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp); | 2860 | core_alua_free_tg_pt_gp(se_dev->t10_alua.default_tg_pt_gp); |
2851 | T10_ALUA(se_dev)->default_tg_pt_gp = NULL; | 2861 | se_dev->t10_alua.default_tg_pt_gp = NULL; |
2852 | } | 2862 | } |
2853 | if (dev_stat_grp) | 2863 | if (dev_stat_grp) |
2854 | kfree(dev_stat_grp->default_groups); | 2864 | kfree(dev_stat_grp->default_groups); |
@@ -2881,11 +2891,11 @@ static void target_core_drop_subdev( | |||
2881 | mutex_lock(&hba->hba_access_mutex); | 2891 | mutex_lock(&hba->hba_access_mutex); |
2882 | t = hba->transport; | 2892 | t = hba->transport; |
2883 | 2893 | ||
2884 | spin_lock(&se_global->g_device_lock); | 2894 | spin_lock(&se_device_lock); |
2885 | list_del(&se_dev->g_se_dev_list); | 2895 | list_del(&se_dev->se_dev_node); |
2886 | spin_unlock(&se_global->g_device_lock); | 2896 | spin_unlock(&se_device_lock); |
2887 | 2897 | ||
2888 | dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group; | 2898 | dev_stat_grp = &se_dev->dev_stat_grps.stat_group; |
2889 | for (i = 0; dev_stat_grp->default_groups[i]; i++) { | 2899 | for (i = 0; dev_stat_grp->default_groups[i]; i++) { |
2890 | df_item = &dev_stat_grp->default_groups[i]->cg_item; | 2900 | df_item = &dev_stat_grp->default_groups[i]->cg_item; |
2891 | dev_stat_grp->default_groups[i] = NULL; | 2901 | dev_stat_grp->default_groups[i] = NULL; |
@@ -2893,7 +2903,7 @@ static void target_core_drop_subdev( | |||
2893 | } | 2903 | } |
2894 | kfree(dev_stat_grp->default_groups); | 2904 | kfree(dev_stat_grp->default_groups); |
2895 | 2905 | ||
2896 | tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group; | 2906 | tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group; |
2897 | for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) { | 2907 | for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) { |
2898 | df_item = &tg_pt_gp_cg->default_groups[i]->cg_item; | 2908 | df_item = &tg_pt_gp_cg->default_groups[i]->cg_item; |
2899 | tg_pt_gp_cg->default_groups[i] = NULL; | 2909 | tg_pt_gp_cg->default_groups[i] = NULL; |
@@ -2904,7 +2914,7 @@ static void target_core_drop_subdev( | |||
2904 | * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp | 2914 | * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp |
2905 | * directly from target_core_alua_tg_pt_gp_release(). | 2915 | * directly from target_core_alua_tg_pt_gp_release(). |
2906 | */ | 2916 | */ |
2907 | T10_ALUA(se_dev)->default_tg_pt_gp = NULL; | 2917 | se_dev->t10_alua.default_tg_pt_gp = NULL; |
2908 | 2918 | ||
2909 | dev_cg = &se_dev->se_dev_group; | 2919 | dev_cg = &se_dev->se_dev_group; |
2910 | for (i = 0; dev_cg->default_groups[i]; i++) { | 2920 | for (i = 0; dev_cg->default_groups[i]; i++) { |
@@ -3130,10 +3140,9 @@ static int __init target_core_init_configfs(void) | |||
3130 | 3140 | ||
3131 | INIT_LIST_HEAD(&g_tf_list); | 3141 | INIT_LIST_HEAD(&g_tf_list); |
3132 | mutex_init(&g_tf_lock); | 3142 | mutex_init(&g_tf_lock); |
3133 | init_scsi_index_table(); | 3143 | ret = init_se_kmem_caches(); |
3134 | ret = init_se_global(); | ||
3135 | if (ret < 0) | 3144 | if (ret < 0) |
3136 | return -1; | 3145 | return ret; |
3137 | /* | 3146 | /* |
3138 | * Create $CONFIGFS/target/core default group for HBA <-> Storage Object | 3147 | * Create $CONFIGFS/target/core default group for HBA <-> Storage Object |
3139 | * and ALUA Logical Unit Group and Target Port Group infrastructure. | 3148 | * and ALUA Logical Unit Group and Target Port Group infrastructure. |
@@ -3146,29 +3155,29 @@ static int __init target_core_init_configfs(void) | |||
3146 | goto out_global; | 3155 | goto out_global; |
3147 | } | 3156 | } |
3148 | 3157 | ||
3149 | config_group_init_type_name(&se_global->target_core_hbagroup, | 3158 | config_group_init_type_name(&target_core_hbagroup, |
3150 | "core", &target_core_cit); | 3159 | "core", &target_core_cit); |
3151 | target_cg->default_groups[0] = &se_global->target_core_hbagroup; | 3160 | target_cg->default_groups[0] = &target_core_hbagroup; |
3152 | target_cg->default_groups[1] = NULL; | 3161 | target_cg->default_groups[1] = NULL; |
3153 | /* | 3162 | /* |
3154 | * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/ | 3163 | * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/ |
3155 | */ | 3164 | */ |
3156 | hba_cg = &se_global->target_core_hbagroup; | 3165 | hba_cg = &target_core_hbagroup; |
3157 | hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, | 3166 | hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, |
3158 | GFP_KERNEL); | 3167 | GFP_KERNEL); |
3159 | if (!(hba_cg->default_groups)) { | 3168 | if (!(hba_cg->default_groups)) { |
3160 | printk(KERN_ERR "Unable to allocate hba_cg->default_groups\n"); | 3169 | printk(KERN_ERR "Unable to allocate hba_cg->default_groups\n"); |
3161 | goto out_global; | 3170 | goto out_global; |
3162 | } | 3171 | } |
3163 | config_group_init_type_name(&se_global->alua_group, | 3172 | config_group_init_type_name(&alua_group, |
3164 | "alua", &target_core_alua_cit); | 3173 | "alua", &target_core_alua_cit); |
3165 | hba_cg->default_groups[0] = &se_global->alua_group; | 3174 | hba_cg->default_groups[0] = &alua_group; |
3166 | hba_cg->default_groups[1] = NULL; | 3175 | hba_cg->default_groups[1] = NULL; |
3167 | /* | 3176 | /* |
3168 | * Add ALUA Logical Unit Group and Target Port Group ConfigFS | 3177 | * Add ALUA Logical Unit Group and Target Port Group ConfigFS |
3169 | * groups under /sys/kernel/config/target/core/alua/ | 3178 | * groups under /sys/kernel/config/target/core/alua/ |
3170 | */ | 3179 | */ |
3171 | alua_cg = &se_global->alua_group; | 3180 | alua_cg = &alua_group; |
3172 | alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, | 3181 | alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, |
3173 | GFP_KERNEL); | 3182 | GFP_KERNEL); |
3174 | if (!(alua_cg->default_groups)) { | 3183 | if (!(alua_cg->default_groups)) { |
@@ -3176,9 +3185,9 @@ static int __init target_core_init_configfs(void) | |||
3176 | goto out_global; | 3185 | goto out_global; |
3177 | } | 3186 | } |
3178 | 3187 | ||
3179 | config_group_init_type_name(&se_global->alua_lu_gps_group, | 3188 | config_group_init_type_name(&alua_lu_gps_group, |
3180 | "lu_gps", &target_core_alua_lu_gps_cit); | 3189 | "lu_gps", &target_core_alua_lu_gps_cit); |
3181 | alua_cg->default_groups[0] = &se_global->alua_lu_gps_group; | 3190 | alua_cg->default_groups[0] = &alua_lu_gps_group; |
3182 | alua_cg->default_groups[1] = NULL; | 3191 | alua_cg->default_groups[1] = NULL; |
3183 | /* | 3192 | /* |
3184 | * Add core/alua/lu_gps/default_lu_gp | 3193 | * Add core/alua/lu_gps/default_lu_gp |
@@ -3187,7 +3196,7 @@ static int __init target_core_init_configfs(void) | |||
3187 | if (IS_ERR(lu_gp)) | 3196 | if (IS_ERR(lu_gp)) |
3188 | goto out_global; | 3197 | goto out_global; |
3189 | 3198 | ||
3190 | lu_gp_cg = &se_global->alua_lu_gps_group; | 3199 | lu_gp_cg = &alua_lu_gps_group; |
3191 | lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, | 3200 | lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, |
3192 | GFP_KERNEL); | 3201 | GFP_KERNEL); |
3193 | if (!(lu_gp_cg->default_groups)) { | 3202 | if (!(lu_gp_cg->default_groups)) { |
@@ -3199,7 +3208,7 @@ static int __init target_core_init_configfs(void) | |||
3199 | &target_core_alua_lu_gp_cit); | 3208 | &target_core_alua_lu_gp_cit); |
3200 | lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group; | 3209 | lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group; |
3201 | lu_gp_cg->default_groups[1] = NULL; | 3210 | lu_gp_cg->default_groups[1] = NULL; |
3202 | se_global->default_lu_gp = lu_gp; | 3211 | default_lu_gp = lu_gp; |
3203 | /* | 3212 | /* |
3204 | * Register the target_core_mod subsystem with configfs. | 3213 | * Register the target_core_mod subsystem with configfs. |
3205 | */ | 3214 | */ |
@@ -3229,9 +3238,9 @@ out: | |||
3229 | core_dev_release_virtual_lun0(); | 3238 | core_dev_release_virtual_lun0(); |
3230 | rd_module_exit(); | 3239 | rd_module_exit(); |
3231 | out_global: | 3240 | out_global: |
3232 | if (se_global->default_lu_gp) { | 3241 | if (default_lu_gp) { |
3233 | core_alua_free_lu_gp(se_global->default_lu_gp); | 3242 | core_alua_free_lu_gp(default_lu_gp); |
3234 | se_global->default_lu_gp = NULL; | 3243 | default_lu_gp = NULL; |
3235 | } | 3244 | } |
3236 | if (lu_gp_cg) | 3245 | if (lu_gp_cg) |
3237 | kfree(lu_gp_cg->default_groups); | 3246 | kfree(lu_gp_cg->default_groups); |
@@ -3240,8 +3249,8 @@ out_global: | |||
3240 | if (hba_cg) | 3249 | if (hba_cg) |
3241 | kfree(hba_cg->default_groups); | 3250 | kfree(hba_cg->default_groups); |
3242 | kfree(target_cg->default_groups); | 3251 | kfree(target_cg->default_groups); |
3243 | release_se_global(); | 3252 | release_se_kmem_caches(); |
3244 | return -1; | 3253 | return ret; |
3245 | } | 3254 | } |
3246 | 3255 | ||
3247 | static void __exit target_core_exit_configfs(void) | 3256 | static void __exit target_core_exit_configfs(void) |
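
The reworked error path in target_core_init_configfs() above shows the usual goto-unwind shape: free only what was built, in reverse order, and return the errno from the failing step rather than a flat -1. A minimal standalone sketch of that shape (resources hypothetical):

#include <errno.h>
#include <stdlib.h>

static int init_example(void)
{
	void *a, *b;
	int ret;

	a = malloc(64);
	if (!a)
		return -ENOMEM;

	b = malloc(64);
	if (!b) {
		ret = -ENOMEM;
		goto out_free_a;	/* unwind only what was built */
	}
	/* Success: both objects stay owned until the exit path runs. */
	return 0;

out_free_a:
	free(a);
	return ret;	/* propagate the real errno, not -1 */
}
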
@@ -3251,10 +3260,9 @@ static void __exit target_core_exit_configfs(void) | |||
3251 | struct config_item *item; | 3260 | struct config_item *item; |
3252 | int i; | 3261 | int i; |
3253 | 3262 | ||
3254 | se_global->in_shutdown = 1; | ||
3255 | subsys = target_core_subsystem[0]; | 3263 | subsys = target_core_subsystem[0]; |
3256 | 3264 | ||
3257 | lu_gp_cg = &se_global->alua_lu_gps_group; | 3265 | lu_gp_cg = &alua_lu_gps_group; |
3258 | for (i = 0; lu_gp_cg->default_groups[i]; i++) { | 3266 | for (i = 0; lu_gp_cg->default_groups[i]; i++) { |
3259 | item = &lu_gp_cg->default_groups[i]->cg_item; | 3267 | item = &lu_gp_cg->default_groups[i]->cg_item; |
3260 | lu_gp_cg->default_groups[i] = NULL; | 3268 | lu_gp_cg->default_groups[i] = NULL; |
@@ -3263,7 +3271,7 @@ static void __exit target_core_exit_configfs(void) | |||
3263 | kfree(lu_gp_cg->default_groups); | 3271 | kfree(lu_gp_cg->default_groups); |
3264 | lu_gp_cg->default_groups = NULL; | 3272 | lu_gp_cg->default_groups = NULL; |
3265 | 3273 | ||
3266 | alua_cg = &se_global->alua_group; | 3274 | alua_cg = &alua_group; |
3267 | for (i = 0; alua_cg->default_groups[i]; i++) { | 3275 | for (i = 0; alua_cg->default_groups[i]; i++) { |
3268 | item = &alua_cg->default_groups[i]->cg_item; | 3276 | item = &alua_cg->default_groups[i]->cg_item; |
3269 | alua_cg->default_groups[i] = NULL; | 3277 | alua_cg->default_groups[i] = NULL; |
@@ -3272,7 +3280,7 @@ static void __exit target_core_exit_configfs(void) | |||
3272 | kfree(alua_cg->default_groups); | 3280 | kfree(alua_cg->default_groups); |
3273 | alua_cg->default_groups = NULL; | 3281 | alua_cg->default_groups = NULL; |
3274 | 3282 | ||
3275 | hba_cg = &se_global->target_core_hbagroup; | 3283 | hba_cg = &target_core_hbagroup; |
3276 | for (i = 0; hba_cg->default_groups[i]; i++) { | 3284 | for (i = 0; hba_cg->default_groups[i]; i++) { |
3277 | item = &hba_cg->default_groups[i]->cg_item; | 3285 | item = &hba_cg->default_groups[i]->cg_item; |
3278 | hba_cg->default_groups[i] = NULL; | 3286 | hba_cg->default_groups[i] = NULL; |
@@ -3287,17 +3295,15 @@ static void __exit target_core_exit_configfs(void) | |||
3287 | configfs_unregister_subsystem(subsys); | 3295 | configfs_unregister_subsystem(subsys); |
3288 | kfree(subsys->su_group.default_groups); | 3296 | kfree(subsys->su_group.default_groups); |
3289 | 3297 | ||
3290 | core_alua_free_lu_gp(se_global->default_lu_gp); | 3298 | core_alua_free_lu_gp(default_lu_gp); |
3291 | se_global->default_lu_gp = NULL; | 3299 | default_lu_gp = NULL; |
3292 | 3300 | ||
3293 | printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric" | 3301 | printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric" |
3294 | " Infrastructure\n"); | 3302 | " Infrastructure\n"); |
3295 | 3303 | ||
3296 | core_dev_release_virtual_lun0(); | 3304 | core_dev_release_virtual_lun0(); |
3297 | rd_module_exit(); | 3305 | rd_module_exit(); |
3298 | release_se_global(); | 3306 | release_se_kmem_caches(); |
3299 | |||
3300 | return; | ||
3301 | } | 3307 | } |
3302 | 3308 | ||
3303 | MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS"); | 3309 | MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS"); |
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index e76ffc5b2079..fd923854505c 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * Filename: target_core_device.c (based on iscsi_target_device.c) | 2 | * Filename: target_core_device.c (based on iscsi_target_device.c) |
3 | * | 3 | * |
4 | * This file contains the iSCSI Virtual Device and Disk Transport | 4 | * This file contains the TCM Virtual Device and Disk Transport |
5 | * agnostic related functions. | 5 | * agnostic related functions. |
6 | * | 6 | * |
7 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. | 7 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. |
@@ -54,25 +54,30 @@ | |||
54 | static void se_dev_start(struct se_device *dev); | 54 | static void se_dev_start(struct se_device *dev); |
55 | static void se_dev_stop(struct se_device *dev); | 55 | static void se_dev_stop(struct se_device *dev); |
56 | 56 | ||
57 | static struct se_hba *lun0_hba; | ||
58 | static struct se_subsystem_dev *lun0_su_dev; | ||
59 | /* not static, needed by tpg.c */ | ||
60 | struct se_device *g_lun0_dev; | ||
61 | |||
57 | int transport_get_lun_for_cmd( | 62 | int transport_get_lun_for_cmd( |
58 | struct se_cmd *se_cmd, | 63 | struct se_cmd *se_cmd, |
59 | u32 unpacked_lun) | 64 | u32 unpacked_lun) |
60 | { | 65 | { |
61 | struct se_dev_entry *deve; | 66 | struct se_dev_entry *deve; |
62 | struct se_lun *se_lun = NULL; | 67 | struct se_lun *se_lun = NULL; |
63 | struct se_session *se_sess = SE_SESS(se_cmd); | 68 | struct se_session *se_sess = se_cmd->se_sess; |
64 | unsigned long flags; | 69 | unsigned long flags; |
65 | int read_only = 0; | 70 | int read_only = 0; |
66 | 71 | ||
67 | if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) { | 72 | if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) { |
68 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; | 73 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; |
69 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 74 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
70 | return -1; | 75 | return -ENODEV; |
71 | } | 76 | } |
72 | 77 | ||
73 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | 78 | spin_lock_irq(&se_sess->se_node_acl->device_list_lock); |
74 | deve = se_cmd->se_deve = | 79 | deve = se_cmd->se_deve = |
75 | &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; | 80 | &se_sess->se_node_acl->device_list[unpacked_lun]; |
76 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | 81 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { |
77 | if (se_cmd) { | 82 | if (se_cmd) { |
78 | deve->total_cmds++; | 83 | deve->total_cmds++; |
@@ -95,11 +100,11 @@ int transport_get_lun_for_cmd( | |||
95 | se_lun = se_cmd->se_lun = deve->se_lun; | 100 | se_lun = se_cmd->se_lun = deve->se_lun; |
96 | se_cmd->pr_res_key = deve->pr_res_key; | 101 | se_cmd->pr_res_key = deve->pr_res_key; |
97 | se_cmd->orig_fe_lun = unpacked_lun; | 102 | se_cmd->orig_fe_lun = unpacked_lun; |
98 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; | 103 | se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; |
99 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; | 104 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; |
100 | } | 105 | } |
101 | out: | 106 | out: |
102 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | 107 | spin_unlock_irq(&se_sess->se_node_acl->device_list_lock); |
103 | 108 | ||
104 | if (!se_lun) { | 109 | if (!se_lun) { |
105 | if (read_only) { | 110 | if (read_only) { |
@@ -107,9 +112,9 @@ out: | |||
107 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 112 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
108 | printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" | 113 | printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" |
109 | " Access for 0x%08x\n", | 114 | " Access for 0x%08x\n", |
110 | CMD_TFO(se_cmd)->get_fabric_name(), | 115 | se_cmd->se_tfo->get_fabric_name(), |
111 | unpacked_lun); | 116 | unpacked_lun); |
112 | return -1; | 117 | return -EACCES; |
113 | } else { | 118 | } else { |
114 | /* | 119 | /* |
115 | * Use the se_portal_group->tpg_virt_lun0 to allow for | 120 | * Use the se_portal_group->tpg_virt_lun0 to allow for |
@@ -121,9 +126,9 @@ out: | |||
121 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 126 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
122 | printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" | 127 | printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" |
123 | " Access for 0x%08x\n", | 128 | " Access for 0x%08x\n", |
124 | CMD_TFO(se_cmd)->get_fabric_name(), | 129 | se_cmd->se_tfo->get_fabric_name(), |
125 | unpacked_lun); | 130 | unpacked_lun); |
126 | return -1; | 131 | return -ENODEV; |
127 | } | 132 | } |
128 | /* | 133 | /* |
129 | * Force WRITE PROTECT for virtual LUN 0 | 134 | * Force WRITE PROTECT for virtual LUN 0 |
@@ -132,15 +137,15 @@ out: | |||
132 | (se_cmd->data_direction != DMA_NONE)) { | 137 | (se_cmd->data_direction != DMA_NONE)) { |
133 | se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; | 138 | se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; |
134 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 139 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
135 | return -1; | 140 | return -EACCES; |
136 | } | 141 | } |
137 | #if 0 | 142 | #if 0 |
138 | printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n", | 143 | printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n", |
139 | CMD_TFO(se_cmd)->get_fabric_name()); | 144 | se_cmd->se_tfo->get_fabric_name()); |
140 | #endif | 145 | #endif |
141 | se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; | 146 | se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; |
142 | se_cmd->orig_fe_lun = 0; | 147 | se_cmd->orig_fe_lun = 0; |
143 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; | 148 | se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; |
144 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; | 149 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; |
145 | } | 150 | } |
146 | } | 151 | } |
@@ -151,7 +156,7 @@ out: | |||
151 | if (se_dev_check_online(se_lun->lun_se_dev) != 0) { | 156 | if (se_dev_check_online(se_lun->lun_se_dev) != 0) { |
152 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; | 157 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; |
153 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 158 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
154 | return -1; | 159 | return -ENODEV; |
155 | } | 160 | } |
156 | 161 | ||
157 | { | 162 | { |
@@ -171,10 +176,10 @@ out: | |||
171 | */ | 176 | */ |
172 | spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); | 177 | spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); |
173 | list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list); | 178 | list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list); |
174 | atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1); | 179 | atomic_set(&se_cmd->t_task->transport_lun_active, 1); |
175 | #if 0 | 180 | #if 0 |
176 | printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n", | 181 | printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n", |
177 | CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun); | 182 | se_cmd->se_tfo->get_task_tag(se_cmd), se_lun->unpacked_lun); |
178 | #endif | 183 | #endif |
179 | spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); | 184 | spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); |
180 | 185 | ||
@@ -189,35 +194,35 @@ int transport_get_lun_for_tmr( | |||
189 | struct se_device *dev = NULL; | 194 | struct se_device *dev = NULL; |
190 | struct se_dev_entry *deve; | 195 | struct se_dev_entry *deve; |
191 | struct se_lun *se_lun = NULL; | 196 | struct se_lun *se_lun = NULL; |
192 | struct se_session *se_sess = SE_SESS(se_cmd); | 197 | struct se_session *se_sess = se_cmd->se_sess; |
193 | struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; | 198 | struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; |
194 | 199 | ||
195 | if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) { | 200 | if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) { |
196 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; | 201 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; |
197 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 202 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
198 | return -1; | 203 | return -ENODEV; |
199 | } | 204 | } |
200 | 205 | ||
201 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | 206 | spin_lock_irq(&se_sess->se_node_acl->device_list_lock); |
202 | deve = se_cmd->se_deve = | 207 | deve = se_cmd->se_deve = |
203 | &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; | 208 | &se_sess->se_node_acl->device_list[unpacked_lun]; |
204 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | 209 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { |
205 | se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun; | 210 | se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun; |
206 | dev = se_lun->lun_se_dev; | 211 | dev = se_lun->lun_se_dev; |
207 | se_cmd->pr_res_key = deve->pr_res_key; | 212 | se_cmd->pr_res_key = deve->pr_res_key; |
208 | se_cmd->orig_fe_lun = unpacked_lun; | 213 | se_cmd->orig_fe_lun = unpacked_lun; |
209 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; | 214 | se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; |
210 | /* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */ | 215 | /* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */ |
211 | } | 216 | } |
212 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | 217 | spin_unlock_irq(&se_sess->se_node_acl->device_list_lock); |
213 | 218 | ||
214 | if (!se_lun) { | 219 | if (!se_lun) { |
215 | printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" | 220 | printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" |
216 | " Access for 0x%08x\n", | 221 | " Access for 0x%08x\n", |
217 | CMD_TFO(se_cmd)->get_fabric_name(), | 222 | se_cmd->se_tfo->get_fabric_name(), |
218 | unpacked_lun); | 223 | unpacked_lun); |
219 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 224 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
220 | return -1; | 225 | return -ENODEV; |
221 | } | 226 | } |
222 | /* | 227 | /* |
223 | * Determine if the struct se_lun is online. | 228 | * Determine if the struct se_lun is online. |
@@ -225,7 +230,7 @@ int transport_get_lun_for_tmr( | |||
225 | /* #warning FIXME: Check for LUN_RESET + UNIT Attention */ | 230 | /* #warning FIXME: Check for LUN_RESET + UNIT Attention */ |
226 | if (se_dev_check_online(se_lun->lun_se_dev) != 0) { | 231 | if (se_dev_check_online(se_lun->lun_se_dev) != 0) { |
227 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 232 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
228 | return -1; | 233 | return -ENODEV; |
229 | } | 234 | } |
230 | se_tmr->tmr_dev = dev; | 235 | se_tmr->tmr_dev = dev; |
231 | 236 | ||
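
The -1 to -ENODEV/-EACCES changes above follow the changelog's "use errno values instead of returning -1": a negative errno tells the caller why the lookup failed instead of collapsing every failure into one value. A hedged sketch with hypothetical names mirroring the checks in these hunks:

        #include <errno.h>

        int lookup_lun(unsigned int unpacked_lun, unsigned int max_luns,
                       int lun_online, int write_protected, int is_write)
        {
                if (unpacked_lun >= max_luns)
                        return -ENODEV;         /* was: return -1 */
                if (write_protected && is_write)
                        return -EACCES;         /* was: return -1 */
                if (!lun_online)
                        return -ENODEV;         /* was: return -1 */
                return 0;
        }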
@@ -263,14 +268,14 @@ struct se_dev_entry *core_get_se_deve_from_rtpi( | |||
263 | if (!(lun)) { | 268 | if (!(lun)) { |
264 | printk(KERN_ERR "%s device entries device pointer is" | 269 | printk(KERN_ERR "%s device entries device pointer is" |
265 | " NULL, but Initiator has access.\n", | 270 | " NULL, but Initiator has access.\n", |
266 | TPG_TFO(tpg)->get_fabric_name()); | 271 | tpg->se_tpg_tfo->get_fabric_name()); |
267 | continue; | 272 | continue; |
268 | } | 273 | } |
269 | port = lun->lun_sep; | 274 | port = lun->lun_sep; |
270 | if (!(port)) { | 275 | if (!(port)) { |
271 | printk(KERN_ERR "%s device entries device pointer is" | 276 | printk(KERN_ERR "%s device entries device pointer is" |
272 | " NULL, but Initiator has access.\n", | 277 | " NULL, but Initiator has access.\n", |
273 | TPG_TFO(tpg)->get_fabric_name()); | 278 | tpg->se_tpg_tfo->get_fabric_name()); |
274 | continue; | 279 | continue; |
275 | } | 280 | } |
276 | if (port->sep_rtpi != rtpi) | 281 | if (port->sep_rtpi != rtpi) |
@@ -308,7 +313,7 @@ int core_free_device_list_for_node( | |||
308 | if (!deve->se_lun) { | 313 | if (!deve->se_lun) { |
309 | printk(KERN_ERR "%s device entries device pointer is" | 314 | printk(KERN_ERR "%s device entries device pointer is" |
310 | " NULL, but Initiator has access.\n", | 315 | " NULL, but Initiator has access.\n", |
311 | TPG_TFO(tpg)->get_fabric_name()); | 316 | tpg->se_tpg_tfo->get_fabric_name()); |
312 | continue; | 317 | continue; |
313 | } | 318 | } |
314 | lun = deve->se_lun; | 319 | lun = deve->se_lun; |
@@ -334,8 +339,6 @@ void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) | |||
334 | deve = &se_nacl->device_list[se_cmd->orig_fe_lun]; | 339 | deve = &se_nacl->device_list[se_cmd->orig_fe_lun]; |
335 | deve->deve_cmds--; | 340 | deve->deve_cmds--; |
336 | spin_unlock_irq(&se_nacl->device_list_lock); | 341 | spin_unlock_irq(&se_nacl->device_list_lock); |
337 | |||
338 | return; | ||
339 | } | 342 | } |
340 | 343 | ||
341 | void core_update_device_list_access( | 344 | void core_update_device_list_access( |
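
core_dec_lacl_count() above also loses its trailing return;, the changelog's "remove extraneous returns at end of void functions". In sketch form:

        /* Before the cleanup: the trailing return; is redundant, since
         * control falls off the end of a void function anyway. */
        static void dec_count_old(int *count)
        {
                (*count)--;
                return;                 /* extraneous */
        }

        /* After: identical object code, one line less. */
        static void dec_count_new(int *count)
        {
                (*count)--;
        }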
@@ -355,8 +358,6 @@ void core_update_device_list_access( | |||
355 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; | 358 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; |
356 | } | 359 | } |
357 | spin_unlock_irq(&nacl->device_list_lock); | 360 | spin_unlock_irq(&nacl->device_list_lock); |
358 | |||
359 | return; | ||
360 | } | 361 | } |
361 | 362 | ||
362 | /* core_update_device_list_for_node(): | 363 | /* core_update_device_list_for_node(): |
@@ -408,14 +409,14 @@ int core_update_device_list_for_node( | |||
408 | " already set for demo mode -> explict" | 409 | " already set for demo mode -> explict" |
409 | " LUN ACL transition\n"); | 410 | " LUN ACL transition\n"); |
410 | spin_unlock_irq(&nacl->device_list_lock); | 411 | spin_unlock_irq(&nacl->device_list_lock); |
411 | return -1; | 412 | return -EINVAL; |
412 | } | 413 | } |
413 | if (deve->se_lun != lun) { | 414 | if (deve->se_lun != lun) { |
414 | printk(KERN_ERR "struct se_dev_entry->se_lun does" | 415 | printk(KERN_ERR "struct se_dev_entry->se_lun does" |
415 | " match passed struct se_lun for demo mode" | 416 | " match passed struct se_lun for demo mode" |
416 | " -> explict LUN ACL transition\n"); | 417 | " -> explict LUN ACL transition\n"); |
417 | spin_unlock_irq(&nacl->device_list_lock); | 418 | spin_unlock_irq(&nacl->device_list_lock); |
418 | return -1; | 419 | return -EINVAL; |
419 | } | 420 | } |
420 | deve->se_lun_acl = lun_acl; | 421 | deve->se_lun_acl = lun_acl; |
421 | trans = 1; | 422 | trans = 1; |
@@ -503,8 +504,6 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) | |||
503 | spin_lock_bh(&tpg->acl_node_lock); | 504 | spin_lock_bh(&tpg->acl_node_lock); |
504 | } | 505 | } |
505 | spin_unlock_bh(&tpg->acl_node_lock); | 506 | spin_unlock_bh(&tpg->acl_node_lock); |
506 | |||
507 | return; | ||
508 | } | 507 | } |
509 | 508 | ||
510 | static struct se_port *core_alloc_port(struct se_device *dev) | 509 | static struct se_port *core_alloc_port(struct se_device *dev) |
@@ -514,7 +513,7 @@ static struct se_port *core_alloc_port(struct se_device *dev) | |||
514 | port = kzalloc(sizeof(struct se_port), GFP_KERNEL); | 513 | port = kzalloc(sizeof(struct se_port), GFP_KERNEL); |
515 | if (!(port)) { | 514 | if (!(port)) { |
516 | printk(KERN_ERR "Unable to allocate struct se_port\n"); | 515 | printk(KERN_ERR "Unable to allocate struct se_port\n"); |
517 | return NULL; | 516 | return ERR_PTR(-ENOMEM); |
518 | } | 517 | } |
519 | INIT_LIST_HEAD(&port->sep_alua_list); | 518 | INIT_LIST_HEAD(&port->sep_alua_list); |
520 | INIT_LIST_HEAD(&port->sep_list); | 519 | INIT_LIST_HEAD(&port->sep_list); |
@@ -527,7 +526,7 @@ static struct se_port *core_alloc_port(struct se_device *dev) | |||
527 | printk(KERN_WARNING "Reached dev->dev_port_count ==" | 526 | printk(KERN_WARNING "Reached dev->dev_port_count ==" |
528 | " 0x0000ffff\n"); | 527 | " 0x0000ffff\n"); |
529 | spin_unlock(&dev->se_port_lock); | 528 | spin_unlock(&dev->se_port_lock); |
530 | return NULL; | 529 | return ERR_PTR(-ENOSPC); |
531 | } | 530 | } |
532 | again: | 531 | again: |
533 | /* | 532 | /* |
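
core_alloc_port() now distinguishes out-of-memory from a full port table by encoding an errno in the returned pointer. A userspace approximation of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers (simplified; the real ones live in include/linux/err.h):

        #include <errno.h>
        #include <stdlib.h>

        #define MAX_ERRNO 4095

        static inline void *ERR_PTR(long error) { return (void *)error; }
        static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
        static inline int IS_ERR(const void *ptr)
        {
                return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
        }

        struct port { int index; };

        /* Mirrors core_alloc_port(): two different failures, two errnos. */
        struct port *alloc_port(unsigned int port_count)
        {
                struct port *p;

                if (port_count >= 0x0000ffff)
                        return ERR_PTR(-ENOSPC);        /* table exhausted */
                p = calloc(1, sizeof(*p));
                if (!p)
                        return ERR_PTR(-ENOMEM);        /* allocation failed */
                return p;
        }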
@@ -565,7 +564,7 @@ static void core_export_port( | |||
565 | struct se_port *port, | 564 | struct se_port *port, |
566 | struct se_lun *lun) | 565 | struct se_lun *lun) |
567 | { | 566 | { |
568 | struct se_subsystem_dev *su_dev = SU_DEV(dev); | 567 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
569 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL; | 568 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL; |
570 | 569 | ||
571 | spin_lock(&dev->se_port_lock); | 570 | spin_lock(&dev->se_port_lock); |
@@ -578,7 +577,7 @@ static void core_export_port( | |||
578 | list_add_tail(&port->sep_list, &dev->dev_sep_list); | 577 | list_add_tail(&port->sep_list, &dev->dev_sep_list); |
579 | spin_unlock(&dev->se_port_lock); | 578 | spin_unlock(&dev->se_port_lock); |
580 | 579 | ||
581 | if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) { | 580 | if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { |
582 | tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); | 581 | tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); |
583 | if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { | 582 | if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { |
584 | printk(KERN_ERR "Unable to allocate t10_alua_tg_pt" | 583 | printk(KERN_ERR "Unable to allocate t10_alua_tg_pt" |
@@ -587,11 +586,11 @@ static void core_export_port( | |||
587 | } | 586 | } |
588 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 587 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); |
589 | __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, | 588 | __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, |
590 | T10_ALUA(su_dev)->default_tg_pt_gp); | 589 | su_dev->t10_alua.default_tg_pt_gp); |
591 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 590 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); |
592 | printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port" | 591 | printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port" |
593 | " Group: alua/default_tg_pt_gp\n", | 592 | " Group: alua/default_tg_pt_gp\n", |
594 | TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name()); | 593 | dev->transport->name, tpg->se_tpg_tfo->get_fabric_name()); |
595 | } | 594 | } |
596 | 595 | ||
597 | dev->dev_port_count++; | 596 | dev->dev_port_count++; |
@@ -618,8 +617,6 @@ static void core_release_port(struct se_device *dev, struct se_port *port) | |||
618 | list_del(&port->sep_list); | 617 | list_del(&port->sep_list); |
619 | dev->dev_port_count--; | 618 | dev->dev_port_count--; |
620 | kfree(port); | 619 | kfree(port); |
621 | |||
622 | return; | ||
623 | } | 620 | } |
624 | 621 | ||
625 | int core_dev_export( | 622 | int core_dev_export( |
@@ -630,8 +627,8 @@ int core_dev_export( | |||
630 | struct se_port *port; | 627 | struct se_port *port; |
631 | 628 | ||
632 | port = core_alloc_port(dev); | 629 | port = core_alloc_port(dev); |
633 | if (!(port)) | 630 | if (IS_ERR(port)) |
634 | return -1; | 631 | return PTR_ERR(port); |
635 | 632 | ||
636 | lun->lun_se_dev = dev; | 633 | lun->lun_se_dev = dev; |
637 | se_dev_start(dev); | 634 | se_dev_start(dev); |
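
And the caller side, continuing the sketch above: core_dev_export() can now forward the specific errno rather than flattening every failure to -1:

        /* Reuses alloc_port() and the helpers from the previous sketch. */
        int export_dev(unsigned int port_count)
        {
                struct port *port = alloc_port(port_count);

                if (IS_ERR(port))
                        return (int)PTR_ERR(port);      /* -ENOSPC or -ENOMEM */
                /* ... wire the port into the device here ... */
                free(port);
                return 0;
        }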
@@ -668,12 +665,12 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd) | |||
668 | { | 665 | { |
669 | struct se_dev_entry *deve; | 666 | struct se_dev_entry *deve; |
670 | struct se_lun *se_lun; | 667 | struct se_lun *se_lun; |
671 | struct se_session *se_sess = SE_SESS(se_cmd); | 668 | struct se_session *se_sess = se_cmd->se_sess; |
672 | struct se_task *se_task; | 669 | struct se_task *se_task; |
673 | unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf; | 670 | unsigned char *buf = se_cmd->t_task->t_task_buf; |
674 | u32 cdb_offset = 0, lun_count = 0, offset = 8, i; | 671 | u32 cdb_offset = 0, lun_count = 0, offset = 8, i; |
675 | 672 | ||
676 | list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list) | 673 | list_for_each_entry(se_task, &se_cmd->t_task->t_task_list, t_list) |
677 | break; | 674 | break; |
678 | 675 | ||
679 | if (!(se_task)) { | 676 | if (!(se_task)) { |
@@ -692,9 +689,9 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd) | |||
692 | goto done; | 689 | goto done; |
693 | } | 690 | } |
694 | 691 | ||
695 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | 692 | spin_lock_irq(&se_sess->se_node_acl->device_list_lock); |
696 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 693 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
697 | deve = &SE_NODE_ACL(se_sess)->device_list[i]; | 694 | deve = &se_sess->se_node_acl->device_list[i]; |
698 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) | 695 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) |
699 | continue; | 696 | continue; |
700 | se_lun = deve->se_lun; | 697 | se_lun = deve->se_lun; |
@@ -711,7 +708,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd) | |||
711 | offset += 8; | 708 | offset += 8; |
712 | cdb_offset += 8; | 709 | cdb_offset += 8; |
713 | } | 710 | } |
714 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | 711 | spin_unlock_irq(&se_sess->se_node_acl->device_list_lock); |
715 | 712 | ||
716 | /* | 713 | /* |
717 | * See SPC3 r07, page 159. | 714 | * See SPC3 r07, page 159. |
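
For reference, the offset arithmetic in transport_core_report_lun_response() matches the SPC-3 REPORT LUNS parameter layout: an 8-byte header whose first four bytes carry the LUN list length (lun_count * 8, big-endian), followed by one 8-byte entry per LUN — hence offset starting at 8 and stepping by 8. A standalone sketch of that layout, taking pre-encoded 8-byte LUN fields (the real code also handles the SAM LUN encoding):

        #include <stdint.h>
        #include <string.h>

        void fill_report_luns(uint8_t *buf, const uint8_t luns[][8],
                              uint32_t lun_count)
        {
                uint32_t i, offset = 8;         /* entries start after the header */
                uint32_t list_len = lun_count * 8;

                for (i = 0; i < lun_count; i++, offset += 8)
                        memcpy(buf + offset, luns[i], 8);

                buf[0] = (uint8_t)(list_len >> 24);     /* LUN LIST LENGTH, */
                buf[1] = (uint8_t)(list_len >> 16);     /* big-endian, bytes 0-3 */
                buf[2] = (uint8_t)(list_len >> 8);
                buf[3] = (uint8_t)list_len;
                memset(buf + 4, 0, 4);                  /* bytes 4-7 reserved */
        }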
@@ -755,26 +752,20 @@ void se_release_device_for_hba(struct se_device *dev) | |||
755 | core_scsi3_free_all_registrations(dev); | 752 | core_scsi3_free_all_registrations(dev); |
756 | se_release_vpd_for_dev(dev); | 753 | se_release_vpd_for_dev(dev); |
757 | 754 | ||
758 | kfree(dev->dev_status_queue_obj); | ||
759 | kfree(dev->dev_queue_obj); | ||
760 | kfree(dev); | 755 | kfree(dev); |
761 | |||
762 | return; | ||
763 | } | 756 | } |
764 | 757 | ||
765 | void se_release_vpd_for_dev(struct se_device *dev) | 758 | void se_release_vpd_for_dev(struct se_device *dev) |
766 | { | 759 | { |
767 | struct t10_vpd *vpd, *vpd_tmp; | 760 | struct t10_vpd *vpd, *vpd_tmp; |
768 | 761 | ||
769 | spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock); | 762 | spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock); |
770 | list_for_each_entry_safe(vpd, vpd_tmp, | 763 | list_for_each_entry_safe(vpd, vpd_tmp, |
771 | &DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) { | 764 | &dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) { |
772 | list_del(&vpd->vpd_list); | 765 | list_del(&vpd->vpd_list); |
773 | kfree(vpd); | 766 | kfree(vpd); |
774 | } | 767 | } |
775 | spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock); | 768 | spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock); |
776 | |||
777 | return; | ||
778 | } | 769 | } |
779 | 770 | ||
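
se_release_vpd_for_dev() iterates with list_for_each_entry_safe() because each node is freed inside the loop, so the successor must be captured before kfree(). The same idea on a plain singly-linked list:

        #include <stdlib.h>

        struct vpd { struct vpd *next; };

        static void free_vpd_list(struct vpd *head)
        {
                struct vpd *cur = head, *next;

                while (cur) {
                        next = cur->next;       /* save before freeing, as
                                                 * list_for_each_entry_safe does */
                        free(cur);
                        cur = next;
                }
        }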
780 | /* se_free_virtual_device(): | 771 | /* se_free_virtual_device(): |
@@ -860,48 +851,48 @@ void se_dev_set_default_attribs( | |||
860 | { | 851 | { |
861 | struct queue_limits *limits = &dev_limits->limits; | 852 | struct queue_limits *limits = &dev_limits->limits; |
862 | 853 | ||
863 | DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO; | 854 | dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO; |
864 | DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE; | 855 | dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE; |
865 | DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ; | 856 | dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ; |
866 | DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE; | 857 | dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE; |
867 | DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; | 858 | dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; |
868 | DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS; | 859 | dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS; |
869 | DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU; | 860 | dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU; |
870 | DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS; | 861 | dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS; |
871 | DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS; | 862 | dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS; |
872 | DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA; | 863 | dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA; |
873 | DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS; | 864 | dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; |
874 | /* | 865 | /* |
875 | * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK | 866 | * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK |
876 | * iblock_create_virtdevice() from struct queue_limits values | 867 | * iblock_create_virtdevice() from struct queue_limits values |
877 | * if blk_queue_discard()==1 | 868 | * if blk_queue_discard()==1 |
878 | */ | 869 | */ |
879 | DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; | 870 | dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; |
880 | DEV_ATTRIB(dev)->max_unmap_block_desc_count = | 871 | dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = |
881 | DA_MAX_UNMAP_BLOCK_DESC_COUNT; | 872 | DA_MAX_UNMAP_BLOCK_DESC_COUNT; |
882 | DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; | 873 | dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; |
883 | DEV_ATTRIB(dev)->unmap_granularity_alignment = | 874 | dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = |
884 | DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; | 875 | DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; |
885 | /* | 876 | /* |
886 | * block_size is based on subsystem plugin dependent requirements. | 877 | * block_size is based on subsystem plugin dependent requirements. |
887 | */ | 878 | */ |
888 | DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size; | 879 | dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size; |
889 | DEV_ATTRIB(dev)->block_size = limits->logical_block_size; | 880 | dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size; |
890 | /* | 881 | /* |
891 | * max_sectors is based on subsystem plugin dependent requirements. | 882 | * max_sectors is based on subsystem plugin dependent requirements. |
892 | */ | 883 | */ |
893 | DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors; | 884 | dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; |
894 | DEV_ATTRIB(dev)->max_sectors = limits->max_sectors; | 885 | dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors; |
895 | /* | 886 | /* |
896 | * Set optimal_sectors from max_sectors, which can be lowered via | 887 | * Set optimal_sectors from max_sectors, which can be lowered via |
897 | * configfs. | 888 | * configfs. |
898 | */ | 889 | */ |
899 | DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors; | 890 | dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors; |
900 | /* | 891 | /* |
901 | * queue_depth is based on subsystem plugin dependent requirements. | 892 | * queue_depth is based on subsystem plugin dependent requirements. |
902 | */ | 893 | */ |
903 | DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth; | 894 | dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth; |
904 | DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth; | 895 | dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth; |
905 | } | 896 | } |
906 | 897 | ||
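
The long run of replacements above spells out the attribute chain at every use. Assuming the removed macro was shaped like #define DEV_ATTRIB(d) (&(d)->se_sub_dev->se_dev_attrib) — an assumption, since the definition is not part of this diff — the two spellings are equivalent, with the ownership (device, subsystem dev, attribute block) now visible inline:

        /* Simplified stand-ins; the kernel structs carry many more fields. */
        struct se_dev_attrib { unsigned int block_size, max_sectors; };
        struct se_subsystem_dev { struct se_dev_attrib se_dev_attrib; };
        struct se_device { struct se_subsystem_dev *se_sub_dev; };

        static void set_block_size(struct se_device *dev, unsigned int bs)
        {
                /* Was: DEV_ATTRIB(dev)->block_size = bs; (assumed macro) */
                dev->se_sub_dev->se_dev_attrib.block_size = bs;
        }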
907 | int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) | 898 | int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) |
@@ -909,9 +900,9 @@ int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) | |||
909 | if (task_timeout > DA_TASK_TIMEOUT_MAX) { | 900 | if (task_timeout > DA_TASK_TIMEOUT_MAX) { |
910 | printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger then" | 901 | printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger then" |
911 | " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout); | 902 | " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout); |
912 | return -1; | 903 | return -EINVAL; |
913 | } else { | 904 | } else { |
914 | DEV_ATTRIB(dev)->task_timeout = task_timeout; | 905 | dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout; |
915 | printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n", | 906 | printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n", |
916 | dev, task_timeout); | 907 | dev, task_timeout); |
917 | } | 908 | } |
@@ -923,9 +914,9 @@ int se_dev_set_max_unmap_lba_count( | |||
923 | struct se_device *dev, | 914 | struct se_device *dev, |
924 | u32 max_unmap_lba_count) | 915 | u32 max_unmap_lba_count) |
925 | { | 916 | { |
926 | DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count; | 917 | dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count; |
927 | printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n", | 918 | printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n", |
928 | dev, DEV_ATTRIB(dev)->max_unmap_lba_count); | 919 | dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count); |
929 | return 0; | 920 | return 0; |
930 | } | 921 | } |
931 | 922 | ||
@@ -933,9 +924,10 @@ int se_dev_set_max_unmap_block_desc_count( | |||
933 | struct se_device *dev, | 924 | struct se_device *dev, |
934 | u32 max_unmap_block_desc_count) | 925 | u32 max_unmap_block_desc_count) |
935 | { | 926 | { |
936 | DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count; | 927 | dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = |
928 | max_unmap_block_desc_count; | ||
937 | printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n", | 929 | printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n", |
938 | dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count); | 930 | dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count); |
939 | return 0; | 931 | return 0; |
940 | } | 932 | } |
941 | 933 | ||
@@ -943,9 +935,9 @@ int se_dev_set_unmap_granularity( | |||
943 | struct se_device *dev, | 935 | struct se_device *dev, |
944 | u32 unmap_granularity) | 936 | u32 unmap_granularity) |
945 | { | 937 | { |
946 | DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity; | 938 | dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity; |
947 | printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n", | 939 | printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n", |
948 | dev, DEV_ATTRIB(dev)->unmap_granularity); | 940 | dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity); |
949 | return 0; | 941 | return 0; |
950 | } | 942 | } |
951 | 943 | ||
@@ -953,9 +945,9 @@ int se_dev_set_unmap_granularity_alignment( | |||
953 | struct se_device *dev, | 945 | struct se_device *dev, |
954 | u32 unmap_granularity_alignment) | 946 | u32 unmap_granularity_alignment) |
955 | { | 947 | { |
956 | DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment; | 948 | dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment; |
957 | printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n", | 949 | printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n", |
958 | dev, DEV_ATTRIB(dev)->unmap_granularity_alignment); | 950 | dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment); |
959 | return 0; | 951 | return 0; |
960 | } | 952 | } |
961 | 953 | ||
@@ -963,19 +955,19 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag) | |||
963 | { | 955 | { |
964 | if ((flag != 0) && (flag != 1)) { | 956 | if ((flag != 0) && (flag != 1)) { |
965 | printk(KERN_ERR "Illegal value %d\n", flag); | 957 | printk(KERN_ERR "Illegal value %d\n", flag); |
966 | return -1; | 958 | return -EINVAL; |
967 | } | 959 | } |
968 | if (TRANSPORT(dev)->dpo_emulated == NULL) { | 960 | if (dev->transport->dpo_emulated == NULL) { |
969 | printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n"); | 961 | printk(KERN_ERR "dev->transport->dpo_emulated is NULL\n"); |
970 | return -1; | 962 | return -EINVAL; |
971 | } | 963 | } |
972 | if (TRANSPORT(dev)->dpo_emulated(dev) == 0) { | 964 | if (dev->transport->dpo_emulated(dev) == 0) { |
973 | printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n"); | 965 | printk(KERN_ERR "dev->transport->dpo_emulated not supported\n"); |
974 | return -1; | 966 | return -EINVAL; |
975 | } | 967 | } |
976 | DEV_ATTRIB(dev)->emulate_dpo = flag; | 968 | dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag; |
977 | printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation" | 969 | printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation" |
978 | " bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo); | 970 | " bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo); |
979 | return 0; | 971 | return 0; |
980 | } | 972 | } |
981 | 973 | ||
@@ -983,19 +975,19 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) | |||
983 | { | 975 | { |
984 | if ((flag != 0) && (flag != 1)) { | 976 | if ((flag != 0) && (flag != 1)) { |
985 | printk(KERN_ERR "Illegal value %d\n", flag); | 977 | printk(KERN_ERR "Illegal value %d\n", flag); |
986 | return -1; | 978 | return -EINVAL; |
987 | } | 979 | } |
988 | if (TRANSPORT(dev)->fua_write_emulated == NULL) { | 980 | if (dev->transport->fua_write_emulated == NULL) { |
989 | printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n"); | 981 | printk(KERN_ERR "dev->transport->fua_write_emulated is NULL\n"); |
990 | return -1; | 982 | return -EINVAL; |
991 | } | 983 | } |
992 | if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) { | 984 | if (dev->transport->fua_write_emulated(dev) == 0) { |
993 | printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n"); | 985 | printk(KERN_ERR "dev->transport->fua_write_emulated not supported\n"); |
994 | return -1; | 986 | return -EINVAL; |
995 | } | 987 | } |
996 | DEV_ATTRIB(dev)->emulate_fua_write = flag; | 988 | dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag; |
997 | printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", | 989 | printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", |
998 | dev, DEV_ATTRIB(dev)->emulate_fua_write); | 990 | dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write); |
999 | return 0; | 991 | return 0; |
1000 | } | 992 | } |
1001 | 993 | ||
@@ -1003,19 +995,19 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) | |||
1003 | { | 995 | { |
1004 | if ((flag != 0) && (flag != 1)) { | 996 | if ((flag != 0) && (flag != 1)) { |
1005 | printk(KERN_ERR "Illegal value %d\n", flag); | 997 | printk(KERN_ERR "Illegal value %d\n", flag); |
1006 | return -1; | 998 | return -EINVAL; |
1007 | } | 999 | } |
1008 | if (TRANSPORT(dev)->fua_read_emulated == NULL) { | 1000 | if (dev->transport->fua_read_emulated == NULL) { |
1009 | printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n"); | 1001 | printk(KERN_ERR "dev->transport->fua_read_emulated is NULL\n"); |
1010 | return -1; | 1002 | return -EINVAL; |
1011 | } | 1003 | } |
1012 | if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) { | 1004 | if (dev->transport->fua_read_emulated(dev) == 0) { |
1013 | printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n"); | 1005 | printk(KERN_ERR "dev->transport->fua_read_emulated not supported\n"); |
1014 | return -1; | 1006 | return -EINVAL; |
1015 | } | 1007 | } |
1016 | DEV_ATTRIB(dev)->emulate_fua_read = flag; | 1008 | dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag; |
1017 | printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n", | 1009 | printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n", |
1018 | dev, DEV_ATTRIB(dev)->emulate_fua_read); | 1010 | dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read); |
1019 | return 0; | 1011 | return 0; |
1020 | } | 1012 | } |
1021 | 1013 | ||
@@ -1023,19 +1015,19 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) | |||
1023 | { | 1015 | { |
1024 | if ((flag != 0) && (flag != 1)) { | 1016 | if ((flag != 0) && (flag != 1)) { |
1025 | printk(KERN_ERR "Illegal value %d\n", flag); | 1017 | printk(KERN_ERR "Illegal value %d\n", flag); |
1026 | return -1; | 1018 | return -EINVAL; |
1027 | } | 1019 | } |
1028 | if (TRANSPORT(dev)->write_cache_emulated == NULL) { | 1020 | if (dev->transport->write_cache_emulated == NULL) { |
1029 | printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n"); | 1021 | printk(KERN_ERR "dev->transport->write_cache_emulated is NULL\n"); |
1030 | return -1; | 1022 | return -EINVAL; |
1031 | } | 1023 | } |
1032 | if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) { | 1024 | if (dev->transport->write_cache_emulated(dev) == 0) { |
1033 | printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n"); | 1025 | printk(KERN_ERR "dev->transport->write_cache_emulated not supported\n"); |
1034 | return -1; | 1026 | return -EINVAL; |
1035 | } | 1027 | } |
1036 | DEV_ATTRIB(dev)->emulate_write_cache = flag; | 1028 | dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag; |
1037 | printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", | 1029 | printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", |
1038 | dev, DEV_ATTRIB(dev)->emulate_write_cache); | 1030 | dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache); |
1039 | return 0; | 1031 | return 0; |
1040 | } | 1032 | } |
1041 | 1033 | ||
@@ -1043,7 +1035,7 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) | |||
1043 | { | 1035 | { |
1044 | if ((flag != 0) && (flag != 1) && (flag != 2)) { | 1036 | if ((flag != 0) && (flag != 1) && (flag != 2)) { |
1045 | printk(KERN_ERR "Illegal value %d\n", flag); | 1037 | printk(KERN_ERR "Illegal value %d\n", flag); |
1046 | return -1; | 1038 | return -EINVAL; |
1047 | } | 1039 | } |
1048 | 1040 | ||
1049 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 1041 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
@@ -1051,11 +1043,11 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) | |||
1051 | " UA_INTRLCK_CTRL while dev_export_obj: %d count" | 1043 | " UA_INTRLCK_CTRL while dev_export_obj: %d count" |
1052 | " exists\n", dev, | 1044 | " exists\n", dev, |
1053 | atomic_read(&dev->dev_export_obj.obj_access_count)); | 1045 | atomic_read(&dev->dev_export_obj.obj_access_count)); |
1054 | return -1; | 1046 | return -EINVAL; |
1055 | } | 1047 | } |
1056 | DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag; | 1048 | dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag; |
1057 | printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", | 1049 | printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", |
1058 | dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl); | 1050 | dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl); |
1059 | 1051 | ||
1060 | return 0; | 1052 | return 0; |
1061 | } | 1053 | } |
@@ -1064,18 +1056,18 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag) | |||
1064 | { | 1056 | { |
1065 | if ((flag != 0) && (flag != 1)) { | 1057 | if ((flag != 0) && (flag != 1)) { |
1066 | printk(KERN_ERR "Illegal value %d\n", flag); | 1058 | printk(KERN_ERR "Illegal value %d\n", flag); |
1067 | return -1; | 1059 | return -EINVAL; |
1068 | } | 1060 | } |
1069 | 1061 | ||
1070 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 1062 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
1071 | printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while" | 1063 | printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while" |
1072 | " dev_export_obj: %d count exists\n", dev, | 1064 | " dev_export_obj: %d count exists\n", dev, |
1073 | atomic_read(&dev->dev_export_obj.obj_access_count)); | 1065 | atomic_read(&dev->dev_export_obj.obj_access_count)); |
1074 | return -1; | 1066 | return -EINVAL; |
1075 | } | 1067 | } |
1076 | DEV_ATTRIB(dev)->emulate_tas = flag; | 1068 | dev->se_sub_dev->se_dev_attrib.emulate_tas = flag; |
1077 | printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n", | 1069 | printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n", |
1078 | dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled"); | 1070 | dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled"); |
1079 | 1071 | ||
1080 | return 0; | 1072 | return 0; |
1081 | } | 1073 | } |
@@ -1084,18 +1076,18 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag) | |||
1084 | { | 1076 | { |
1085 | if ((flag != 0) && (flag != 1)) { | 1077 | if ((flag != 0) && (flag != 1)) { |
1086 | printk(KERN_ERR "Illegal value %d\n", flag); | 1078 | printk(KERN_ERR "Illegal value %d\n", flag); |
1087 | return -1; | 1079 | return -EINVAL; |
1088 | } | 1080 | } |
1089 | /* | 1081 | /* |
1090 | * We expect this value to be non-zero when generic Block Layer | 1082 | * We expect this value to be non-zero when generic Block Layer |
1091 | * Discard support is detected in iblock_create_virtdevice(). | 1083 | * Discard support is detected in iblock_create_virtdevice(). |
1092 | */ | 1084 | */ |
1093 | if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { | 1085 | if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) { |
1094 | printk(KERN_ERR "Generic Block Discard not supported\n"); | 1086 | printk(KERN_ERR "Generic Block Discard not supported\n"); |
1095 | return -ENOSYS; | 1087 | return -ENOSYS; |
1096 | } | 1088 | } |
1097 | 1089 | ||
1098 | DEV_ATTRIB(dev)->emulate_tpu = flag; | 1090 | dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag; |
1099 | printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", | 1091 | printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", |
1100 | dev, flag); | 1092 | dev, flag); |
1101 | return 0; | 1093 | return 0; |
@@ -1105,18 +1097,18 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag) | |||
1105 | { | 1097 | { |
1106 | if ((flag != 0) && (flag != 1)) { | 1098 | if ((flag != 0) && (flag != 1)) { |
1107 | printk(KERN_ERR "Illegal value %d\n", flag); | 1099 | printk(KERN_ERR "Illegal value %d\n", flag); |
1108 | return -1; | 1100 | return -EINVAL; |
1109 | } | 1101 | } |
1110 | /* | 1102 | /* |
1111 | * We expect this value to be non-zero when generic Block Layer | 1103 | * We expect this value to be non-zero when generic Block Layer |
1112 | * Discard support is detected in iblock_create_virtdevice(). | 1104 | * Discard support is detected in iblock_create_virtdevice(). |
1113 | */ | 1105 | */ |
1114 | if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { | 1106 | if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) { |
1115 | printk(KERN_ERR "Generic Block Discard not supported\n"); | 1107 | printk(KERN_ERR "Generic Block Discard not supported\n"); |
1116 | return -ENOSYS; | 1108 | return -ENOSYS; |
1117 | } | 1109 | } |
1118 | 1110 | ||
1119 | DEV_ATTRIB(dev)->emulate_tpws = flag; | 1111 | dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag; |
1120 | printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", | 1112 | printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", |
1121 | dev, flag); | 1113 | dev, flag); |
1122 | return 0; | 1114 | return 0; |
@@ -1126,11 +1118,11 @@ int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) | |||
1126 | { | 1118 | { |
1127 | if ((flag != 0) && (flag != 1)) { | 1119 | if ((flag != 0) && (flag != 1)) { |
1128 | printk(KERN_ERR "Illegal value %d\n", flag); | 1120 | printk(KERN_ERR "Illegal value %d\n", flag); |
1129 | return -1; | 1121 | return -EINVAL; |
1130 | } | 1122 | } |
1131 | DEV_ATTRIB(dev)->enforce_pr_isids = flag; | 1123 | dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag; |
1132 | printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, | 1124 | printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, |
1133 | (DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled"); | 1125 | (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled"); |
1134 | return 0; | 1126 | return 0; |
1135 | } | 1127 | } |
1136 | 1128 | ||
@@ -1145,35 +1137,35 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) | |||
1145 | printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while" | 1137 | printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while" |
1146 | " dev_export_obj: %d count exists\n", dev, | 1138 | " dev_export_obj: %d count exists\n", dev, |
1147 | atomic_read(&dev->dev_export_obj.obj_access_count)); | 1139 | atomic_read(&dev->dev_export_obj.obj_access_count)); |
1148 | return -1; | 1140 | return -EINVAL; |
1149 | } | 1141 | } |
1150 | if (!(queue_depth)) { | 1142 | if (!(queue_depth)) { |
1151 | printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue" | 1143 | printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue" |
1152 | "_depth\n", dev); | 1144 | "_depth\n", dev); |
1153 | return -1; | 1145 | return -EINVAL; |
1154 | } | 1146 | } |
1155 | 1147 | ||
1156 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | 1148 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
1157 | if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { | 1149 | if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { |
1158 | printk(KERN_ERR "dev[%p]: Passed queue_depth: %u" | 1150 | printk(KERN_ERR "dev[%p]: Passed queue_depth: %u" |
1159 | " exceeds TCM/SE_Device TCQ: %u\n", | 1151 | " exceeds TCM/SE_Device TCQ: %u\n", |
1160 | dev, queue_depth, | 1152 | dev, queue_depth, |
1161 | DEV_ATTRIB(dev)->hw_queue_depth); | 1153 | dev->se_sub_dev->se_dev_attrib.hw_queue_depth); |
1162 | return -1; | 1154 | return -EINVAL; |
1163 | } | 1155 | } |
1164 | } else { | 1156 | } else { |
1165 | if (queue_depth > DEV_ATTRIB(dev)->queue_depth) { | 1157 | if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) { |
1166 | if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { | 1158 | if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { |
1167 | printk(KERN_ERR "dev[%p]: Passed queue_depth:" | 1159 | printk(KERN_ERR "dev[%p]: Passed queue_depth:" |
1168 | " %u exceeds TCM/SE_Device MAX" | 1160 | " %u exceeds TCM/SE_Device MAX" |
1169 | " TCQ: %u\n", dev, queue_depth, | 1161 | " TCQ: %u\n", dev, queue_depth, |
1170 | DEV_ATTRIB(dev)->hw_queue_depth); | 1162 | dev->se_sub_dev->se_dev_attrib.hw_queue_depth); |
1171 | return -1; | 1163 | return -EINVAL; |
1172 | } | 1164 | } |
1173 | } | 1165 | } |
1174 | } | 1166 | } |
1175 | 1167 | ||
1176 | DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth; | 1168 | dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth; |
1177 | if (queue_depth > orig_queue_depth) | 1169 | if (queue_depth > orig_queue_depth) |
1178 | atomic_add(queue_depth - orig_queue_depth, &dev->depth_left); | 1170 | atomic_add(queue_depth - orig_queue_depth, &dev->depth_left); |
1179 | else if (queue_depth < orig_queue_depth) | 1171 | else if (queue_depth < orig_queue_depth) |
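
The tail of se_dev_set_queue_depth() adjusts the available-depth counter by the delta between old and new depths, so commands already in flight keep their slots. A plain-C stand-in for the atomic_add/atomic_sub accounting:

        /* depth_left tracks free slots; moving queue_depth by a delta
         * moves depth_left by the same delta (atomics in the kernel). */
        static void set_queue_depth(unsigned int *queue_depth, int *depth_left,
                                    unsigned int new_depth)
        {
                unsigned int orig = *queue_depth;

                *queue_depth = new_depth;
                if (new_depth > orig)
                        *depth_left += (int)(new_depth - orig);
                else if (new_depth < orig)
                        *depth_left -= (int)(orig - new_depth);
        }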
@@ -1192,46 +1184,46 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) | |||
1192 | printk(KERN_ERR "dev[%p]: Unable to change SE Device" | 1184 | printk(KERN_ERR "dev[%p]: Unable to change SE Device" |
1193 | " max_sectors while dev_export_obj: %d count exists\n", | 1185 | " max_sectors while dev_export_obj: %d count exists\n", |
1194 | dev, atomic_read(&dev->dev_export_obj.obj_access_count)); | 1186 | dev, atomic_read(&dev->dev_export_obj.obj_access_count)); |
1195 | return -1; | 1187 | return -EINVAL; |
1196 | } | 1188 | } |
1197 | if (!(max_sectors)) { | 1189 | if (!(max_sectors)) { |
1198 | printk(KERN_ERR "dev[%p]: Illegal ZERO value for" | 1190 | printk(KERN_ERR "dev[%p]: Illegal ZERO value for" |
1199 | " max_sectors\n", dev); | 1191 | " max_sectors\n", dev); |
1200 | return -1; | 1192 | return -EINVAL; |
1201 | } | 1193 | } |
1202 | if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) { | 1194 | if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) { |
1203 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than" | 1195 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than" |
1204 | " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors, | 1196 | " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors, |
1205 | DA_STATUS_MAX_SECTORS_MIN); | 1197 | DA_STATUS_MAX_SECTORS_MIN); |
1206 | return -1; | 1198 | return -EINVAL; |
1207 | } | 1199 | } |
1208 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | 1200 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
1209 | if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) { | 1201 | if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) { |
1210 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" | 1202 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" |
1211 | " greater than TCM/SE_Device max_sectors:" | 1203 | " greater than TCM/SE_Device max_sectors:" |
1212 | " %u\n", dev, max_sectors, | 1204 | " %u\n", dev, max_sectors, |
1213 | DEV_ATTRIB(dev)->hw_max_sectors); | 1205 | dev->se_sub_dev->se_dev_attrib.hw_max_sectors); |
1214 | return -1; | 1206 | return -EINVAL; |
1215 | } | 1207 | } |
1216 | } else { | 1208 | } else { |
1217 | if (!(force) && (max_sectors > | 1209 | if (!(force) && (max_sectors > |
1218 | DEV_ATTRIB(dev)->hw_max_sectors)) { | 1210 | dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) { |
1219 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" | 1211 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" |
1220 | " greater than TCM/SE_Device max_sectors" | 1212 | " greater than TCM/SE_Device max_sectors" |
1221 | ": %u, use force=1 to override.\n", dev, | 1213 | ": %u, use force=1 to override.\n", dev, |
1222 | max_sectors, DEV_ATTRIB(dev)->hw_max_sectors); | 1214 | max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors); |
1223 | return -1; | 1215 | return -EINVAL; |
1224 | } | 1216 | } |
1225 | if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) { | 1217 | if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) { |
1226 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" | 1218 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" |
1227 | " greater than DA_STATUS_MAX_SECTORS_MAX:" | 1219 | " greater than DA_STATUS_MAX_SECTORS_MAX:" |
1228 | " %u\n", dev, max_sectors, | 1220 | " %u\n", dev, max_sectors, |
1229 | DA_STATUS_MAX_SECTORS_MAX); | 1221 | DA_STATUS_MAX_SECTORS_MAX); |
1230 | return -1; | 1222 | return -EINVAL; |
1231 | } | 1223 | } |
1232 | } | 1224 | } |
1233 | 1225 | ||
1234 | DEV_ATTRIB(dev)->max_sectors = max_sectors; | 1226 | dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors; |
1235 | printk("dev[%p]: SE Device max_sectors changed to %u\n", | 1227 | printk("dev[%p]: SE Device max_sectors changed to %u\n", |
1236 | dev, max_sectors); | 1228 | dev, max_sectors); |
1237 | return 0; | 1229 | return 0; |
@@ -1245,19 +1237,19 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) | |||
1245 | dev, atomic_read(&dev->dev_export_obj.obj_access_count)); | 1237 | dev, atomic_read(&dev->dev_export_obj.obj_access_count)); |
1246 | return -EINVAL; | 1238 | return -EINVAL; |
1247 | } | 1239 | } |
1248 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | 1240 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
1249 | printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be" | 1241 | printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be" |
1250 | " changed for TCM/pSCSI\n", dev); | 1242 | " changed for TCM/pSCSI\n", dev); |
1251 | return -EINVAL; | 1243 | return -EINVAL; |
1252 | } | 1244 | } |
1253 | if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) { | 1245 | if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) { |
1254 | printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be" | 1246 | printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be" |
1255 | " greater than max_sectors: %u\n", dev, | 1247 | " greater than max_sectors: %u\n", dev, |
1256 | optimal_sectors, DEV_ATTRIB(dev)->max_sectors); | 1248 | optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); |
1257 | return -EINVAL; | 1249 | return -EINVAL; |
1258 | } | 1250 | } |
1259 | 1251 | ||
1260 | DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors; | 1252 | dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors; |
1261 | printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n", | 1253 | printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n", |
1262 | dev, optimal_sectors); | 1254 | dev, optimal_sectors); |
1263 | return 0; | 1255 | return 0; |
@@ -1269,7 +1261,7 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size) | |||
1269 | printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size" | 1261 | printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size" |
1270 | " while dev_export_obj: %d count exists\n", dev, | 1262 | " while dev_export_obj: %d count exists\n", dev, |
1271 | atomic_read(&dev->dev_export_obj.obj_access_count)); | 1263 | atomic_read(&dev->dev_export_obj.obj_access_count)); |
1272 | return -1; | 1264 | return -EINVAL; |
1273 | } | 1265 | } |
1274 | 1266 | ||
1275 | if ((block_size != 512) && | 1267 | if ((block_size != 512) && |
@@ -1279,17 +1271,17 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size) | |||
1279 | printk(KERN_ERR "dev[%p]: Illegal value for block_device: %u" | 1271 | printk(KERN_ERR "dev[%p]: Illegal value for block_device: %u" |
1280 | " for SE device, must be 512, 1024, 2048 or 4096\n", | 1272 | " for SE device, must be 512, 1024, 2048 or 4096\n", |
1281 | dev, block_size); | 1273 | dev, block_size); |
1282 | return -1; | 1274 | return -EINVAL; |
1283 | } | 1275 | } |
1284 | 1276 | ||
1285 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | 1277 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
1286 | printk(KERN_ERR "dev[%p]: Not allowed to change block_size for" | 1278 | printk(KERN_ERR "dev[%p]: Not allowed to change block_size for" |
1287 | " Physical Device, use for Linux/SCSI to change" | 1279 | " Physical Device, use for Linux/SCSI to change" |
1288 | " block_size for underlying hardware\n", dev); | 1280 | " block_size for underlying hardware\n", dev); |
1289 | return -1; | 1281 | return -EINVAL; |
1290 | } | 1282 | } |
1291 | 1283 | ||
1292 | DEV_ATTRIB(dev)->block_size = block_size; | 1284 | dev->se_sub_dev->se_dev_attrib.block_size = block_size; |
1293 | printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n", | 1285 | printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n", |
1294 | dev, block_size); | 1286 | dev, block_size); |
1295 | return 0; | 1287 | return 0; |
@@ -1323,14 +1315,14 @@ struct se_lun *core_dev_add_lun( | |||
1323 | return NULL; | 1315 | return NULL; |
1324 | 1316 | ||
1325 | printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" | 1317 | printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" |
1326 | " CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(), | 1318 | " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), |
1327 | TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun, | 1319 | tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun, |
1328 | TPG_TFO(tpg)->get_fabric_name(), hba->hba_id); | 1320 | tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id); |
1329 | /* | 1321 | /* |
1330 | * Update LUN maps for dynamically added initiators when | 1322 | * Update LUN maps for dynamically added initiators when |
1331 | * generate_node_acl is enabled. | 1323 | * generate_node_acl is enabled. |
1332 | */ | 1324 | */ |
1333 | if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) { | 1325 | if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) { |
1334 | struct se_node_acl *acl; | 1326 | struct se_node_acl *acl; |
1335 | spin_lock_bh(&tpg->acl_node_lock); | 1327 | spin_lock_bh(&tpg->acl_node_lock); |
1336 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { | 1328 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { |
@@ -1364,9 +1356,9 @@ int core_dev_del_lun( | |||
1364 | core_tpg_post_dellun(tpg, lun); | 1356 | core_tpg_post_dellun(tpg, lun); |
1365 | 1357 | ||
1366 | printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" | 1358 | printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" |
1367 | " device object\n", TPG_TFO(tpg)->get_fabric_name(), | 1359 | " device object\n", tpg->se_tpg_tfo->get_fabric_name(), |
1368 | TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, | 1360 | tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, |
1369 | TPG_TFO(tpg)->get_fabric_name()); | 1361 | tpg->se_tpg_tfo->get_fabric_name()); |
1370 | 1362 | ||
1371 | return 0; | 1363 | return 0; |
1372 | } | 1364 | } |
@@ -1379,9 +1371,9 @@ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_l | |||
1379 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | 1371 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { |
1380 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS" | 1372 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS" |
1381 | "_PER_TPG-1: %u for Target Portal Group: %hu\n", | 1373 | "_PER_TPG-1: %u for Target Portal Group: %hu\n", |
1382 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 1374 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, |
1383 | TRANSPORT_MAX_LUNS_PER_TPG-1, | 1375 | TRANSPORT_MAX_LUNS_PER_TPG-1, |
1384 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1376 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
1385 | spin_unlock(&tpg->tpg_lun_lock); | 1377 | spin_unlock(&tpg->tpg_lun_lock); |
1386 | return NULL; | 1378 | return NULL; |
1387 | } | 1379 | } |
@@ -1390,8 +1382,8 @@ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_l | |||
1390 | if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { | 1382 | if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { |
1391 | printk(KERN_ERR "%s Logical Unit Number: %u is not free on" | 1383 | printk(KERN_ERR "%s Logical Unit Number: %u is not free on" |
1392 | " Target Portal Group: %hu, ignoring request.\n", | 1384 | " Target Portal Group: %hu, ignoring request.\n", |
1393 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 1385 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, |
1394 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1386 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
1395 | spin_unlock(&tpg->tpg_lun_lock); | 1387 | spin_unlock(&tpg->tpg_lun_lock); |
1396 | return NULL; | 1388 | return NULL; |
1397 | } | 1389 | } |
@@ -1412,9 +1404,9 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked | |||
1412 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | 1404 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { |
1413 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" | 1405 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" |
1414 | "_TPG-1: %u for Target Portal Group: %hu\n", | 1406 | "_TPG-1: %u for Target Portal Group: %hu\n", |
1415 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 1407 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, |
1416 | TRANSPORT_MAX_LUNS_PER_TPG-1, | 1408 | TRANSPORT_MAX_LUNS_PER_TPG-1, |
1417 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1409 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
1418 | spin_unlock(&tpg->tpg_lun_lock); | 1410 | spin_unlock(&tpg->tpg_lun_lock); |
1419 | return NULL; | 1411 | return NULL; |
1420 | } | 1412 | } |
@@ -1423,8 +1415,8 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked | |||
1423 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { | 1415 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { |
1424 | printk(KERN_ERR "%s Logical Unit Number: %u is not active on" | 1416 | printk(KERN_ERR "%s Logical Unit Number: %u is not active on" |
1425 | " Target Portal Group: %hu, ignoring request.\n", | 1417 | " Target Portal Group: %hu, ignoring request.\n", |
1426 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 1418 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, |
1427 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1419 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
1428 | spin_unlock(&tpg->tpg_lun_lock); | 1420 | spin_unlock(&tpg->tpg_lun_lock); |
1429 | return NULL; | 1421 | return NULL; |
1430 | } | 1422 | } |
@@ -1444,7 +1436,7 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl( | |||
1444 | 1436 | ||
1445 | if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) { | 1437 | if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) { |
1446 | printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n", | 1438 | printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n", |
1447 | TPG_TFO(tpg)->get_fabric_name()); | 1439 | tpg->se_tpg_tfo->get_fabric_name()); |
1448 | *ret = -EOVERFLOW; | 1440 | *ret = -EOVERFLOW; |
1449 | return NULL; | 1441 | return NULL; |
1450 | } | 1442 | } |
@@ -1481,8 +1473,8 @@ int core_dev_add_initiator_node_lun_acl( | |||
1481 | if (!(lun)) { | 1473 | if (!(lun)) { |
1482 | printk(KERN_ERR "%s Logical Unit Number: %u is not active on" | 1474 | printk(KERN_ERR "%s Logical Unit Number: %u is not active on" |
1483 | " Target Portal Group: %hu, ignoring request.\n", | 1475 | " Target Portal Group: %hu, ignoring request.\n", |
1484 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 1476 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, |
1485 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1477 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
1486 | return -EINVAL; | 1478 | return -EINVAL; |
1487 | } | 1479 | } |
1488 | 1480 | ||
@@ -1507,8 +1499,8 @@ int core_dev_add_initiator_node_lun_acl( | |||
1507 | spin_unlock(&lun->lun_acl_lock); | 1499 | spin_unlock(&lun->lun_acl_lock); |
1508 | 1500 | ||
1509 | printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " | 1501 | printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " |
1510 | " InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(), | 1502 | " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
1511 | TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, | 1503 | tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, |
1512 | (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", | 1504 | (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", |
1513 | lacl->initiatorname); | 1505 | lacl->initiatorname); |
1514 | /* | 1506 | /* |
@@ -1547,8 +1539,8 @@ int core_dev_del_initiator_node_lun_acl( | |||
1547 | 1539 | ||
1548 | printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for" | 1540 | printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for" |
1549 | " InitiatorNode: %s Mapped LUN: %u\n", | 1541 | " InitiatorNode: %s Mapped LUN: %u\n", |
1550 | TPG_TFO(tpg)->get_fabric_name(), | 1542 | tpg->se_tpg_tfo->get_fabric_name(), |
1551 | TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun, | 1543 | tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, |
1552 | lacl->initiatorname, lacl->mapped_lun); | 1544 | lacl->initiatorname, lacl->mapped_lun); |
1553 | 1545 | ||
1554 | return 0; | 1546 | return 0; |
@@ -1559,9 +1551,9 @@ void core_dev_free_initiator_node_lun_acl( | |||
1559 | struct se_lun_acl *lacl) | 1551 | struct se_lun_acl *lacl) |
1560 | { | 1552 | { |
1561 | printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" | 1553 | printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" |
1562 | " Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(), | 1554 | " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(), |
1563 | TPG_TFO(tpg)->tpg_get_tag(tpg), | 1555 | tpg->se_tpg_tfo->tpg_get_tag(tpg), |
1564 | TPG_TFO(tpg)->get_fabric_name(), | 1556 | tpg->se_tpg_tfo->get_fabric_name(), |
1565 | lacl->initiatorname, lacl->mapped_lun); | 1557 | lacl->initiatorname, lacl->mapped_lun); |
1566 | 1558 | ||
1567 | kfree(lacl); | 1559 | kfree(lacl); |
@@ -1580,7 +1572,7 @@ int core_dev_setup_virtual_lun0(void) | |||
1580 | if (IS_ERR(hba)) | 1572 | if (IS_ERR(hba)) |
1581 | return PTR_ERR(hba); | 1573 | return PTR_ERR(hba); |
1582 | 1574 | ||
1583 | se_global->g_lun0_hba = hba; | 1575 | lun0_hba = hba; |
1584 | t = hba->transport; | 1576 | t = hba->transport; |
1585 | 1577 | ||
1586 | se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); | 1578 | se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); |
@@ -1590,17 +1582,17 @@ int core_dev_setup_virtual_lun0(void) | |||
1590 | ret = -ENOMEM; | 1582 | ret = -ENOMEM; |
1591 | goto out; | 1583 | goto out; |
1592 | } | 1584 | } |
1593 | INIT_LIST_HEAD(&se_dev->g_se_dev_list); | 1585 | INIT_LIST_HEAD(&se_dev->se_dev_node); |
1594 | INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); | 1586 | INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); |
1595 | spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); | 1587 | spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); |
1596 | INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list); | 1588 | INIT_LIST_HEAD(&se_dev->t10_pr.registration_list); |
1597 | INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list); | 1589 | INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list); |
1598 | spin_lock_init(&se_dev->t10_reservation.registration_lock); | 1590 | spin_lock_init(&se_dev->t10_pr.registration_lock); |
1599 | spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock); | 1591 | spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock); |
1600 | INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); | 1592 | INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); |
1601 | spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); | 1593 | spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); |
1602 | spin_lock_init(&se_dev->se_dev_lock); | 1594 | spin_lock_init(&se_dev->se_dev_lock); |
1603 | se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; | 1595 | se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; |
1604 | se_dev->t10_wwn.t10_sub_dev = se_dev; | 1596 | se_dev->t10_wwn.t10_sub_dev = se_dev; |
1605 | se_dev->t10_alua.t10_sub_dev = se_dev; | 1597 | se_dev->t10_alua.t10_sub_dev = se_dev; |
1606 | se_dev->se_dev_attrib.da_sub_dev = se_dev; | 1598 | se_dev->se_dev_attrib.da_sub_dev = se_dev; |
@@ -1613,27 +1605,27 @@ int core_dev_setup_virtual_lun0(void) | |||
1613 | ret = -ENOMEM; | 1605 | ret = -ENOMEM; |
1614 | goto out; | 1606 | goto out; |
1615 | } | 1607 | } |
1616 | se_global->g_lun0_su_dev = se_dev; | 1608 | lun0_su_dev = se_dev; |
1617 | 1609 | ||
1618 | memset(buf, 0, 16); | 1610 | memset(buf, 0, 16); |
1619 | sprintf(buf, "rd_pages=8"); | 1611 | sprintf(buf, "rd_pages=8"); |
1620 | t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf)); | 1612 | t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf)); |
1621 | 1613 | ||
1622 | dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); | 1614 | dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); |
1623 | if (!(dev) || IS_ERR(dev)) { | 1615 | if (IS_ERR(dev)) { |
1624 | ret = -ENOMEM; | 1616 | ret = PTR_ERR(dev); |
1625 | goto out; | 1617 | goto out; |
1626 | } | 1618 | } |
1627 | se_dev->se_dev_ptr = dev; | 1619 | se_dev->se_dev_ptr = dev; |
1628 | se_global->g_lun0_dev = dev; | 1620 | g_lun0_dev = dev; |
1629 | 1621 | ||
1630 | return 0; | 1622 | return 0; |
1631 | out: | 1623 | out: |
1632 | se_global->g_lun0_su_dev = NULL; | 1624 | lun0_su_dev = NULL; |
1633 | kfree(se_dev); | 1625 | kfree(se_dev); |
1634 | if (se_global->g_lun0_hba) { | 1626 | if (lun0_hba) { |
1635 | core_delete_hba(se_global->g_lun0_hba); | 1627 | core_delete_hba(lun0_hba); |
1636 | se_global->g_lun0_hba = NULL; | 1628 | lun0_hba = NULL; |
1637 | } | 1629 | } |
1638 | return ret; | 1630 | return ret; |
1639 | } | 1631 | } |
@@ -1641,14 +1633,14 @@ out: | |||
1641 | 1633 | ||
1642 | void core_dev_release_virtual_lun0(void) | 1634 | void core_dev_release_virtual_lun0(void) |
1643 | { | 1635 | { |
1644 | struct se_hba *hba = se_global->g_lun0_hba; | 1636 | struct se_hba *hba = lun0_hba; |
1645 | struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev; | 1637 | struct se_subsystem_dev *su_dev = lun0_su_dev; |
1646 | 1638 | ||
1647 | if (!(hba)) | 1639 | if (!(hba)) |
1648 | return; | 1640 | return; |
1649 | 1641 | ||
1650 | if (se_global->g_lun0_dev) | 1642 | if (g_lun0_dev) |
1651 | se_free_virtual_device(se_global->g_lun0_dev, hba); | 1643 | se_free_virtual_device(g_lun0_dev, hba); |
1652 | 1644 | ||
1653 | kfree(su_dev); | 1645 | kfree(su_dev); |
1654 | core_delete_hba(hba); | 1646 | core_delete_hba(hba); |
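The lun0 hunks above combine two of the squashed cleanups: the se_global->g_lun0_* pointers become file-scope statics (lun0_hba, lun0_su_dev, g_lun0_dev), and the create_virtdevice() error path stops collapsing every failure into -ENOMEM. Once the backend reports failure only through an ERR_PTR()-encoded pointer, the old "!(dev) || IS_ERR(dev)" double test reduces to IS_ERR(dev) and the real errno is forwarded with PTR_ERR(). A minimal self-contained sketch of that encoding, simplified from the kernel's include/linux/err.h:

/* Userspace model of the ERR_PTR convention: the top 4095 addresses
 * are never valid kernel pointers, so one pointer value can carry
 * either a real object or a negated errno, never both. */
#include <stdbool.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline bool  IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

With a callee that never returns NULL, the caller's "ret = PTR_ERR(dev)" preserves whichever errno the backend chose, instead of the blanket -ENOMEM the old code returned.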
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 07ab5a3bb8e8..0b1659d0fefc 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c | |||
@@ -118,7 +118,7 @@ static int target_fabric_mappedlun_link( | |||
118 | lun_access = deve->lun_flags; | 118 | lun_access = deve->lun_flags; |
119 | else | 119 | else |
120 | lun_access = | 120 | lun_access = |
121 | (TPG_TFO(se_tpg)->tpg_check_prod_mode_write_protect( | 121 | (se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect( |
122 | se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY : | 122 | se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY : |
123 | TRANSPORT_LUNFLAGS_READ_WRITE; | 123 | TRANSPORT_LUNFLAGS_READ_WRITE; |
124 | spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock); | 124 | spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock); |
@@ -204,7 +204,7 @@ static ssize_t target_fabric_mappedlun_store_write_protect( | |||
204 | 204 | ||
205 | printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s" | 205 | printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s" |
206 | " Mapped LUN: %u Write Protect bit to %s\n", | 206 | " Mapped LUN: %u Write Protect bit to %s\n", |
207 | TPG_TFO(se_tpg)->get_fabric_name(), | 207 | se_tpg->se_tpg_tfo->get_fabric_name(), |
208 | lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF"); | 208 | lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF"); |
209 | 209 | ||
210 | return count; | 210 | return count; |
@@ -379,7 +379,7 @@ static struct config_group *target_fabric_make_mappedlun( | |||
379 | lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group; | 379 | lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group; |
380 | lacl_cg->default_groups[1] = NULL; | 380 | lacl_cg->default_groups[1] = NULL; |
381 | 381 | ||
382 | ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group; | 382 | ml_stat_grp = &lacl->ml_stat_grps.stat_group; |
383 | ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, | 383 | ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, |
384 | GFP_KERNEL); | 384 | GFP_KERNEL); |
385 | if (!ml_stat_grp->default_groups) { | 385 | if (!ml_stat_grp->default_groups) { |
@@ -408,7 +408,7 @@ static void target_fabric_drop_mappedlun( | |||
408 | struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL; | 408 | struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL; |
409 | int i; | 409 | int i; |
410 | 410 | ||
411 | ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group; | 411 | ml_stat_grp = &lacl->ml_stat_grps.stat_group; |
412 | for (i = 0; ml_stat_grp->default_groups[i]; i++) { | 412 | for (i = 0; ml_stat_grp->default_groups[i]; i++) { |
413 | df_item = &ml_stat_grp->default_groups[i]->cg_item; | 413 | df_item = &ml_stat_grp->default_groups[i]->cg_item; |
414 | ml_stat_grp->default_groups[i] = NULL; | 414 | ml_stat_grp->default_groups[i] = NULL; |
@@ -914,7 +914,7 @@ static struct config_group *target_fabric_make_lun( | |||
914 | lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group; | 914 | lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group; |
915 | lun_cg->default_groups[1] = NULL; | 915 | lun_cg->default_groups[1] = NULL; |
916 | 916 | ||
917 | port_stat_grp = &PORT_STAT_GRP(lun)->stat_group; | 917 | port_stat_grp = &lun->port_stat_grps.stat_group; |
918 | port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, | 918 | port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, |
919 | GFP_KERNEL); | 919 | GFP_KERNEL); |
920 | if (!port_stat_grp->default_groups) { | 920 | if (!port_stat_grp->default_groups) { |
@@ -941,7 +941,7 @@ static void target_fabric_drop_lun( | |||
941 | struct config_group *lun_cg, *port_stat_grp; | 941 | struct config_group *lun_cg, *port_stat_grp; |
942 | int i; | 942 | int i; |
943 | 943 | ||
944 | port_stat_grp = &PORT_STAT_GRP(lun)->stat_group; | 944 | port_stat_grp = &lun->port_stat_grps.stat_group; |
945 | for (i = 0; port_stat_grp->default_groups[i]; i++) { | 945 | for (i = 0; port_stat_grp->default_groups[i]; i++) { |
946 | df_item = &port_stat_grp->default_groups[i]->cg_item; | 946 | df_item = &port_stat_grp->default_groups[i]->cg_item; |
947 | port_stat_grp->default_groups[i] = NULL; | 947 | port_stat_grp->default_groups[i] = NULL; |
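All of the fabric-configfs churn above is one mechanical cleanup: TPG_TFO(), ML_STAT_GRPS() and PORT_STAT_GRP() were #defines that performed pure indirection, and the series deletes them in favor of spelling out the member access. A hedged sketch of the pattern, with a simplified struct layout standing in for the real target headers:

/* Simplified stand-ins for the real target_core structures. */
struct target_core_fabric_ops {
	char *(*get_fabric_name)(void);
};

struct se_portal_group {
	struct target_core_fabric_ops *se_tpg_tfo;
};

/* The removed macro hid exactly one member access: */
#define TPG_TFO(se_tpg)	((se_tpg)->se_tpg_tfo)

static char *fabric_name(struct se_portal_group *tpg)
{
	/* After the cleanup, callers write the access directly,
	 * so grep and ctags land on the real member: */
	return tpg->se_tpg_tfo->get_fabric_name();
}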
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 150c4305f385..0c44bc051484 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c | |||
@@ -67,22 +67,19 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id) | |||
67 | fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL); | 67 | fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL); |
68 | if (!(fd_host)) { | 68 | if (!(fd_host)) { |
69 | printk(KERN_ERR "Unable to allocate memory for struct fd_host\n"); | 69 | printk(KERN_ERR "Unable to allocate memory for struct fd_host\n"); |
70 | return -1; | 70 | return -ENOMEM; |
71 | } | 71 | } |
72 | 72 | ||
73 | fd_host->fd_host_id = host_id; | 73 | fd_host->fd_host_id = host_id; |
74 | 74 | ||
75 | atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH); | 75 | hba->hba_ptr = fd_host; |
76 | atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH); | ||
77 | hba->hba_ptr = (void *) fd_host; | ||
78 | 76 | ||
79 | printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" | 77 | printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" |
80 | " Target Core Stack %s\n", hba->hba_id, FD_VERSION, | 78 | " Target Core Stack %s\n", hba->hba_id, FD_VERSION, |
81 | TARGET_CORE_MOD_VERSION); | 79 | TARGET_CORE_MOD_VERSION); |
82 | printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" | 80 | printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" |
83 | " Target Core with TCQ Depth: %d MaxSectors: %u\n", | 81 | " MaxSectors: %u\n", |
84 | hba->hba_id, fd_host->fd_host_id, | 82 | hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS); |
85 | atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS); | ||
86 | 83 | ||
87 | return 0; | 84 | return 0; |
88 | } | 85 | } |
@@ -282,7 +279,7 @@ fd_alloc_task(struct se_cmd *cmd) | |||
282 | return NULL; | 279 | return NULL; |
283 | } | 280 | } |
284 | 281 | ||
285 | fd_req->fd_dev = SE_DEV(cmd)->dev_ptr; | 282 | fd_req->fd_dev = cmd->se_lun->lun_se_dev->dev_ptr; |
286 | 283 | ||
287 | return &fd_req->fd_task; | 284 | return &fd_req->fd_task; |
288 | } | 285 | } |
@@ -294,13 +291,14 @@ static int fd_do_readv(struct se_task *task) | |||
294 | struct scatterlist *sg = task->task_sg; | 291 | struct scatterlist *sg = task->task_sg; |
295 | struct iovec *iov; | 292 | struct iovec *iov; |
296 | mm_segment_t old_fs; | 293 | mm_segment_t old_fs; |
297 | loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size); | 294 | loff_t pos = (task->task_lba * |
295 | task->se_dev->se_sub_dev->se_dev_attrib.block_size); | ||
298 | int ret = 0, i; | 296 | int ret = 0, i; |
299 | 297 | ||
300 | iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL); | 298 | iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL); |
301 | if (!(iov)) { | 299 | if (!(iov)) { |
302 | printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n"); | 300 | printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n"); |
303 | return -1; | 301 | return -ENOMEM; |
304 | } | 302 | } |
305 | 303 | ||
306 | for (i = 0; i < task->task_sg_num; i++) { | 304 | for (i = 0; i < task->task_sg_num; i++) { |
@@ -324,13 +322,13 @@ static int fd_do_readv(struct se_task *task) | |||
324 | printk(KERN_ERR "vfs_readv() returned %d," | 322 | printk(KERN_ERR "vfs_readv() returned %d," |
325 | " expecting %d for S_ISBLK\n", ret, | 323 | " expecting %d for S_ISBLK\n", ret, |
326 | (int)task->task_size); | 324 | (int)task->task_size); |
327 | return -1; | 325 | return (ret < 0 ? ret : -EINVAL); |
328 | } | 326 | } |
329 | } else { | 327 | } else { |
330 | if (ret < 0) { | 328 | if (ret < 0) { |
331 | printk(KERN_ERR "vfs_readv() returned %d for non" | 329 | printk(KERN_ERR "vfs_readv() returned %d for non" |
332 | " S_ISBLK\n", ret); | 330 | " S_ISBLK\n", ret); |
333 | return -1; | 331 | return ret; |
334 | } | 332 | } |
335 | } | 333 | } |
336 | 334 | ||
@@ -344,13 +342,14 @@ static int fd_do_writev(struct se_task *task) | |||
344 | struct scatterlist *sg = task->task_sg; | 342 | struct scatterlist *sg = task->task_sg; |
345 | struct iovec *iov; | 343 | struct iovec *iov; |
346 | mm_segment_t old_fs; | 344 | mm_segment_t old_fs; |
347 | loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size); | 345 | loff_t pos = (task->task_lba * |
346 | task->se_dev->se_sub_dev->se_dev_attrib.block_size); | ||
348 | int ret, i = 0; | 347 | int ret, i = 0; |
349 | 348 | ||
350 | iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL); | 349 | iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL); |
351 | if (!(iov)) { | 350 | if (!(iov)) { |
352 | printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n"); | 351 | printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n"); |
353 | return -1; | 352 | return -ENOMEM; |
354 | } | 353 | } |
355 | 354 | ||
356 | for (i = 0; i < task->task_sg_num; i++) { | 355 | for (i = 0; i < task->task_sg_num; i++) { |
@@ -367,7 +366,7 @@ static int fd_do_writev(struct se_task *task) | |||
367 | 366 | ||
368 | if (ret < 0 || ret != task->task_size) { | 367 | if (ret < 0 || ret != task->task_size) { |
369 | printk(KERN_ERR "vfs_writev() returned %d\n", ret); | 368 | printk(KERN_ERR "vfs_writev() returned %d\n", ret); |
370 | return -1; | 369 | return (ret < 0 ? ret : -EINVAL); |
371 | } | 370 | } |
372 | 371 | ||
373 | return 1; | 372 | return 1; |
@@ -375,7 +374,7 @@ static int fd_do_writev(struct se_task *task) | |||
375 | 374 | ||
376 | static void fd_emulate_sync_cache(struct se_task *task) | 375 | static void fd_emulate_sync_cache(struct se_task *task) |
377 | { | 376 | { |
378 | struct se_cmd *cmd = TASK_CMD(task); | 377 | struct se_cmd *cmd = task->task_se_cmd; |
379 | struct se_device *dev = cmd->se_dev; | 378 | struct se_device *dev = cmd->se_dev; |
380 | struct fd_dev *fd_dev = dev->dev_ptr; | 379 | struct fd_dev *fd_dev = dev->dev_ptr; |
381 | int immed = (cmd->t_task->t_task_cdb[1] & 0x2); | 380 | int immed = (cmd->t_task->t_task_cdb[1] & 0x2); |
@@ -396,7 +395,7 @@ static void fd_emulate_sync_cache(struct se_task *task) | |||
396 | start = 0; | 395 | start = 0; |
397 | end = LLONG_MAX; | 396 | end = LLONG_MAX; |
398 | } else { | 397 | } else { |
399 | start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size; | 398 | start = cmd->t_task->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size; |
400 | if (cmd->data_length) | 399 | if (cmd->data_length) |
401 | end = start + cmd->data_length; | 400 | end = start + cmd->data_length; |
402 | else | 401 | else |
@@ -446,7 +445,7 @@ static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task) | |||
446 | { | 445 | { |
447 | struct se_device *dev = cmd->se_dev; | 446 | struct se_device *dev = cmd->se_dev; |
448 | struct fd_dev *fd_dev = dev->dev_ptr; | 447 | struct fd_dev *fd_dev = dev->dev_ptr; |
449 | loff_t start = task->task_lba * DEV_ATTRIB(dev)->block_size; | 448 | loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size; |
450 | loff_t end = start + task->task_size; | 449 | loff_t end = start + task->task_size; |
451 | int ret; | 450 | int ret; |
452 | 451 | ||
@@ -474,9 +473,9 @@ static int fd_do_task(struct se_task *task) | |||
474 | ret = fd_do_writev(task); | 473 | ret = fd_do_writev(task); |
475 | 474 | ||
476 | if (ret > 0 && | 475 | if (ret > 0 && |
477 | DEV_ATTRIB(dev)->emulate_write_cache > 0 && | 476 | dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && |
478 | DEV_ATTRIB(dev)->emulate_fua_write > 0 && | 477 | dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && |
479 | T_TASK(cmd)->t_tasks_fua) { | 478 | cmd->t_task->t_tasks_fua) { |
480 | /* | 479 | /* |
481 | * We might need to be a bit smarter here | 480 | * We might need to be a bit smarter here |
482 | * and return some sense data to let the initiator | 481 | * and return some sense data to let the initiator |
@@ -599,7 +598,7 @@ static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys | |||
599 | 598 | ||
600 | if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) { | 599 | if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) { |
601 | printk(KERN_ERR "Missing fd_dev_name=\n"); | 600 | printk(KERN_ERR "Missing fd_dev_name=\n"); |
602 | return -1; | 601 | return -EINVAL; |
603 | } | 602 | } |
604 | 603 | ||
605 | return 0; | 604 | return 0; |
@@ -654,7 +653,7 @@ static sector_t fd_get_blocks(struct se_device *dev) | |||
654 | { | 653 | { |
655 | struct fd_dev *fd_dev = dev->dev_ptr; | 654 | struct fd_dev *fd_dev = dev->dev_ptr; |
656 | unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size, | 655 | unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size, |
657 | DEV_ATTRIB(dev)->block_size); | 656 | dev->se_sub_dev->se_dev_attrib.block_size); |
658 | 657 | ||
659 | return blocks_long; | 658 | return blocks_long; |
660 | } | 659 | } |
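The FILEIO hunks carry the "use errno values instead of returning -1" cleanup into the I/O path: allocation failures become -ENOMEM, a negative vfs_readv()/vfs_writev() result is forwarded unchanged, and a short transfer, which has no errno of its own, becomes -EINVAL. A minimal sketch of the propagation rule, modeled on the S_ISBLK branch and using a hypothetical helper name (the patch open-codes this at each call site):

#include <errno.h>

/* Sketch: map a VFS read/write result onto the errno convention. */
static int propagate_vfs_result(int ret, int expected)
{
	if (ret < 0)
		return ret;	/* forward the VFS errno as-is */
	if (ret != expected)
		return -EINVAL;	/* short transfer: no errno to forward */
	return 0;
}

Callers higher in the stack can then distinguish -ENOMEM from -EINVAL from a real I/O errno, which the old blanket -1 made impossible.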
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index ef4de2b4bd46..6386d3f60631 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h | |||
@@ -4,8 +4,6 @@ | |||
4 | #define FD_VERSION "4.0" | 4 | #define FD_VERSION "4.0" |
5 | 5 | ||
6 | #define FD_MAX_DEV_NAME 256 | 6 | #define FD_MAX_DEV_NAME 256 |
7 | /* Maximum queuedepth for the FILEIO HBA */ | ||
8 | #define FD_HBA_QUEUE_DEPTH 256 | ||
9 | #define FD_DEVICE_QUEUE_DEPTH 32 | 7 | #define FD_DEVICE_QUEUE_DEPTH 32 |
10 | #define FD_MAX_DEVICE_QUEUE_DEPTH 128 | 8 | #define FD_MAX_DEVICE_QUEUE_DEPTH 128 |
11 | #define FD_BLOCKSIZE 512 | 9 | #define FD_BLOCKSIZE 512 |
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c index 0b8f8da89019..bd9da25bc945 100644 --- a/drivers/target/target_core_hba.c +++ b/drivers/target/target_core_hba.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * Filename: target_core_hba.c | 2 | * Filename: target_core_hba.c |
3 | * | 3 | * |
4 | * This file copntains the iSCSI HBA Transport related functions. | 4 | * This file contains the TCM HBA Transport related functions. |
5 | * | 5 | * |
6 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. | 6 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. |
7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. | 7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. |
@@ -45,6 +45,11 @@ | |||
45 | static LIST_HEAD(subsystem_list); | 45 | static LIST_HEAD(subsystem_list); |
46 | static DEFINE_MUTEX(subsystem_mutex); | 46 | static DEFINE_MUTEX(subsystem_mutex); |
47 | 47 | ||
48 | static u32 hba_id_counter; | ||
49 | |||
50 | static DEFINE_SPINLOCK(hba_lock); | ||
51 | static LIST_HEAD(hba_list); | ||
52 | |||
48 | int transport_subsystem_register(struct se_subsystem_api *sub_api) | 53 | int transport_subsystem_register(struct se_subsystem_api *sub_api) |
49 | { | 54 | { |
50 | struct se_subsystem_api *s; | 55 | struct se_subsystem_api *s; |
@@ -110,15 +115,11 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) | |||
110 | 115 | ||
111 | INIT_LIST_HEAD(&hba->hba_dev_list); | 116 | INIT_LIST_HEAD(&hba->hba_dev_list); |
112 | spin_lock_init(&hba->device_lock); | 117 | spin_lock_init(&hba->device_lock); |
113 | spin_lock_init(&hba->hba_queue_lock); | ||
114 | mutex_init(&hba->hba_access_mutex); | 118 | mutex_init(&hba->hba_access_mutex); |
115 | 119 | ||
116 | hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX); | 120 | hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX); |
117 | hba->hba_flags |= hba_flags; | 121 | hba->hba_flags |= hba_flags; |
118 | 122 | ||
119 | atomic_set(&hba->max_queue_depth, 0); | ||
120 | atomic_set(&hba->left_queue_depth, 0); | ||
121 | |||
122 | hba->transport = core_get_backend(plugin_name); | 123 | hba->transport = core_get_backend(plugin_name); |
123 | if (!hba->transport) { | 124 | if (!hba->transport) { |
124 | ret = -EINVAL; | 125 | ret = -EINVAL; |
@@ -129,10 +130,10 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) | |||
129 | if (ret < 0) | 130 | if (ret < 0) |
130 | goto out_module_put; | 131 | goto out_module_put; |
131 | 132 | ||
132 | spin_lock(&se_global->hba_lock); | 133 | spin_lock(&hba_lock); |
133 | hba->hba_id = se_global->g_hba_id_counter++; | 134 | hba->hba_id = hba_id_counter++; |
134 | list_add_tail(&hba->hba_list, &se_global->g_hba_list); | 135 | list_add_tail(&hba->hba_node, &hba_list); |
135 | spin_unlock(&se_global->hba_lock); | 136 | spin_unlock(&hba_lock); |
136 | 137 | ||
137 | printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target" | 138 | printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target" |
138 | " Core\n", hba->hba_id); | 139 | " Core\n", hba->hba_id); |
@@ -156,9 +157,9 @@ core_delete_hba(struct se_hba *hba) | |||
156 | 157 | ||
157 | hba->transport->detach_hba(hba); | 158 | hba->transport->detach_hba(hba); |
158 | 159 | ||
159 | spin_lock(&se_global->hba_lock); | 160 | spin_lock(&hba_lock); |
160 | list_del(&hba->hba_list); | 161 | list_del(&hba->hba_node); |
161 | spin_unlock(&se_global->hba_lock); | 162 | spin_unlock(&hba_lock); |
162 | 163 | ||
163 | printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target" | 164 | printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target" |
164 | " Core\n", hba->hba_id); | 165 | " Core\n", hba->hba_id); |
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 86639004af9e..fb159876fffc 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c | |||
@@ -74,17 +74,14 @@ static int iblock_attach_hba(struct se_hba *hba, u32 host_id) | |||
74 | 74 | ||
75 | ib_host->iblock_host_id = host_id; | 75 | ib_host->iblock_host_id = host_id; |
76 | 76 | ||
77 | atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH); | ||
78 | atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH); | ||
79 | hba->hba_ptr = (void *) ib_host; | 77 | hba->hba_ptr = (void *) ib_host; |
80 | 78 | ||
81 | printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on" | 79 | printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on" |
82 | " Generic Target Core Stack %s\n", hba->hba_id, | 80 | " Generic Target Core Stack %s\n", hba->hba_id, |
83 | IBLOCK_VERSION, TARGET_CORE_MOD_VERSION); | 81 | IBLOCK_VERSION, TARGET_CORE_MOD_VERSION); |
84 | 82 | ||
85 | printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic" | 83 | printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n", |
86 | " Target Core TCQ Depth: %d\n", hba->hba_id, | 84 | hba->hba_id, ib_host->iblock_host_id); |
87 | ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth)); | ||
88 | 85 | ||
89 | return 0; | 86 | return 0; |
90 | } | 87 | } |
@@ -188,15 +185,15 @@ static struct se_device *iblock_create_virtdevice( | |||
188 | * in ATA and we need to set TPE=1 | 185 | * in ATA and we need to set TPE=1 |
189 | */ | 186 | */ |
190 | if (blk_queue_discard(q)) { | 187 | if (blk_queue_discard(q)) { |
191 | DEV_ATTRIB(dev)->max_unmap_lba_count = | 188 | dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = |
192 | q->limits.max_discard_sectors; | 189 | q->limits.max_discard_sectors; |
193 | /* | 190 | /* |
194 | * Currently hardcoded to 1 in Linux/SCSI code.. | 191 | * Currently hardcoded to 1 in Linux/SCSI code.. |
195 | */ | 192 | */ |
196 | DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1; | 193 | dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1; |
197 | DEV_ATTRIB(dev)->unmap_granularity = | 194 | dev->se_sub_dev->se_dev_attrib.unmap_granularity = |
198 | q->limits.discard_granularity; | 195 | q->limits.discard_granularity; |
199 | DEV_ATTRIB(dev)->unmap_granularity_alignment = | 196 | dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = |
200 | q->limits.discard_alignment; | 197 | q->limits.discard_alignment; |
201 | 198 | ||
202 | printk(KERN_INFO "IBLOCK: BLOCK Discard support available," | 199 | printk(KERN_INFO "IBLOCK: BLOCK Discard support available," |
@@ -243,7 +240,7 @@ iblock_alloc_task(struct se_cmd *cmd) | |||
243 | return NULL; | 240 | return NULL; |
244 | } | 241 | } |
245 | 242 | ||
246 | ib_req->ib_dev = SE_DEV(cmd)->dev_ptr; | 243 | ib_req->ib_dev = cmd->se_lun->lun_se_dev->dev_ptr; |
247 | atomic_set(&ib_req->ib_bio_cnt, 0); | 244 | atomic_set(&ib_req->ib_bio_cnt, 0); |
248 | return &ib_req->ib_task; | 245 | return &ib_req->ib_task; |
249 | } | 246 | } |
@@ -257,12 +254,12 @@ static unsigned long long iblock_emulate_read_cap_with_block_size( | |||
257 | bdev_logical_block_size(bd)) - 1); | 254 | bdev_logical_block_size(bd)) - 1); |
258 | u32 block_size = bdev_logical_block_size(bd); | 255 | u32 block_size = bdev_logical_block_size(bd); |
259 | 256 | ||
260 | if (block_size == DEV_ATTRIB(dev)->block_size) | 257 | if (block_size == dev->se_sub_dev->se_dev_attrib.block_size) |
261 | return blocks_long; | 258 | return blocks_long; |
262 | 259 | ||
263 | switch (block_size) { | 260 | switch (block_size) { |
264 | case 4096: | 261 | case 4096: |
265 | switch (DEV_ATTRIB(dev)->block_size) { | 262 | switch (dev->se_sub_dev->se_dev_attrib.block_size) { |
266 | case 2048: | 263 | case 2048: |
267 | blocks_long <<= 1; | 264 | blocks_long <<= 1; |
268 | break; | 265 | break; |
@@ -276,7 +273,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size( | |||
276 | } | 273 | } |
277 | break; | 274 | break; |
278 | case 2048: | 275 | case 2048: |
279 | switch (DEV_ATTRIB(dev)->block_size) { | 276 | switch (dev->se_sub_dev->se_dev_attrib.block_size) { |
280 | case 4096: | 277 | case 4096: |
281 | blocks_long >>= 1; | 278 | blocks_long >>= 1; |
282 | break; | 279 | break; |
@@ -291,7 +288,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size( | |||
291 | } | 288 | } |
292 | break; | 289 | break; |
293 | case 1024: | 290 | case 1024: |
294 | switch (DEV_ATTRIB(dev)->block_size) { | 291 | switch (dev->se_sub_dev->se_dev_attrib.block_size) { |
295 | case 4096: | 292 | case 4096: |
296 | blocks_long >>= 2; | 293 | blocks_long >>= 2; |
297 | break; | 294 | break; |
@@ -306,7 +303,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size( | |||
306 | } | 303 | } |
307 | break; | 304 | break; |
308 | case 512: | 305 | case 512: |
309 | switch (DEV_ATTRIB(dev)->block_size) { | 306 | switch (dev->se_sub_dev->se_dev_attrib.block_size) { |
310 | case 4096: | 307 | case 4096: |
311 | blocks_long >>= 3; | 308 | blocks_long >>= 3; |
312 | break; | 309 | break; |
@@ -332,9 +329,9 @@ static unsigned long long iblock_emulate_read_cap_with_block_size( | |||
332 | */ | 329 | */ |
333 | static void iblock_emulate_sync_cache(struct se_task *task) | 330 | static void iblock_emulate_sync_cache(struct se_task *task) |
334 | { | 331 | { |
335 | struct se_cmd *cmd = TASK_CMD(task); | 332 | struct se_cmd *cmd = task->task_se_cmd; |
336 | struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; | 333 | struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; |
337 | int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2); | 334 | int immed = (cmd->t_task->t_task_cdb[1] & 0x2); |
338 | sector_t error_sector; | 335 | sector_t error_sector; |
339 | int ret; | 336 | int ret; |
340 | 337 | ||
@@ -401,9 +398,9 @@ static int iblock_do_task(struct se_task *task) | |||
401 | * Force data to disk if we pretend to not have a volatile | 398 | * Force data to disk if we pretend to not have a volatile |
402 | * write cache, or the initiator set the Force Unit Access bit. | 399 | * write cache, or the initiator set the Force Unit Access bit. |
403 | */ | 400 | */ |
404 | if (DEV_ATTRIB(dev)->emulate_write_cache == 0 || | 401 | if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || |
405 | (DEV_ATTRIB(dev)->emulate_fua_write > 0 && | 402 | (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && |
406 | T_TASK(task->task_se_cmd)->t_tasks_fua)) | 403 | task->task_se_cmd->t_task->t_tasks_fua)) |
407 | rw = WRITE_FUA; | 404 | rw = WRITE_FUA; |
408 | else | 405 | else |
409 | rw = WRITE; | 406 | rw = WRITE; |
@@ -527,7 +524,7 @@ static ssize_t iblock_check_configfs_dev_params( | |||
527 | 524 | ||
528 | if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) { | 525 | if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) { |
529 | printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n"); | 526 | printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n"); |
530 | return -1; | 527 | return -EINVAL; |
531 | } | 528 | } |
532 | 529 | ||
533 | return 0; | 530 | return 0; |
@@ -611,7 +608,7 @@ static struct bio *iblock_get_bio( | |||
611 | static int iblock_map_task_SG(struct se_task *task) | 608 | static int iblock_map_task_SG(struct se_task *task) |
612 | { | 609 | { |
613 | struct se_cmd *cmd = task->task_se_cmd; | 610 | struct se_cmd *cmd = task->task_se_cmd; |
614 | struct se_device *dev = SE_DEV(cmd); | 611 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
615 | struct iblock_dev *ib_dev = task->se_dev->dev_ptr; | 612 | struct iblock_dev *ib_dev = task->se_dev->dev_ptr; |
616 | struct iblock_req *ib_req = IBLOCK_REQ(task); | 613 | struct iblock_req *ib_req = IBLOCK_REQ(task); |
617 | struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; | 614 | struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; |
@@ -623,17 +620,17 @@ static int iblock_map_task_SG(struct se_task *task) | |||
623 | * Do starting conversion up from non 512-byte blocksize with | 620 | * Do starting conversion up from non 512-byte blocksize with |
624 | * struct se_task SCSI blocksize into Linux/Block 512 units for BIO. | 621 | * struct se_task SCSI blocksize into Linux/Block 512 units for BIO. |
625 | */ | 622 | */ |
626 | if (DEV_ATTRIB(dev)->block_size == 4096) | 623 | if (dev->se_sub_dev->se_dev_attrib.block_size == 4096) |
627 | block_lba = (task->task_lba << 3); | 624 | block_lba = (task->task_lba << 3); |
628 | else if (DEV_ATTRIB(dev)->block_size == 2048) | 625 | else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048) |
629 | block_lba = (task->task_lba << 2); | 626 | block_lba = (task->task_lba << 2); |
630 | else if (DEV_ATTRIB(dev)->block_size == 1024) | 627 | else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024) |
631 | block_lba = (task->task_lba << 1); | 628 | block_lba = (task->task_lba << 1); |
632 | else if (DEV_ATTRIB(dev)->block_size == 512) | 629 | else if (dev->se_sub_dev->se_dev_attrib.block_size == 512) |
633 | block_lba = task->task_lba; | 630 | block_lba = task->task_lba; |
634 | else { | 631 | else { |
635 | printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:" | 632 | printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:" |
636 | " %u\n", DEV_ATTRIB(dev)->block_size); | 633 | " %u\n", dev->se_sub_dev->se_dev_attrib.block_size); |
637 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 634 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
638 | } | 635 | } |
639 | 636 | ||
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h index 64c1f4d69f76..6b6d17bb1fd6 100644 --- a/drivers/target/target_core_iblock.h +++ b/drivers/target/target_core_iblock.h | |||
@@ -3,7 +3,6 @@ | |||
3 | 3 | ||
4 | #define IBLOCK_VERSION "4.0" | 4 | #define IBLOCK_VERSION "4.0" |
5 | 5 | ||
6 | #define IBLOCK_HBA_QUEUE_DEPTH 512 | ||
7 | #define IBLOCK_DEVICE_QUEUE_DEPTH 32 | 6 | #define IBLOCK_DEVICE_QUEUE_DEPTH 32 |
8 | #define IBLOCK_MAX_DEVICE_QUEUE_DEPTH 128 | 7 | #define IBLOCK_MAX_DEVICE_QUEUE_DEPTH 128 |
9 | #define IBLOCK_MAX_CDBS 16 | 8 | #define IBLOCK_MAX_CDBS 16 |
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index b662db3a320b..27a7525971b9 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c | |||
@@ -105,13 +105,13 @@ static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type) | |||
105 | } | 105 | } |
106 | if (dev->dev_reserved_node_acl != sess->se_node_acl) { | 106 | if (dev->dev_reserved_node_acl != sess->se_node_acl) { |
107 | spin_unlock(&dev->dev_reservation_lock); | 107 | spin_unlock(&dev->dev_reservation_lock); |
108 | return -1; | 108 | return -EINVAL; |
109 | } | 109 | } |
110 | if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) { | 110 | if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) { |
111 | spin_unlock(&dev->dev_reservation_lock); | 111 | spin_unlock(&dev->dev_reservation_lock); |
112 | return 0; | 112 | return 0; |
113 | } | 113 | } |
114 | ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -1; | 114 | ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -EINVAL; |
115 | spin_unlock(&dev->dev_reservation_lock); | 115 | spin_unlock(&dev->dev_reservation_lock); |
116 | 116 | ||
117 | return ret; | 117 | return ret; |
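The reservation-check hunk applies the same errno rule to SCSI-2 reservations: 0 means the I_T nexus may proceed, and -EINVAL replaces the bare -1 conflict return, including in the ISID ternary. A simplified model of the decision order, with invented field names standing in for the se_device/se_session state:

#include <errno.h>
#include <stdint.h>

/* Toy model: a reservation is held by (holder, optional isid). */
struct resv {
	const void *holder;
	uint64_t isid;
	int isid_valid;
};

static int scsi2_resv_check(const struct resv *r,
			    const void *nexus, uint64_t isid)
{
	if (!r->holder)
		return 0;		/* nothing reserved */
	if (r->holder != nexus)
		return -EINVAL;		/* conflict: different nexus */
	if (!r->isid_valid)
		return 0;		/* holder matches, no ISID bound */
	return (r->isid == isid) ? 0 : -EINVAL;
}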
@@ -143,8 +143,8 @@ static int core_scsi2_reservation_release(struct se_cmd *cmd) | |||
143 | dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID; | 143 | dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID; |
144 | } | 144 | } |
145 | printk(KERN_INFO "SCSI-2 Released reservation for %s LUN: %u ->" | 145 | printk(KERN_INFO "SCSI-2 Released reservation for %s LUN: %u ->" |
146 | " MAPPED LUN: %u for %s\n", TPG_TFO(tpg)->get_fabric_name(), | 146 | " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
147 | SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun, | 147 | cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, |
148 | sess->se_node_acl->initiatorname); | 148 | sess->se_node_acl->initiatorname); |
149 | spin_unlock(&dev->dev_reservation_lock); | 149 | spin_unlock(&dev->dev_reservation_lock); |
150 | 150 | ||
@@ -157,8 +157,8 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd) | |||
157 | struct se_session *sess = cmd->se_sess; | 157 | struct se_session *sess = cmd->se_sess; |
158 | struct se_portal_group *tpg = sess->se_tpg; | 158 | struct se_portal_group *tpg = sess->se_tpg; |
159 | 159 | ||
160 | if ((T_TASK(cmd)->t_task_cdb[1] & 0x01) && | 160 | if ((cmd->t_task->t_task_cdb[1] & 0x01) && |
161 | (T_TASK(cmd)->t_task_cdb[1] & 0x02)) { | 161 | (cmd->t_task->t_task_cdb[1] & 0x02)) { |
162 | printk(KERN_ERR "LongIO and Obselete Bits set, returning" | 162 | printk(KERN_ERR "LongIO and Obselete Bits set, returning" |
163 | " ILLEGAL_REQUEST\n"); | 163 | " ILLEGAL_REQUEST\n"); |
164 | return PYX_TRANSPORT_ILLEGAL_REQUEST; | 164 | return PYX_TRANSPORT_ILLEGAL_REQUEST; |
@@ -174,12 +174,12 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd) | |||
174 | if (dev->dev_reserved_node_acl && | 174 | if (dev->dev_reserved_node_acl && |
175 | (dev->dev_reserved_node_acl != sess->se_node_acl)) { | 175 | (dev->dev_reserved_node_acl != sess->se_node_acl)) { |
176 | printk(KERN_ERR "SCSI-2 RESERVATION CONFLIFT for %s fabric\n", | 176 | printk(KERN_ERR "SCSI-2 RESERVATION CONFLIFT for %s fabric\n", |
177 | TPG_TFO(tpg)->get_fabric_name()); | 177 | tpg->se_tpg_tfo->get_fabric_name()); |
178 | printk(KERN_ERR "Original reserver LUN: %u %s\n", | 178 | printk(KERN_ERR "Original reserver LUN: %u %s\n", |
179 | SE_LUN(cmd)->unpacked_lun, | 179 | cmd->se_lun->unpacked_lun, |
180 | dev->dev_reserved_node_acl->initiatorname); | 180 | dev->dev_reserved_node_acl->initiatorname); |
181 | printk(KERN_ERR "Current attempt - LUN: %u -> MAPPED LUN: %u" | 181 | printk(KERN_ERR "Current attempt - LUN: %u -> MAPPED LUN: %u" |
182 | " from %s \n", SE_LUN(cmd)->unpacked_lun, | 182 | " from %s \n", cmd->se_lun->unpacked_lun, |
183 | cmd->se_deve->mapped_lun, | 183 | cmd->se_deve->mapped_lun, |
184 | sess->se_node_acl->initiatorname); | 184 | sess->se_node_acl->initiatorname); |
185 | spin_unlock(&dev->dev_reservation_lock); | 185 | spin_unlock(&dev->dev_reservation_lock); |
@@ -193,8 +193,8 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd) | |||
193 | dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID; | 193 | dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID; |
194 | } | 194 | } |
195 | printk(KERN_INFO "SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u" | 195 | printk(KERN_INFO "SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u" |
196 | " for %s\n", TPG_TFO(tpg)->get_fabric_name(), | 196 | " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
197 | SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun, | 197 | cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, |
198 | sess->se_node_acl->initiatorname); | 198 | sess->se_node_acl->initiatorname); |
199 | spin_unlock(&dev->dev_reservation_lock); | 199 | spin_unlock(&dev->dev_reservation_lock); |
200 | 200 | ||
@@ -215,9 +215,9 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd) | |||
215 | struct se_session *se_sess = cmd->se_sess; | 215 | struct se_session *se_sess = cmd->se_sess; |
216 | struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; | 216 | struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; |
217 | struct t10_pr_registration *pr_reg; | 217 | struct t10_pr_registration *pr_reg; |
218 | struct t10_reservation_template *pr_tmpl = &su_dev->t10_reservation; | 218 | struct t10_reservation *pr_tmpl = &su_dev->t10_pr; |
219 | unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0]; | 219 | unsigned char *cdb = &cmd->t_task->t_task_cdb[0]; |
220 | int crh = (T10_RES(su_dev)->res_type == SPC3_PERSISTENT_RESERVATIONS); | 220 | int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS); |
221 | int conflict = 0; | 221 | int conflict = 0; |
222 | 222 | ||
223 | if (!(se_sess)) | 223 | if (!(se_sess)) |
@@ -307,7 +307,7 @@ static int core_scsi3_pr_seq_non_holder( | |||
307 | u32 pr_reg_type) | 307 | u32 pr_reg_type) |
308 | { | 308 | { |
309 | struct se_dev_entry *se_deve; | 309 | struct se_dev_entry *se_deve; |
310 | struct se_session *se_sess = SE_SESS(cmd); | 310 | struct se_session *se_sess = cmd->se_sess; |
311 | int other_cdb = 0, ignore_reg; | 311 | int other_cdb = 0, ignore_reg; |
312 | int registered_nexus = 0, ret = 1; /* Conflict by default */ | 312 | int registered_nexus = 0, ret = 1; /* Conflict by default */ |
313 | int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */ | 313 | int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */ |
@@ -362,7 +362,7 @@ static int core_scsi3_pr_seq_non_holder( | |||
362 | registered_nexus = 1; | 362 | registered_nexus = 1; |
363 | break; | 363 | break; |
364 | default: | 364 | default: |
365 | return -1; | 365 | return -EINVAL; |
366 | } | 366 | } |
367 | /* | 367 | /* |
368 | * Referenced from spc4r17 table 45 for *NON* PR holder access | 368 | * Referenced from spc4r17 table 45 for *NON* PR holder access |
@@ -414,7 +414,7 @@ static int core_scsi3_pr_seq_non_holder( | |||
414 | default: | 414 | default: |
415 | printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service" | 415 | printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service" |
416 | " action: 0x%02x\n", cdb[1] & 0x1f); | 416 | " action: 0x%02x\n", cdb[1] & 0x1f); |
417 | return -1; | 417 | return -EINVAL; |
418 | } | 418 | } |
419 | break; | 419 | break; |
420 | case RELEASE: | 420 | case RELEASE: |
@@ -461,7 +461,7 @@ static int core_scsi3_pr_seq_non_holder( | |||
461 | default: | 461 | default: |
462 | printk(KERN_ERR "Unknown MI Service Action: 0x%02x\n", | 462 | printk(KERN_ERR "Unknown MI Service Action: 0x%02x\n", |
463 | (cdb[1] & 0x1f)); | 463 | (cdb[1] & 0x1f)); |
464 | return -1; | 464 | return -EINVAL; |
465 | } | 465 | } |
466 | break; | 466 | break; |
467 | case ACCESS_CONTROL_IN: | 467 | case ACCESS_CONTROL_IN: |
@@ -549,7 +549,7 @@ static int core_scsi3_pr_seq_non_holder( | |||
549 | 549 | ||
550 | static u32 core_scsi3_pr_generation(struct se_device *dev) | 550 | static u32 core_scsi3_pr_generation(struct se_device *dev) |
551 | { | 551 | { |
552 | struct se_subsystem_dev *su_dev = SU_DEV(dev); | 552 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
553 | u32 prg; | 553 | u32 prg; |
554 | /* | 554 | /* |
555 | * PRGeneration field shall contain the value of a 32-bit wrapping | 555 | * PRGeneration field shall contain the value of a 32-bit wrapping |
@@ -561,7 +561,7 @@ static u32 core_scsi3_pr_generation(struct se_device *dev) | |||
561 | * See spc4r17 section 6.3.12 READ_KEYS service action | 561 | * See spc4r17 section 6.3.12 READ_KEYS service action |
562 | */ | 562 | */ |
563 | spin_lock(&dev->dev_reservation_lock); | 563 | spin_lock(&dev->dev_reservation_lock); |
564 | prg = T10_RES(su_dev)->pr_generation++; | 564 | prg = su_dev->t10_pr.pr_generation++; |
565 | spin_unlock(&dev->dev_reservation_lock); | 565 | spin_unlock(&dev->dev_reservation_lock); |
566 | 566 | ||
567 | return prg; | 567 | return prg; |
@@ -592,14 +592,14 @@ static int core_scsi3_pr_reservation_check( | |||
592 | cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key; | 592 | cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key; |
593 | if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) { | 593 | if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) { |
594 | spin_unlock(&dev->dev_reservation_lock); | 594 | spin_unlock(&dev->dev_reservation_lock); |
595 | return -1; | 595 | return -EINVAL; |
596 | } | 596 | } |
597 | if (!(dev->dev_pr_res_holder->isid_present_at_reg)) { | 597 | if (!(dev->dev_pr_res_holder->isid_present_at_reg)) { |
598 | spin_unlock(&dev->dev_reservation_lock); | 598 | spin_unlock(&dev->dev_reservation_lock); |
599 | return 0; | 599 | return 0; |
600 | } | 600 | } |
601 | ret = (dev->dev_pr_res_holder->pr_reg_bin_isid == | 601 | ret = (dev->dev_pr_res_holder->pr_reg_bin_isid == |
602 | sess->sess_bin_isid) ? 0 : -1; | 602 | sess->sess_bin_isid) ? 0 : -EINVAL; |
603 | /* | 603 | /* |
604 | * Use bit in *pr_reg_type to notify ISID mismatch in | 604 | * Use bit in *pr_reg_type to notify ISID mismatch in |
605 | * core_scsi3_pr_seq_non_holder(). | 605 | * core_scsi3_pr_seq_non_holder(). |
@@ -620,7 +620,7 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration( | |||
620 | int all_tg_pt, | 620 | int all_tg_pt, |
621 | int aptpl) | 621 | int aptpl) |
622 | { | 622 | { |
623 | struct se_subsystem_dev *su_dev = SU_DEV(dev); | 623 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
624 | struct t10_pr_registration *pr_reg; | 624 | struct t10_pr_registration *pr_reg; |
625 | 625 | ||
626 | pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC); | 626 | pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC); |
@@ -629,7 +629,7 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration( | |||
629 | return NULL; | 629 | return NULL; |
630 | } | 630 | } |
631 | 631 | ||
632 | pr_reg->pr_aptpl_buf = kzalloc(T10_RES(su_dev)->pr_aptpl_buf_len, | 632 | pr_reg->pr_aptpl_buf = kzalloc(su_dev->t10_pr.pr_aptpl_buf_len, |
633 | GFP_ATOMIC); | 633 | GFP_ATOMIC); |
634 | if (!(pr_reg->pr_aptpl_buf)) { | 634 | if (!(pr_reg->pr_aptpl_buf)) { |
635 | printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n"); | 635 | printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n"); |
@@ -803,7 +803,7 @@ out: | |||
803 | } | 803 | } |
804 | 804 | ||
805 | int core_scsi3_alloc_aptpl_registration( | 805 | int core_scsi3_alloc_aptpl_registration( |
806 | struct t10_reservation_template *pr_tmpl, | 806 | struct t10_reservation *pr_tmpl, |
807 | u64 sa_res_key, | 807 | u64 sa_res_key, |
808 | unsigned char *i_port, | 808 | unsigned char *i_port, |
809 | unsigned char *isid, | 809 | unsigned char *isid, |
@@ -819,13 +819,13 @@ int core_scsi3_alloc_aptpl_registration( | |||
819 | 819 | ||
820 | if (!(i_port) || !(t_port) || !(sa_res_key)) { | 820 | if (!(i_port) || !(t_port) || !(sa_res_key)) { |
821 | printk(KERN_ERR "Illegal parameters for APTPL registration\n"); | 821 | printk(KERN_ERR "Illegal parameters for APTPL registration\n"); |
822 | return -1; | 822 | return -EINVAL; |
823 | } | 823 | } |
824 | 824 | ||
825 | pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL); | 825 | pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL); |
826 | if (!(pr_reg)) { | 826 | if (!(pr_reg)) { |
827 | printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n"); | 827 | printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n"); |
828 | return -1; | 828 | return -ENOMEM; |
829 | } | 829 | } |
830 | pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL); | 830 | pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL); |
831 | 831 | ||
@@ -893,11 +893,11 @@ static void core_scsi3_aptpl_reserve( | |||
893 | 893 | ||
894 | printk(KERN_INFO "SPC-3 PR [%s] Service Action: APTPL RESERVE created" | 894 | printk(KERN_INFO "SPC-3 PR [%s] Service Action: APTPL RESERVE created" |
895 | " new reservation holder TYPE: %s ALL_TG_PT: %d\n", | 895 | " new reservation holder TYPE: %s ALL_TG_PT: %d\n", |
896 | TPG_TFO(tpg)->get_fabric_name(), | 896 | tpg->se_tpg_tfo->get_fabric_name(), |
897 | core_scsi3_pr_dump_type(pr_reg->pr_res_type), | 897 | core_scsi3_pr_dump_type(pr_reg->pr_res_type), |
898 | (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); | 898 | (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); |
899 | printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n", | 899 | printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n", |
900 | TPG_TFO(tpg)->get_fabric_name(), node_acl->initiatorname, | 900 | tpg->se_tpg_tfo->get_fabric_name(), node_acl->initiatorname, |
901 | (prf_isid) ? &i_buf[0] : ""); | 901 | (prf_isid) ? &i_buf[0] : ""); |
902 | } | 902 | } |
903 | 903 | ||
@@ -913,7 +913,7 @@ static int __core_scsi3_check_aptpl_registration( | |||
913 | struct se_dev_entry *deve) | 913 | struct se_dev_entry *deve) |
914 | { | 914 | { |
915 | struct t10_pr_registration *pr_reg, *pr_reg_tmp; | 915 | struct t10_pr_registration *pr_reg, *pr_reg_tmp; |
916 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 916 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
917 | unsigned char i_port[PR_APTPL_MAX_IPORT_LEN]; | 917 | unsigned char i_port[PR_APTPL_MAX_IPORT_LEN]; |
918 | unsigned char t_port[PR_APTPL_MAX_TPORT_LEN]; | 918 | unsigned char t_port[PR_APTPL_MAX_TPORT_LEN]; |
919 | u16 tpgt; | 919 | u16 tpgt; |
@@ -925,8 +925,8 @@ static int __core_scsi3_check_aptpl_registration( | |||
925 | */ | 925 | */ |
926 | snprintf(i_port, PR_APTPL_MAX_IPORT_LEN, "%s", nacl->initiatorname); | 926 | snprintf(i_port, PR_APTPL_MAX_IPORT_LEN, "%s", nacl->initiatorname); |
927 | snprintf(t_port, PR_APTPL_MAX_TPORT_LEN, "%s", | 927 | snprintf(t_port, PR_APTPL_MAX_TPORT_LEN, "%s", |
928 | TPG_TFO(tpg)->tpg_get_wwn(tpg)); | 928 | tpg->se_tpg_tfo->tpg_get_wwn(tpg)); |
929 | tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg); | 929 | tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg); |
930 | /* | 930 | /* |
931 | * Look for the matching registrations+reservation from those | 931 | * Look for the matching registrations+reservation from those |
932 | * created from APTPL metadata. Note that multiple registrations | 932 | * created from APTPL metadata. Note that multiple registrations |
@@ -980,11 +980,11 @@ int core_scsi3_check_aptpl_registration( | |||
980 | struct se_lun *lun, | 980 | struct se_lun *lun, |
981 | struct se_lun_acl *lun_acl) | 981 | struct se_lun_acl *lun_acl) |
982 | { | 982 | { |
983 | struct se_subsystem_dev *su_dev = SU_DEV(dev); | 983 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
984 | struct se_node_acl *nacl = lun_acl->se_lun_nacl; | 984 | struct se_node_acl *nacl = lun_acl->se_lun_nacl; |
985 | struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun]; | 985 | struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun]; |
986 | 986 | ||
987 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | 987 | if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) |
988 | return 0; | 988 | return 0; |
989 | 989 | ||
990 | return __core_scsi3_check_aptpl_registration(dev, tpg, lun, | 990 | return __core_scsi3_check_aptpl_registration(dev, tpg, lun, |
@@ -1017,7 +1017,7 @@ static void __core_scsi3_dump_registration( | |||
1017 | printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" | 1017 | printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" |
1018 | " Port(s)\n", tfo->get_fabric_name(), | 1018 | " Port(s)\n", tfo->get_fabric_name(), |
1019 | (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE", | 1019 | (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE", |
1020 | TRANSPORT(dev)->name); | 1020 | dev->transport->name); |
1021 | printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" | 1021 | printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" |
1022 | " 0x%08x APTPL: %d\n", tfo->get_fabric_name(), | 1022 | " 0x%08x APTPL: %d\n", tfo->get_fabric_name(), |
1023 | pr_reg->pr_res_key, pr_reg->pr_res_generation, | 1023 | pr_reg->pr_res_key, pr_reg->pr_res_generation, |
@@ -1035,10 +1035,10 @@ static void __core_scsi3_add_registration( | |||
1035 | int register_type, | 1035 | int register_type, |
1036 | int register_move) | 1036 | int register_move) |
1037 | { | 1037 | { |
1038 | struct se_subsystem_dev *su_dev = SU_DEV(dev); | 1038 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
1039 | struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; | 1039 | struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; |
1040 | struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; | 1040 | struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; |
1041 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 1041 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
1042 | 1042 | ||
1043 | /* | 1043 | /* |
1044 | * Increment PRgeneration counter for struct se_device upon a successful | 1044 | * Increment PRgeneration counter for struct se_device upon a successful |
@@ -1050,7 +1050,7 @@ static void __core_scsi3_add_registration( | |||
1050 | * for the REGISTER. | 1050 | * for the REGISTER. |
1051 | */ | 1051 | */ |
1052 | pr_reg->pr_res_generation = (register_move) ? | 1052 | pr_reg->pr_res_generation = (register_move) ? |
1053 | T10_RES(su_dev)->pr_generation++ : | 1053 | su_dev->t10_pr.pr_generation++ : |
1054 | core_scsi3_pr_generation(dev); | 1054 | core_scsi3_pr_generation(dev); |
1055 | 1055 | ||
1056 | spin_lock(&pr_tmpl->registration_lock); | 1056 | spin_lock(&pr_tmpl->registration_lock); |
@@ -1107,7 +1107,7 @@ static int core_scsi3_alloc_registration( | |||
1107 | pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid, | 1107 | pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid, |
1108 | sa_res_key, all_tg_pt, aptpl); | 1108 | sa_res_key, all_tg_pt, aptpl); |
1109 | if (!(pr_reg)) | 1109 | if (!(pr_reg)) |
1110 | return -1; | 1110 | return -EPERM; |
1111 | 1111 | ||
1112 | __core_scsi3_add_registration(dev, nacl, pr_reg, | 1112 | __core_scsi3_add_registration(dev, nacl, pr_reg, |
1113 | register_type, register_move); | 1113 | register_type, register_move); |
@@ -1119,7 +1119,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg( | |||
1119 | struct se_node_acl *nacl, | 1119 | struct se_node_acl *nacl, |
1120 | unsigned char *isid) | 1120 | unsigned char *isid) |
1121 | { | 1121 | { |
1122 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 1122 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
1123 | struct t10_pr_registration *pr_reg, *pr_reg_tmp; | 1123 | struct t10_pr_registration *pr_reg, *pr_reg_tmp; |
1124 | struct se_portal_group *tpg; | 1124 | struct se_portal_group *tpg; |
1125 | 1125 | ||
@@ -1143,8 +1143,8 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg( | |||
1143 | * SCSI Intiatior TransportID w/ ISIDs is enforced | 1143 | * SCSI Intiatior TransportID w/ ISIDs is enforced |
1144 | * for fabric modules (iSCSI) requiring them. | 1144 | * for fabric modules (iSCSI) requiring them. |
1145 | */ | 1145 | */ |
1146 | if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) { | 1146 | if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { |
1147 | if (DEV_ATTRIB(dev)->enforce_pr_isids) | 1147 | if (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) |
1148 | continue; | 1148 | continue; |
1149 | } | 1149 | } |
1150 | atomic_inc(&pr_reg->pr_res_holders); | 1150 | atomic_inc(&pr_reg->pr_res_holders); |
@@ -1180,9 +1180,9 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg( | |||
1180 | struct se_portal_group *tpg = nacl->se_tpg; | 1180 | struct se_portal_group *tpg = nacl->se_tpg; |
1181 | unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL; | 1181 | unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL; |
1182 | 1182 | ||
1183 | if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) { | 1183 | if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { |
1184 | memset(&buf[0], 0, PR_REG_ISID_LEN); | 1184 | memset(&buf[0], 0, PR_REG_ISID_LEN); |
1185 | TPG_TFO(tpg)->sess_get_initiator_sid(sess, &buf[0], | 1185 | tpg->se_tpg_tfo->sess_get_initiator_sid(sess, &buf[0], |
1186 | PR_REG_ISID_LEN); | 1186 | PR_REG_ISID_LEN); |
1187 | isid_ptr = &buf[0]; | 1187 | isid_ptr = &buf[0]; |
1188 | } | 1188 | } |
@@ -1240,7 +1240,7 @@ static int core_scsi3_check_implict_release( | |||
1240 | " UNREGISTER while existing reservation with matching" | 1240 | " UNREGISTER while existing reservation with matching" |
1241 | " key 0x%016Lx is present from another SCSI Initiator" | 1241 | " key 0x%016Lx is present from another SCSI Initiator" |
1242 | " Port\n", pr_reg->pr_res_key); | 1242 | " Port\n", pr_reg->pr_res_key); |
1243 | ret = -1; | 1243 | ret = -EPERM; |
1244 | } | 1244 | } |
1245 | spin_unlock(&dev->dev_reservation_lock); | 1245 | spin_unlock(&dev->dev_reservation_lock); |
1246 | 1246 | ||
@@ -1248,7 +1248,7 @@ static int core_scsi3_check_implict_release( | |||
1248 | } | 1248 | } |
1249 | 1249 | ||
1250 | /* | 1250 | /* |
1251 | * Called with struct t10_reservation_template->registration_lock held. | 1251 | * Called with struct t10_reservation->registration_lock held. |
1252 | */ | 1252 | */ |
1253 | static void __core_scsi3_free_registration( | 1253 | static void __core_scsi3_free_registration( |
1254 | struct se_device *dev, | 1254 | struct se_device *dev, |
@@ -1258,7 +1258,7 @@ static void __core_scsi3_free_registration( | |||
1258 | { | 1258 | { |
1259 | struct target_core_fabric_ops *tfo = | 1259 | struct target_core_fabric_ops *tfo = |
1260 | pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; | 1260 | pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; |
1261 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 1261 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
1262 | char i_buf[PR_REG_ISID_ID_LEN]; | 1262 | char i_buf[PR_REG_ISID_ID_LEN]; |
1263 | int prf_isid; | 1263 | int prf_isid; |
1264 | 1264 | ||
@@ -1296,7 +1296,7 @@ static void __core_scsi3_free_registration( | |||
1296 | printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" | 1296 | printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" |
1297 | " Port(s)\n", tfo->get_fabric_name(), | 1297 | " Port(s)\n", tfo->get_fabric_name(), |
1298 | (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE", | 1298 | (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE", |
1299 | TRANSPORT(dev)->name); | 1299 | dev->transport->name); |
1300 | printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" | 1300 | printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" |
1301 | " 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key, | 1301 | " 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key, |
1302 | pr_reg->pr_res_generation); | 1302 | pr_reg->pr_res_generation); |
@@ -1319,7 +1319,7 @@ void core_scsi3_free_pr_reg_from_nacl( | |||
1319 | struct se_device *dev, | 1319 | struct se_device *dev, |
1320 | struct se_node_acl *nacl) | 1320 | struct se_node_acl *nacl) |
1321 | { | 1321 | { |
1322 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 1322 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
1323 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; | 1323 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; |
1324 | /* | 1324 | /* |
1325 | * If the passed se_node_acl matches the reservation holder, | 1325 | * If the passed se_node_acl matches the reservation holder, |
@@ -1349,7 +1349,7 @@ void core_scsi3_free_pr_reg_from_nacl( | |||
1349 | void core_scsi3_free_all_registrations( | 1349 | void core_scsi3_free_all_registrations( |
1350 | struct se_device *dev) | 1350 | struct se_device *dev) |
1351 | { | 1351 | { |
1352 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 1352 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
1353 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; | 1353 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; |
1354 | 1354 | ||
1355 | spin_lock(&dev->dev_reservation_lock); | 1355 | spin_lock(&dev->dev_reservation_lock); |
@@ -1381,13 +1381,13 @@ void core_scsi3_free_all_registrations( | |||
1381 | 1381 | ||
1382 | static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg) | 1382 | static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg) |
1383 | { | 1383 | { |
1384 | return configfs_depend_item(TPG_TFO(tpg)->tf_subsys, | 1384 | return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, |
1385 | &tpg->tpg_group.cg_item); | 1385 | &tpg->tpg_group.cg_item); |
1386 | } | 1386 | } |
1387 | 1387 | ||
1388 | static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg) | 1388 | static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg) |
1389 | { | 1389 | { |
1390 | configfs_undepend_item(TPG_TFO(tpg)->tf_subsys, | 1390 | configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, |
1391 | &tpg->tpg_group.cg_item); | 1391 | &tpg->tpg_group.cg_item); |
1392 | 1392 | ||
1393 | atomic_dec(&tpg->tpg_pr_ref_count); | 1393 | atomic_dec(&tpg->tpg_pr_ref_count); |
@@ -1401,7 +1401,7 @@ static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) | |||
1401 | if (nacl->dynamic_node_acl) | 1401 | if (nacl->dynamic_node_acl) |
1402 | return 0; | 1402 | return 0; |
1403 | 1403 | ||
1404 | return configfs_depend_item(TPG_TFO(tpg)->tf_subsys, | 1404 | return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, |
1405 | &nacl->acl_group.cg_item); | 1405 | &nacl->acl_group.cg_item); |
1406 | } | 1406 | } |
1407 | 1407 | ||
@@ -1415,7 +1415,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl) | |||
1415 | return; | 1415 | return; |
1416 | } | 1416 | } |
1417 | 1417 | ||
1418 | configfs_undepend_item(TPG_TFO(tpg)->tf_subsys, | 1418 | configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, |
1419 | &nacl->acl_group.cg_item); | 1419 | &nacl->acl_group.cg_item); |
1420 | 1420 | ||
1421 | atomic_dec(&nacl->acl_pr_ref_count); | 1421 | atomic_dec(&nacl->acl_pr_ref_count); |
@@ -1436,7 +1436,7 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) | |||
1436 | nacl = lun_acl->se_lun_nacl; | 1436 | nacl = lun_acl->se_lun_nacl; |
1437 | tpg = nacl->se_tpg; | 1437 | tpg = nacl->se_tpg; |
1438 | 1438 | ||
1439 | return configfs_depend_item(TPG_TFO(tpg)->tf_subsys, | 1439 | return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, |
1440 | &lun_acl->se_lun_group.cg_item); | 1440 | &lun_acl->se_lun_group.cg_item); |
1441 | } | 1441 | } |
1442 | 1442 | ||
@@ -1456,7 +1456,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) | |||
1456 | nacl = lun_acl->se_lun_nacl; | 1456 | nacl = lun_acl->se_lun_nacl; |
1457 | tpg = nacl->se_tpg; | 1457 | tpg = nacl->se_tpg; |
1458 | 1458 | ||
1459 | configfs_undepend_item(TPG_TFO(tpg)->tf_subsys, | 1459 | configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, |
1460 | &lun_acl->se_lun_group.cg_item); | 1460 | &lun_acl->se_lun_group.cg_item); |
1461 | 1461 | ||
1462 | atomic_dec(&se_deve->pr_ref_count); | 1462 | atomic_dec(&se_deve->pr_ref_count); |
@@ -1471,10 +1471,10 @@ static int core_scsi3_decode_spec_i_port( | |||
1471 | int all_tg_pt, | 1471 | int all_tg_pt, |
1472 | int aptpl) | 1472 | int aptpl) |
1473 | { | 1473 | { |
1474 | struct se_device *dev = SE_DEV(cmd); | 1474 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
1475 | struct se_port *tmp_port; | 1475 | struct se_port *tmp_port; |
1476 | struct se_portal_group *dest_tpg = NULL, *tmp_tpg; | 1476 | struct se_portal_group *dest_tpg = NULL, *tmp_tpg; |
1477 | struct se_session *se_sess = SE_SESS(cmd); | 1477 | struct se_session *se_sess = cmd->se_sess; |
1478 | struct se_node_acl *dest_node_acl = NULL; | 1478 | struct se_node_acl *dest_node_acl = NULL; |
1479 | struct se_dev_entry *dest_se_deve = NULL, *local_se_deve; | 1479 | struct se_dev_entry *dest_se_deve = NULL, *local_se_deve; |
1480 | struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e; | 1480 | struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e; |
@@ -1482,7 +1482,7 @@ static int core_scsi3_decode_spec_i_port( | |||
1482 | struct list_head tid_dest_list; | 1482 | struct list_head tid_dest_list; |
1483 | struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; | 1483 | struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; |
1484 | struct target_core_fabric_ops *tmp_tf_ops; | 1484 | struct target_core_fabric_ops *tmp_tf_ops; |
1485 | unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; | 1485 | unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; |
1486 | unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; | 1486 | unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; |
1487 | char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; | 1487 | char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; |
1488 | u32 tpdl, tid_len = 0; | 1488 | u32 tpdl, tid_len = 0; |
@@ -1509,7 +1509,7 @@ static int core_scsi3_decode_spec_i_port( | |||
1509 | tidh_new->dest_node_acl = se_sess->se_node_acl; | 1509 | tidh_new->dest_node_acl = se_sess->se_node_acl; |
1510 | tidh_new->dest_se_deve = local_se_deve; | 1510 | tidh_new->dest_se_deve = local_se_deve; |
1511 | 1511 | ||
1512 | local_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd), | 1512 | local_pr_reg = __core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev, |
1513 | se_sess->se_node_acl, local_se_deve, l_isid, | 1513 | se_sess->se_node_acl, local_se_deve, l_isid, |
1514 | sa_res_key, all_tg_pt, aptpl); | 1514 | sa_res_key, all_tg_pt, aptpl); |
1515 | if (!(local_pr_reg)) { | 1515 | if (!(local_pr_reg)) { |
@@ -1557,7 +1557,7 @@ static int core_scsi3_decode_spec_i_port( | |||
1557 | tmp_tpg = tmp_port->sep_tpg; | 1557 | tmp_tpg = tmp_port->sep_tpg; |
1558 | if (!(tmp_tpg)) | 1558 | if (!(tmp_tpg)) |
1559 | continue; | 1559 | continue; |
1560 | tmp_tf_ops = TPG_TFO(tmp_tpg); | 1560 | tmp_tf_ops = tmp_tpg->se_tpg_tfo; |
1561 | if (!(tmp_tf_ops)) | 1561 | if (!(tmp_tf_ops)) |
1562 | continue; | 1562 | continue; |
1563 | if (!(tmp_tf_ops->get_fabric_proto_ident) || | 1563 | if (!(tmp_tf_ops->get_fabric_proto_ident) || |
@@ -1625,7 +1625,7 @@ static int core_scsi3_decode_spec_i_port( | |||
1625 | dest_tpg = tmp_tpg; | 1625 | dest_tpg = tmp_tpg; |
1626 | printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node:" | 1626 | printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node:" |
1627 | " %s Port RTPI: %hu\n", | 1627 | " %s Port RTPI: %hu\n", |
1628 | TPG_TFO(dest_tpg)->get_fabric_name(), | 1628 | dest_tpg->se_tpg_tfo->get_fabric_name(), |
1629 | dest_node_acl->initiatorname, dest_rtpi); | 1629 | dest_node_acl->initiatorname, dest_rtpi); |
1630 | 1630 | ||
1631 | spin_lock(&dev->se_port_lock); | 1631 | spin_lock(&dev->se_port_lock); |
@@ -1642,7 +1642,7 @@ static int core_scsi3_decode_spec_i_port( | |||
1642 | #if 0 | 1642 | #if 0 |
1643 | printk("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u" | 1643 | printk("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u" |
1644 | " tid_len: %d for %s + %s\n", | 1644 | " tid_len: %d for %s + %s\n", |
1645 | TPG_TFO(dest_tpg)->get_fabric_name(), cmd->data_length, | 1645 | dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length, |
1646 | tpdl, tid_len, i_str, iport_ptr); | 1646 | tpdl, tid_len, i_str, iport_ptr); |
1647 | #endif | 1647 | #endif |
1648 | if (tid_len > tpdl) { | 1648 | if (tid_len > tpdl) { |
@@ -1663,7 +1663,7 @@ static int core_scsi3_decode_spec_i_port( | |||
1663 | if (!(dest_se_deve)) { | 1663 | if (!(dest_se_deve)) { |
1664 | printk(KERN_ERR "Unable to locate %s dest_se_deve" | 1664 | printk(KERN_ERR "Unable to locate %s dest_se_deve" |
1665 | " from destination RTPI: %hu\n", | 1665 | " from destination RTPI: %hu\n", |
1666 | TPG_TFO(dest_tpg)->get_fabric_name(), | 1666 | dest_tpg->se_tpg_tfo->get_fabric_name(), |
1667 | dest_rtpi); | 1667 | dest_rtpi); |
1668 | 1668 | ||
1669 | core_scsi3_nodeacl_undepend_item(dest_node_acl); | 1669 | core_scsi3_nodeacl_undepend_item(dest_node_acl); |
@@ -1686,7 +1686,7 @@ static int core_scsi3_decode_spec_i_port( | |||
1686 | #if 0 | 1686 | #if 0 |
1687 | printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node: %s" | 1687 | printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node: %s" |
1688 | " dest_se_deve mapped_lun: %u\n", | 1688 | " dest_se_deve mapped_lun: %u\n", |
1689 | TPG_TFO(dest_tpg)->get_fabric_name(), | 1689 | dest_tpg->se_tpg_tfo->get_fabric_name(), |
1690 | dest_node_acl->initiatorname, dest_se_deve->mapped_lun); | 1690 | dest_node_acl->initiatorname, dest_se_deve->mapped_lun); |
1691 | #endif | 1691 | #endif |
1692 | /* | 1692 | /* |
@@ -1741,7 +1741,7 @@ static int core_scsi3_decode_spec_i_port( | |||
1741 | * and then call __core_scsi3_add_registration() in the | 1741 | * and then call __core_scsi3_add_registration() in the |
1742 | * 2nd loop which will never fail. | 1742 | * 2nd loop which will never fail. |
1743 | */ | 1743 | */ |
1744 | dest_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd), | 1744 | dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev, |
1745 | dest_node_acl, dest_se_deve, iport_ptr, | 1745 | dest_node_acl, dest_se_deve, iport_ptr, |
1746 | sa_res_key, all_tg_pt, aptpl); | 1746 | sa_res_key, all_tg_pt, aptpl); |
1747 | if (!(dest_pr_reg)) { | 1747 | if (!(dest_pr_reg)) { |
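
The comment in this hunk describes the two-pass design of core_scsi3_decode_spec_i_port(): every dest_pr_reg is allocated in the first loop, where a failure can still be unwound, so the __core_scsi3_add_registration() calls in the second loop can never fail partway through. A minimal user-space sketch of that allocate-then-commit pattern (names and types here are illustrative, not the kernel's):

	#include <errno.h>
	#include <stdlib.h>

	struct reg { int key; struct reg *next; };

	/* Phase 1: allocate everything up front; on failure, free what was
	 * built and bail before anything has been published. */
	static int alloc_all(struct reg **head, const int *keys, int n)
	{
		for (int i = 0; i < n; i++) {
			struct reg *r = malloc(sizeof(*r));
			if (!r) {
				while (*head) {
					struct reg *t = *head;
					*head = t->next;
					free(t);
				}
				return -ENOMEM;
			}
			r->key = keys[i];
			r->next = *head;
			*head = r;
		}
		return 0;	/* Phase 2 (publishing) can now never fail. */
	}
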
@@ -1787,12 +1787,12 @@ static int core_scsi3_decode_spec_i_port( | |||
1787 | prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0], | 1787 | prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0], |
1788 | PR_REG_ISID_ID_LEN); | 1788 | PR_REG_ISID_ID_LEN); |
1789 | 1789 | ||
1790 | __core_scsi3_add_registration(SE_DEV(cmd), dest_node_acl, | 1790 | __core_scsi3_add_registration(cmd->se_lun->lun_se_dev, dest_node_acl, |
1791 | dest_pr_reg, 0, 0); | 1791 | dest_pr_reg, 0, 0); |
1792 | 1792 | ||
1793 | printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully" | 1793 | printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully" |
1794 | " registered Transport ID for Node: %s%s Mapped LUN:" | 1794 | " registered Transport ID for Node: %s%s Mapped LUN:" |
1795 | " %u\n", TPG_TFO(dest_tpg)->get_fabric_name(), | 1795 | " %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(), |
1796 | dest_node_acl->initiatorname, (prf_isid) ? | 1796 | dest_node_acl->initiatorname, (prf_isid) ? |
1797 | &i_buf[0] : "", dest_se_deve->mapped_lun); | 1797 | &i_buf[0] : "", dest_se_deve->mapped_lun); |
1798 | 1798 | ||
@@ -1855,7 +1855,7 @@ static int __core_scsi3_update_aptpl_buf( | |||
1855 | { | 1855 | { |
1856 | struct se_lun *lun; | 1856 | struct se_lun *lun; |
1857 | struct se_portal_group *tpg; | 1857 | struct se_portal_group *tpg; |
1858 | struct se_subsystem_dev *su_dev = SU_DEV(dev); | 1858 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
1859 | struct t10_pr_registration *pr_reg; | 1859 | struct t10_pr_registration *pr_reg; |
1860 | unsigned char tmp[512], isid_buf[32]; | 1860 | unsigned char tmp[512], isid_buf[32]; |
1861 | ssize_t len = 0; | 1861 | ssize_t len = 0; |
@@ -1873,8 +1873,8 @@ static int __core_scsi3_update_aptpl_buf( | |||
1873 | /* | 1873 | /* |
1874 | * Walk the registration list.. | 1874 | * Walk the registration list.. |
1875 | */ | 1875 | */ |
1876 | spin_lock(&T10_RES(su_dev)->registration_lock); | 1876 | spin_lock(&su_dev->t10_pr.registration_lock); |
1877 | list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list, | 1877 | list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list, |
1878 | pr_reg_list) { | 1878 | pr_reg_list) { |
1879 | 1879 | ||
1880 | tmp[0] = '\0'; | 1880 | tmp[0] = '\0'; |
@@ -1900,7 +1900,7 @@ static int __core_scsi3_update_aptpl_buf( | |||
1900 | "res_holder=1\nres_type=%02x\n" | 1900 | "res_holder=1\nres_type=%02x\n" |
1901 | "res_scope=%02x\nres_all_tg_pt=%d\n" | 1901 | "res_scope=%02x\nres_all_tg_pt=%d\n" |
1902 | "mapped_lun=%u\n", reg_count, | 1902 | "mapped_lun=%u\n", reg_count, |
1903 | TPG_TFO(tpg)->get_fabric_name(), | 1903 | tpg->se_tpg_tfo->get_fabric_name(), |
1904 | pr_reg->pr_reg_nacl->initiatorname, isid_buf, | 1904 | pr_reg->pr_reg_nacl->initiatorname, isid_buf, |
1905 | pr_reg->pr_res_key, pr_reg->pr_res_type, | 1905 | pr_reg->pr_res_key, pr_reg->pr_res_type, |
1906 | pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt, | 1906 | pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt, |
@@ -1910,7 +1910,7 @@ static int __core_scsi3_update_aptpl_buf( | |||
1910 | "initiator_fabric=%s\ninitiator_node=%s\n%s" | 1910 | "initiator_fabric=%s\ninitiator_node=%s\n%s" |
1911 | "sa_res_key=%llu\nres_holder=0\n" | 1911 | "sa_res_key=%llu\nres_holder=0\n" |
1912 | "res_all_tg_pt=%d\nmapped_lun=%u\n", | 1912 | "res_all_tg_pt=%d\nmapped_lun=%u\n", |
1913 | reg_count, TPG_TFO(tpg)->get_fabric_name(), | 1913 | reg_count, tpg->se_tpg_tfo->get_fabric_name(), |
1914 | pr_reg->pr_reg_nacl->initiatorname, isid_buf, | 1914 | pr_reg->pr_reg_nacl->initiatorname, isid_buf, |
1915 | pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt, | 1915 | pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt, |
1916 | pr_reg->pr_res_mapped_lun); | 1916 | pr_reg->pr_res_mapped_lun); |
@@ -1919,8 +1919,8 @@ static int __core_scsi3_update_aptpl_buf( | |||
1919 | if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { | 1919 | if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { |
1920 | printk(KERN_ERR "Unable to update renaming" | 1920 | printk(KERN_ERR "Unable to update renaming" |
1921 | " APTPL metadata\n"); | 1921 | " APTPL metadata\n"); |
1922 | spin_unlock(&T10_RES(su_dev)->registration_lock); | 1922 | spin_unlock(&su_dev->t10_pr.registration_lock); |
1923 | return -1; | 1923 | return -EMSGSIZE; |
1924 | } | 1924 | } |
1925 | len += sprintf(buf+len, "%s", tmp); | 1925 | len += sprintf(buf+len, "%s", tmp); |
1926 | 1926 | ||
@@ -1929,21 +1929,21 @@ static int __core_scsi3_update_aptpl_buf( | |||
1929 | */ | 1929 | */ |
1930 | snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n" | 1930 | snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n" |
1931 | "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:" | 1931 | "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:" |
1932 | " %d\n", TPG_TFO(tpg)->get_fabric_name(), | 1932 | " %d\n", tpg->se_tpg_tfo->get_fabric_name(), |
1933 | TPG_TFO(tpg)->tpg_get_wwn(tpg), | 1933 | tpg->se_tpg_tfo->tpg_get_wwn(tpg), |
1934 | TPG_TFO(tpg)->tpg_get_tag(tpg), | 1934 | tpg->se_tpg_tfo->tpg_get_tag(tpg), |
1935 | lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count); | 1935 | lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count); |
1936 | 1936 | ||
1937 | if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { | 1937 | if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { |
1938 | printk(KERN_ERR "Unable to update renaming" | 1938 | printk(KERN_ERR "Unable to update renaming" |
1939 | " APTPL metadata\n"); | 1939 | " APTPL metadata\n"); |
1940 | spin_unlock(&T10_RES(su_dev)->registration_lock); | 1940 | spin_unlock(&su_dev->t10_pr.registration_lock); |
1941 | return -1; | 1941 | return -EMSGSIZE; |
1942 | } | 1942 | } |
1943 | len += sprintf(buf+len, "%s", tmp); | 1943 | len += sprintf(buf+len, "%s", tmp); |
1944 | reg_count++; | 1944 | reg_count++; |
1945 | } | 1945 | } |
1946 | spin_unlock(&T10_RES(su_dev)->registration_lock); | 1946 | spin_unlock(&su_dev->t10_pr.registration_lock); |
1947 | 1947 | ||
1948 | if (!(reg_count)) | 1948 | if (!(reg_count)) |
1949 | len += sprintf(buf+len, "No Registrations or Reservations"); | 1949 | len += sprintf(buf+len, "No Registrations or Reservations"); |
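
The two overflow checks above now return -EMSGSIZE when a formatted APTPL record would not fit in the preallocated buffer, instead of collapsing to -1. A small sketch of the bounded-append pattern, assuming illustrative field names and the 512-byte scratch buffer visible in the hunk:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	static int append_record(char *buf, size_t buf_len, size_t *len,
				 const char *fabric, unsigned long long key)
	{
		char tmp[512];

		snprintf(tmp, sizeof(tmp),
			 "initiator_fabric=%s\nsa_res_key=%llu\n", fabric, key);
		/* Fail with a meaningful errno instead of -1 on overflow. */
		if (*len + strlen(tmp) >= buf_len)
			return -EMSGSIZE;
		*len += sprintf(buf + *len, "%s", tmp);
		return 0;
	}
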
@@ -1975,7 +1975,7 @@ static int __core_scsi3_write_aptpl_to_file( | |||
1975 | unsigned char *buf, | 1975 | unsigned char *buf, |
1976 | u32 pr_aptpl_buf_len) | 1976 | u32 pr_aptpl_buf_len) |
1977 | { | 1977 | { |
1978 | struct t10_wwn *wwn = &SU_DEV(dev)->t10_wwn; | 1978 | struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; |
1979 | struct file *file; | 1979 | struct file *file; |
1980 | struct iovec iov[1]; | 1980 | struct iovec iov[1]; |
1981 | mm_segment_t old_fs; | 1981 | mm_segment_t old_fs; |
@@ -1989,7 +1989,7 @@ static int __core_scsi3_write_aptpl_to_file( | |||
1989 | if (strlen(&wwn->unit_serial[0]) >= 512) { | 1989 | if (strlen(&wwn->unit_serial[0]) >= 512) { |
1990 | printk(KERN_ERR "WWN value for struct se_device does not fit" | 1990 | printk(KERN_ERR "WWN value for struct se_device does not fit" |
1991 | " into path buffer\n"); | 1991 | " into path buffer\n"); |
1992 | return -1; | 1992 | return -EMSGSIZE; |
1993 | } | 1993 | } |
1994 | 1994 | ||
1995 | snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]); | 1995 | snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]); |
@@ -1997,7 +1997,7 @@ static int __core_scsi3_write_aptpl_to_file( | |||
1997 | if (IS_ERR(file) || !file || !file->f_dentry) { | 1997 | if (IS_ERR(file) || !file || !file->f_dentry) { |
1998 | printk(KERN_ERR "filp_open(%s) for APTPL metadata" | 1998 | printk(KERN_ERR "filp_open(%s) for APTPL metadata" |
1999 | " failed\n", path); | 1999 | " failed\n", path); |
2000 | return -1; | 2000 | return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT); |
2001 | } | 2001 | } |
2002 | 2002 | ||
2003 | iov[0].iov_base = &buf[0]; | 2003 | iov[0].iov_base = &buf[0]; |
@@ -2014,7 +2014,7 @@ static int __core_scsi3_write_aptpl_to_file( | |||
2014 | if (ret < 0) { | 2014 | if (ret < 0) { |
2015 | printk("Error writing APTPL metadata file: %s\n", path); | 2015 | printk("Error writing APTPL metadata file: %s\n", path); |
2016 | filp_close(file, NULL); | 2016 | filp_close(file, NULL); |
2017 | return -1; | 2017 | return -EIO; |
2018 | } | 2018 | } |
2019 | filp_close(file, NULL); | 2019 | filp_close(file, NULL); |
2020 | 2020 | ||
@@ -2049,14 +2049,14 @@ static int core_scsi3_update_and_write_aptpl( | |||
2049 | ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len, | 2049 | ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len, |
2050 | clear_aptpl_metadata); | 2050 | clear_aptpl_metadata); |
2051 | if (ret != 0) | 2051 | if (ret != 0) |
2052 | return -1; | 2052 | return ret; |
2053 | /* | 2053 | /* |
2054 | * __core_scsi3_write_aptpl_to_file() will call strlen() | 2054 | * __core_scsi3_write_aptpl_to_file() will call strlen() |
2055 | * on the passed buf to determine pr_aptpl_buf_len. | 2055 | * on the passed buf to determine pr_aptpl_buf_len. |
2056 | */ | 2056 | */ |
2057 | ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0); | 2057 | ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0); |
2058 | if (ret != 0) | 2058 | if (ret != 0) |
2059 | return -1; | 2059 | return ret; |
2060 | 2060 | ||
2061 | return ret; | 2061 | return ret; |
2062 | } | 2062 | } |
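
With __core_scsi3_update_aptpl_buf() and __core_scsi3_write_aptpl_to_file() now returning distinct errno values, the wrapper above propagates ret rather than flattening every failure back to -1, so a caller can tell a too-small buffer (-EMSGSIZE) from an I/O failure (-EIO). The shape of the change, with stand-in helpers (the real ones, per the comment above, recompute the buffer length via strlen()):

	#include <errno.h>

	/* Illustrative stand-ins for the two helpers named above. */
	static int update_buf(void *dev)  { (void)dev; return 0; }
	static int write_file(void *dev)  { (void)dev; return 0; }

	static int update_and_write(void *dev)
	{
		int ret = update_buf(dev);	/* may return -EMSGSIZE */
		if (ret != 0)
			return ret;		/* was: return -1 */

		return write_file(dev);		/* may return -ENOENT or -EIO */
	}
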
@@ -2070,13 +2070,13 @@ static int core_scsi3_emulate_pro_register( | |||
2070 | int spec_i_pt, | 2070 | int spec_i_pt, |
2071 | int ignore_key) | 2071 | int ignore_key) |
2072 | { | 2072 | { |
2073 | struct se_session *se_sess = SE_SESS(cmd); | 2073 | struct se_session *se_sess = cmd->se_sess; |
2074 | struct se_device *dev = SE_DEV(cmd); | 2074 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
2075 | struct se_dev_entry *se_deve; | 2075 | struct se_dev_entry *se_deve; |
2076 | struct se_lun *se_lun = SE_LUN(cmd); | 2076 | struct se_lun *se_lun = cmd->se_lun; |
2077 | struct se_portal_group *se_tpg; | 2077 | struct se_portal_group *se_tpg; |
2078 | struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e; | 2078 | struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e; |
2079 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 2079 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
2080 | /* Used for APTPL metadata w/ UNREGISTER */ | 2080 | /* Used for APTPL metadata w/ UNREGISTER */ |
2081 | unsigned char *pr_aptpl_buf = NULL; | 2081 | unsigned char *pr_aptpl_buf = NULL; |
2082 | unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; | 2082 | unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; |
@@ -2089,9 +2089,9 @@ static int core_scsi3_emulate_pro_register( | |||
2089 | se_tpg = se_sess->se_tpg; | 2089 | se_tpg = se_sess->se_tpg; |
2090 | se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; | 2090 | se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; |
2091 | 2091 | ||
2092 | if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) { | 2092 | if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) { |
2093 | memset(&isid_buf[0], 0, PR_REG_ISID_LEN); | 2093 | memset(&isid_buf[0], 0, PR_REG_ISID_LEN); |
2094 | TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, &isid_buf[0], | 2094 | se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, &isid_buf[0], |
2095 | PR_REG_ISID_LEN); | 2095 | PR_REG_ISID_LEN); |
2096 | isid_ptr = &isid_buf[0]; | 2096 | isid_ptr = &isid_buf[0]; |
2097 | } | 2097 | } |
@@ -2117,7 +2117,7 @@ static int core_scsi3_emulate_pro_register( | |||
2117 | * Port Endpoint that the PRO was received from on the | 2117 | * Port Endpoint that the PRO was received from on the |
2118 | * Logical Unit of the SCSI device server. | 2118 | * Logical Unit of the SCSI device server. |
2119 | */ | 2119 | */ |
2120 | ret = core_scsi3_alloc_registration(SE_DEV(cmd), | 2120 | ret = core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev, |
2121 | se_sess->se_node_acl, se_deve, isid_ptr, | 2121 | se_sess->se_node_acl, se_deve, isid_ptr, |
2122 | sa_res_key, all_tg_pt, aptpl, | 2122 | sa_res_key, all_tg_pt, aptpl, |
2123 | ignore_key, 0); | 2123 | ignore_key, 0); |
@@ -2145,7 +2145,7 @@ static int core_scsi3_emulate_pro_register( | |||
2145 | */ | 2145 | */ |
2146 | if (!(aptpl)) { | 2146 | if (!(aptpl)) { |
2147 | pr_tmpl->pr_aptpl_active = 0; | 2147 | pr_tmpl->pr_aptpl_active = 0; |
2148 | core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0); | 2148 | core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, NULL, 0); |
2149 | printk("SPC-3 PR: Set APTPL Bit Deactivated for" | 2149 | printk("SPC-3 PR: Set APTPL Bit Deactivated for" |
2150 | " REGISTER\n"); | 2150 | " REGISTER\n"); |
2151 | return 0; | 2151 | return 0; |
@@ -2155,10 +2155,10 @@ static int core_scsi3_emulate_pro_register( | |||
2155 | * update the APTPL metadata information using its | 2155 | * update the APTPL metadata information using its |
2156 | * preallocated *pr_reg->pr_aptpl_buf. | 2156 | * preallocated *pr_reg->pr_aptpl_buf. |
2157 | */ | 2157 | */ |
2158 | pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), | 2158 | pr_reg = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, |
2159 | se_sess->se_node_acl, se_sess); | 2159 | se_sess->se_node_acl, se_sess); |
2160 | 2160 | ||
2161 | ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), | 2161 | ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, |
2162 | &pr_reg->pr_aptpl_buf[0], | 2162 | &pr_reg->pr_aptpl_buf[0], |
2163 | pr_tmpl->pr_aptpl_buf_len); | 2163 | pr_tmpl->pr_aptpl_buf_len); |
2164 | if (!(ret)) { | 2164 | if (!(ret)) { |
@@ -2223,7 +2223,7 @@ static int core_scsi3_emulate_pro_register( | |||
2223 | */ | 2223 | */ |
2224 | if (!(sa_res_key)) { | 2224 | if (!(sa_res_key)) { |
2225 | pr_holder = core_scsi3_check_implict_release( | 2225 | pr_holder = core_scsi3_check_implict_release( |
2226 | SE_DEV(cmd), pr_reg); | 2226 | cmd->se_lun->lun_se_dev, pr_reg); |
2227 | if (pr_holder < 0) { | 2227 | if (pr_holder < 0) { |
2228 | kfree(pr_aptpl_buf); | 2228 | kfree(pr_aptpl_buf); |
2229 | core_scsi3_put_pr_reg(pr_reg); | 2229 | core_scsi3_put_pr_reg(pr_reg); |
@@ -2260,7 +2260,7 @@ static int core_scsi3_emulate_pro_register( | |||
2260 | /* | 2260 | /* |
2261 | * Release the calling I_T Nexus registration now.. | 2261 | * Release the calling I_T Nexus registration now.. |
2262 | */ | 2262 | */ |
2263 | __core_scsi3_free_registration(SE_DEV(cmd), pr_reg, | 2263 | __core_scsi3_free_registration(cmd->se_lun->lun_se_dev, pr_reg, |
2264 | NULL, 1); | 2264 | NULL, 1); |
2265 | /* | 2265 | /* |
2266 | * From spc4r17, section 5.7.11.3 Unregistering | 2266 | * From spc4r17, section 5.7.11.3 Unregistering |
@@ -2315,11 +2315,11 @@ static int core_scsi3_emulate_pro_register( | |||
2315 | * READ_KEYS service action. | 2315 | * READ_KEYS service action. |
2316 | */ | 2316 | */ |
2317 | pr_reg->pr_res_generation = core_scsi3_pr_generation( | 2317 | pr_reg->pr_res_generation = core_scsi3_pr_generation( |
2318 | SE_DEV(cmd)); | 2318 | cmd->se_lun->lun_se_dev); |
2319 | pr_reg->pr_res_key = sa_res_key; | 2319 | pr_reg->pr_res_key = sa_res_key; |
2320 | printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation" | 2320 | printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation" |
2321 | " Key for %s to: 0x%016Lx PRgeneration:" | 2321 | " Key for %s to: 0x%016Lx PRgeneration:" |
2322 | " 0x%08x\n", CMD_TFO(cmd)->get_fabric_name(), | 2322 | " 0x%08x\n", cmd->se_tfo->get_fabric_name(), |
2323 | (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "", | 2323 | (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "", |
2324 | pr_reg->pr_reg_nacl->initiatorname, | 2324 | pr_reg->pr_reg_nacl->initiatorname, |
2325 | pr_reg->pr_res_key, pr_reg->pr_res_generation); | 2325 | pr_reg->pr_res_key, pr_reg->pr_res_generation); |
@@ -2378,12 +2378,12 @@ static int core_scsi3_pro_reserve( | |||
2378 | int scope, | 2378 | int scope, |
2379 | u64 res_key) | 2379 | u64 res_key) |
2380 | { | 2380 | { |
2381 | struct se_session *se_sess = SE_SESS(cmd); | 2381 | struct se_session *se_sess = cmd->se_sess; |
2382 | struct se_dev_entry *se_deve; | 2382 | struct se_dev_entry *se_deve; |
2383 | struct se_lun *se_lun = SE_LUN(cmd); | 2383 | struct se_lun *se_lun = cmd->se_lun; |
2384 | struct se_portal_group *se_tpg; | 2384 | struct se_portal_group *se_tpg; |
2385 | struct t10_pr_registration *pr_reg, *pr_res_holder; | 2385 | struct t10_pr_registration *pr_reg, *pr_res_holder; |
2386 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 2386 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
2387 | char i_buf[PR_REG_ISID_ID_LEN]; | 2387 | char i_buf[PR_REG_ISID_ID_LEN]; |
2388 | int ret, prf_isid; | 2388 | int ret, prf_isid; |
2389 | 2389 | ||
@@ -2398,7 +2398,7 @@ static int core_scsi3_pro_reserve( | |||
2398 | /* | 2398 | /* |
2399 | * Locate the existing *pr_reg via struct se_node_acl pointers | 2399 | * Locate the existing *pr_reg via struct se_node_acl pointers |
2400 | */ | 2400 | */ |
2401 | pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl, | 2401 | pr_reg = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, se_sess->se_node_acl, |
2402 | se_sess); | 2402 | se_sess); |
2403 | if (!(pr_reg)) { | 2403 | if (!(pr_reg)) { |
2404 | printk(KERN_ERR "SPC-3 PR: Unable to locate" | 2404 | printk(KERN_ERR "SPC-3 PR: Unable to locate" |
@@ -2459,9 +2459,9 @@ static int core_scsi3_pro_reserve( | |||
2459 | printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from" | 2459 | printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from" |
2460 | " [%s]: %s while reservation already held by" | 2460 | " [%s]: %s while reservation already held by" |
2461 | " [%s]: %s, returning RESERVATION_CONFLICT\n", | 2461 | " [%s]: %s, returning RESERVATION_CONFLICT\n", |
2462 | CMD_TFO(cmd)->get_fabric_name(), | 2462 | cmd->se_tfo->get_fabric_name(), |
2463 | se_sess->se_node_acl->initiatorname, | 2463 | se_sess->se_node_acl->initiatorname, |
2464 | TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(), | 2464 | pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), |
2465 | pr_res_holder->pr_reg_nacl->initiatorname); | 2465 | pr_res_holder->pr_reg_nacl->initiatorname); |
2466 | 2466 | ||
2467 | spin_unlock(&dev->dev_reservation_lock); | 2467 | spin_unlock(&dev->dev_reservation_lock); |
@@ -2482,9 +2482,9 @@ static int core_scsi3_pro_reserve( | |||
2482 | " [%s]: %s trying to change TYPE and/or SCOPE," | 2482 | " [%s]: %s trying to change TYPE and/or SCOPE," |
2483 | " while reservation already held by [%s]: %s," | 2483 | " while reservation already held by [%s]: %s," |
2484 | " returning RESERVATION_CONFLICT\n", | 2484 | " returning RESERVATION_CONFLICT\n", |
2485 | CMD_TFO(cmd)->get_fabric_name(), | 2485 | cmd->se_tfo->get_fabric_name(), |
2486 | se_sess->se_node_acl->initiatorname, | 2486 | se_sess->se_node_acl->initiatorname, |
2487 | TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(), | 2487 | pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), |
2488 | pr_res_holder->pr_reg_nacl->initiatorname); | 2488 | pr_res_holder->pr_reg_nacl->initiatorname); |
2489 | 2489 | ||
2490 | spin_unlock(&dev->dev_reservation_lock); | 2490 | spin_unlock(&dev->dev_reservation_lock); |
@@ -2518,16 +2518,16 @@ static int core_scsi3_pro_reserve( | |||
2518 | 2518 | ||
2519 | printk(KERN_INFO "SPC-3 PR [%s] Service Action: RESERVE created new" | 2519 | printk(KERN_INFO "SPC-3 PR [%s] Service Action: RESERVE created new" |
2520 | " reservation holder TYPE: %s ALL_TG_PT: %d\n", | 2520 | " reservation holder TYPE: %s ALL_TG_PT: %d\n", |
2521 | CMD_TFO(cmd)->get_fabric_name(), core_scsi3_pr_dump_type(type), | 2521 | cmd->se_tfo->get_fabric_name(), core_scsi3_pr_dump_type(type), |
2522 | (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); | 2522 | (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); |
2523 | printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n", | 2523 | printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n", |
2524 | CMD_TFO(cmd)->get_fabric_name(), | 2524 | cmd->se_tfo->get_fabric_name(), |
2525 | se_sess->se_node_acl->initiatorname, | 2525 | se_sess->se_node_acl->initiatorname, |
2526 | (prf_isid) ? &i_buf[0] : ""); | 2526 | (prf_isid) ? &i_buf[0] : ""); |
2527 | spin_unlock(&dev->dev_reservation_lock); | 2527 | spin_unlock(&dev->dev_reservation_lock); |
2528 | 2528 | ||
2529 | if (pr_tmpl->pr_aptpl_active) { | 2529 | if (pr_tmpl->pr_aptpl_active) { |
2530 | ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), | 2530 | ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, |
2531 | &pr_reg->pr_aptpl_buf[0], | 2531 | &pr_reg->pr_aptpl_buf[0], |
2532 | pr_tmpl->pr_aptpl_buf_len); | 2532 | pr_tmpl->pr_aptpl_buf_len); |
2533 | if (!(ret)) | 2533 | if (!(ret)) |
@@ -2608,10 +2608,10 @@ static int core_scsi3_emulate_pro_release( | |||
2608 | u64 res_key) | 2608 | u64 res_key) |
2609 | { | 2609 | { |
2610 | struct se_device *dev = cmd->se_dev; | 2610 | struct se_device *dev = cmd->se_dev; |
2611 | struct se_session *se_sess = SE_SESS(cmd); | 2611 | struct se_session *se_sess = cmd->se_sess; |
2612 | struct se_lun *se_lun = SE_LUN(cmd); | 2612 | struct se_lun *se_lun = cmd->se_lun; |
2613 | struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder; | 2613 | struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder; |
2614 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 2614 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
2615 | int ret, all_reg = 0; | 2615 | int ret, all_reg = 0; |
2616 | 2616 | ||
2617 | if (!(se_sess) || !(se_lun)) { | 2617 | if (!(se_sess) || !(se_lun)) { |
@@ -2698,9 +2698,9 @@ static int core_scsi3_emulate_pro_release( | |||
2698 | " reservation from [%s]: %s with different TYPE " | 2698 | " reservation from [%s]: %s with different TYPE " |
2699 | "and/or SCOPE while reservation already held by" | 2699 | "and/or SCOPE while reservation already held by" |
2700 | " [%s]: %s, returning RESERVATION_CONFLICT\n", | 2700 | " [%s]: %s, returning RESERVATION_CONFLICT\n", |
2701 | CMD_TFO(cmd)->get_fabric_name(), | 2701 | cmd->se_tfo->get_fabric_name(), |
2702 | se_sess->se_node_acl->initiatorname, | 2702 | se_sess->se_node_acl->initiatorname, |
2703 | TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(), | 2703 | pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), |
2704 | pr_res_holder->pr_reg_nacl->initiatorname); | 2704 | pr_res_holder->pr_reg_nacl->initiatorname); |
2705 | 2705 | ||
2706 | spin_unlock(&dev->dev_reservation_lock); | 2706 | spin_unlock(&dev->dev_reservation_lock); |
@@ -2758,7 +2758,7 @@ static int core_scsi3_emulate_pro_release( | |||
2758 | 2758 | ||
2759 | write_aptpl: | 2759 | write_aptpl: |
2760 | if (pr_tmpl->pr_aptpl_active) { | 2760 | if (pr_tmpl->pr_aptpl_active) { |
2761 | ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), | 2761 | ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, |
2762 | &pr_reg->pr_aptpl_buf[0], | 2762 | &pr_reg->pr_aptpl_buf[0], |
2763 | pr_tmpl->pr_aptpl_buf_len); | 2763 | pr_tmpl->pr_aptpl_buf_len); |
2764 | if (!(ret)) | 2764 | if (!(ret)) |
@@ -2775,15 +2775,15 @@ static int core_scsi3_emulate_pro_clear( | |||
2775 | { | 2775 | { |
2776 | struct se_device *dev = cmd->se_dev; | 2776 | struct se_device *dev = cmd->se_dev; |
2777 | struct se_node_acl *pr_reg_nacl; | 2777 | struct se_node_acl *pr_reg_nacl; |
2778 | struct se_session *se_sess = SE_SESS(cmd); | 2778 | struct se_session *se_sess = cmd->se_sess; |
2779 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 2779 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
2780 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; | 2780 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; |
2781 | u32 pr_res_mapped_lun = 0; | 2781 | u32 pr_res_mapped_lun = 0; |
2782 | int calling_it_nexus = 0; | 2782 | int calling_it_nexus = 0; |
2783 | /* | 2783 | /* |
2784 | * Locate the existing *pr_reg via struct se_node_acl pointers | 2784 | * Locate the existing *pr_reg via struct se_node_acl pointers |
2785 | */ | 2785 | */ |
2786 | pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd), | 2786 | pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, |
2787 | se_sess->se_node_acl, se_sess); | 2787 | se_sess->se_node_acl, se_sess); |
2788 | if (!(pr_reg_n)) { | 2788 | if (!(pr_reg_n)) { |
2789 | printk(KERN_ERR "SPC-3 PR: Unable to locate" | 2789 | printk(KERN_ERR "SPC-3 PR: Unable to locate" |
@@ -2846,10 +2846,10 @@ static int core_scsi3_emulate_pro_clear( | |||
2846 | spin_unlock(&pr_tmpl->registration_lock); | 2846 | spin_unlock(&pr_tmpl->registration_lock); |
2847 | 2847 | ||
2848 | printk(KERN_INFO "SPC-3 PR [%s] Service Action: CLEAR complete\n", | 2848 | printk(KERN_INFO "SPC-3 PR [%s] Service Action: CLEAR complete\n", |
2849 | CMD_TFO(cmd)->get_fabric_name()); | 2849 | cmd->se_tfo->get_fabric_name()); |
2850 | 2850 | ||
2851 | if (pr_tmpl->pr_aptpl_active) { | 2851 | if (pr_tmpl->pr_aptpl_active) { |
2852 | core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0); | 2852 | core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, NULL, 0); |
2853 | printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata" | 2853 | printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata" |
2854 | " for CLEAR\n"); | 2854 | " for CLEAR\n"); |
2855 | } | 2855 | } |
@@ -2954,13 +2954,13 @@ static int core_scsi3_pro_preempt( | |||
2954 | u64 sa_res_key, | 2954 | u64 sa_res_key, |
2955 | int abort) | 2955 | int abort) |
2956 | { | 2956 | { |
2957 | struct se_device *dev = SE_DEV(cmd); | 2957 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
2958 | struct se_dev_entry *se_deve; | 2958 | struct se_dev_entry *se_deve; |
2959 | struct se_node_acl *pr_reg_nacl; | 2959 | struct se_node_acl *pr_reg_nacl; |
2960 | struct se_session *se_sess = SE_SESS(cmd); | 2960 | struct se_session *se_sess = cmd->se_sess; |
2961 | struct list_head preempt_and_abort_list; | 2961 | struct list_head preempt_and_abort_list; |
2962 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; | 2962 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; |
2963 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 2963 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
2964 | u32 pr_res_mapped_lun = 0; | 2964 | u32 pr_res_mapped_lun = 0; |
2965 | int all_reg = 0, calling_it_nexus = 0, released_regs = 0; | 2965 | int all_reg = 0, calling_it_nexus = 0, released_regs = 0; |
2966 | int prh_type = 0, prh_scope = 0, ret; | 2966 | int prh_type = 0, prh_scope = 0, ret; |
@@ -2969,7 +2969,7 @@ static int core_scsi3_pro_preempt( | |||
2969 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 2969 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
2970 | 2970 | ||
2971 | se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; | 2971 | se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; |
2972 | pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl, | 2972 | pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, se_sess->se_node_acl, |
2973 | se_sess); | 2973 | se_sess); |
2974 | if (!(pr_reg_n)) { | 2974 | if (!(pr_reg_n)) { |
2975 | printk(KERN_ERR "SPC-3 PR: Unable to locate" | 2975 | printk(KERN_ERR "SPC-3 PR: Unable to locate" |
@@ -3111,7 +3111,7 @@ static int core_scsi3_pro_preempt( | |||
3111 | spin_unlock(&dev->dev_reservation_lock); | 3111 | spin_unlock(&dev->dev_reservation_lock); |
3112 | 3112 | ||
3113 | if (pr_tmpl->pr_aptpl_active) { | 3113 | if (pr_tmpl->pr_aptpl_active) { |
3114 | ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), | 3114 | ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, |
3115 | &pr_reg_n->pr_aptpl_buf[0], | 3115 | &pr_reg_n->pr_aptpl_buf[0], |
3116 | pr_tmpl->pr_aptpl_buf_len); | 3116 | pr_tmpl->pr_aptpl_buf_len); |
3117 | if (!(ret)) | 3117 | if (!(ret)) |
@@ -3121,7 +3121,7 @@ static int core_scsi3_pro_preempt( | |||
3121 | } | 3121 | } |
3122 | 3122 | ||
3123 | core_scsi3_put_pr_reg(pr_reg_n); | 3123 | core_scsi3_put_pr_reg(pr_reg_n); |
3124 | core_scsi3_pr_generation(SE_DEV(cmd)); | 3124 | core_scsi3_pr_generation(cmd->se_lun->lun_se_dev); |
3125 | return 0; | 3125 | return 0; |
3126 | } | 3126 | } |
3127 | /* | 3127 | /* |
@@ -3247,7 +3247,7 @@ static int core_scsi3_pro_preempt( | |||
3247 | } | 3247 | } |
3248 | 3248 | ||
3249 | if (pr_tmpl->pr_aptpl_active) { | 3249 | if (pr_tmpl->pr_aptpl_active) { |
3250 | ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), | 3250 | ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, |
3251 | &pr_reg_n->pr_aptpl_buf[0], | 3251 | &pr_reg_n->pr_aptpl_buf[0], |
3252 | pr_tmpl->pr_aptpl_buf_len); | 3252 | pr_tmpl->pr_aptpl_buf_len); |
3253 | if (!(ret)) | 3253 | if (!(ret)) |
@@ -3256,7 +3256,7 @@ static int core_scsi3_pro_preempt( | |||
3256 | } | 3256 | } |
3257 | 3257 | ||
3258 | core_scsi3_put_pr_reg(pr_reg_n); | 3258 | core_scsi3_put_pr_reg(pr_reg_n); |
3259 | core_scsi3_pr_generation(SE_DEV(cmd)); | 3259 | core_scsi3_pr_generation(cmd->se_lun->lun_se_dev); |
3260 | return 0; | 3260 | return 0; |
3261 | } | 3261 | } |
3262 | 3262 | ||
@@ -3297,17 +3297,17 @@ static int core_scsi3_emulate_pro_register_and_move( | |||
3297 | int aptpl, | 3297 | int aptpl, |
3298 | int unreg) | 3298 | int unreg) |
3299 | { | 3299 | { |
3300 | struct se_session *se_sess = SE_SESS(cmd); | 3300 | struct se_session *se_sess = cmd->se_sess; |
3301 | struct se_device *dev = SE_DEV(cmd); | 3301 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
3302 | struct se_dev_entry *se_deve, *dest_se_deve = NULL; | 3302 | struct se_dev_entry *se_deve, *dest_se_deve = NULL; |
3303 | struct se_lun *se_lun = SE_LUN(cmd); | 3303 | struct se_lun *se_lun = cmd->se_lun; |
3304 | struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL; | 3304 | struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL; |
3305 | struct se_port *se_port; | 3305 | struct se_port *se_port; |
3306 | struct se_portal_group *se_tpg, *dest_se_tpg = NULL; | 3306 | struct se_portal_group *se_tpg, *dest_se_tpg = NULL; |
3307 | struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; | 3307 | struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; |
3308 | struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; | 3308 | struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; |
3309 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 3309 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
3310 | unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; | 3310 | unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; |
3311 | unsigned char *initiator_str; | 3311 | unsigned char *initiator_str; |
3312 | char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; | 3312 | char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; |
3313 | u32 tid_len, tmp_tid_len; | 3313 | u32 tid_len, tmp_tid_len; |
@@ -3322,7 +3322,7 @@ static int core_scsi3_emulate_pro_register_and_move( | |||
3322 | memset(dest_iport, 0, 64); | 3322 | memset(dest_iport, 0, 64); |
3323 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); | 3323 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); |
3324 | se_tpg = se_sess->se_tpg; | 3324 | se_tpg = se_sess->se_tpg; |
3325 | tf_ops = TPG_TFO(se_tpg); | 3325 | tf_ops = se_tpg->se_tpg_tfo; |
3326 | se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; | 3326 | se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; |
3327 | /* | 3327 | /* |
3328 | * Follow logic from spc4r17 Section 5.7.8, Table 50 -- | 3328 | * Follow logic from spc4r17 Section 5.7.8, Table 50 -- |
@@ -3330,7 +3330,7 @@ static int core_scsi3_emulate_pro_register_and_move( | |||
3330 | * | 3330 | * |
3331 | * Locate the existing *pr_reg via struct se_node_acl pointers | 3331 | * Locate the existing *pr_reg via struct se_node_acl pointers |
3332 | */ | 3332 | */ |
3333 | pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl, | 3333 | pr_reg = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, se_sess->se_node_acl, |
3334 | se_sess); | 3334 | se_sess); |
3335 | if (!(pr_reg)) { | 3335 | if (!(pr_reg)) { |
3336 | printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED" | 3336 | printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED" |
@@ -3384,7 +3384,7 @@ static int core_scsi3_emulate_pro_register_and_move( | |||
3384 | dest_se_tpg = se_port->sep_tpg; | 3384 | dest_se_tpg = se_port->sep_tpg; |
3385 | if (!(dest_se_tpg)) | 3385 | if (!(dest_se_tpg)) |
3386 | continue; | 3386 | continue; |
3387 | dest_tf_ops = TPG_TFO(dest_se_tpg); | 3387 | dest_tf_ops = dest_se_tpg->se_tpg_tfo; |
3388 | if (!(dest_tf_ops)) | 3388 | if (!(dest_tf_ops)) |
3389 | continue; | 3389 | continue; |
3390 | 3390 | ||
@@ -3612,7 +3612,7 @@ after_iport_check: | |||
3612 | dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, | 3612 | dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, |
3613 | iport_ptr); | 3613 | iport_ptr); |
3614 | if (!(dest_pr_reg)) { | 3614 | if (!(dest_pr_reg)) { |
3615 | ret = core_scsi3_alloc_registration(SE_DEV(cmd), | 3615 | ret = core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev, |
3616 | dest_node_acl, dest_se_deve, iport_ptr, | 3616 | dest_node_acl, dest_se_deve, iport_ptr, |
3617 | sa_res_key, 0, aptpl, 2, 1); | 3617 | sa_res_key, 0, aptpl, 2, 1); |
3618 | if (ret != 0) { | 3618 | if (ret != 0) { |
@@ -3683,12 +3683,12 @@ after_iport_check: | |||
3683 | */ | 3683 | */ |
3684 | if (!(aptpl)) { | 3684 | if (!(aptpl)) { |
3685 | pr_tmpl->pr_aptpl_active = 0; | 3685 | pr_tmpl->pr_aptpl_active = 0; |
3686 | core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0); | 3686 | core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, NULL, 0); |
3687 | printk("SPC-3 PR: Set APTPL Bit Deactivated for" | 3687 | printk("SPC-3 PR: Set APTPL Bit Deactivated for" |
3688 | " REGISTER_AND_MOVE\n"); | 3688 | " REGISTER_AND_MOVE\n"); |
3689 | } else { | 3689 | } else { |
3690 | pr_tmpl->pr_aptpl_active = 1; | 3690 | pr_tmpl->pr_aptpl_active = 1; |
3691 | ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), | 3691 | ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, |
3692 | &dest_pr_reg->pr_aptpl_buf[0], | 3692 | &dest_pr_reg->pr_aptpl_buf[0], |
3693 | pr_tmpl->pr_aptpl_buf_len); | 3693 | pr_tmpl->pr_aptpl_buf_len); |
3694 | if (!(ret)) | 3694 | if (!(ret)) |
@@ -3723,7 +3723,7 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb) | |||
3723 | */ | 3723 | */ |
3724 | static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) | 3724 | static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) |
3725 | { | 3725 | { |
3726 | unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; | 3726 | unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; |
3727 | u64 res_key, sa_res_key; | 3727 | u64 res_key, sa_res_key; |
3728 | int sa, scope, type, aptpl; | 3728 | int sa, scope, type, aptpl; |
3729 | int spec_i_pt = 0, all_tg_pt = 0, unreg = 0; | 3729 | int spec_i_pt = 0, all_tg_pt = 0, unreg = 0; |
@@ -3731,7 +3731,7 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) | |||
3731 | * FIXME: A NULL struct se_session pointer means this is not coming from | 3731 | * FIXME: A NULL struct se_session pointer means this is not coming from |
3732 | * a $FABRIC_MOD's nexus, but from internal passthrough ops. | 3732 | * a $FABRIC_MOD's nexus, but from internal passthrough ops. |
3733 | */ | 3733 | */ |
3734 | if (!(SE_SESS(cmd))) | 3734 | if (!(cmd->se_sess)) |
3735 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 3735 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
3736 | 3736 | ||
3737 | if (cmd->data_length < 24) { | 3737 | if (cmd->data_length < 24) { |
@@ -3827,10 +3827,10 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) | |||
3827 | */ | 3827 | */ |
3828 | static int core_scsi3_pri_read_keys(struct se_cmd *cmd) | 3828 | static int core_scsi3_pri_read_keys(struct se_cmd *cmd) |
3829 | { | 3829 | { |
3830 | struct se_device *se_dev = SE_DEV(cmd); | 3830 | struct se_device *se_dev = cmd->se_lun->lun_se_dev; |
3831 | struct se_subsystem_dev *su_dev = SU_DEV(se_dev); | 3831 | struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; |
3832 | struct t10_pr_registration *pr_reg; | 3832 | struct t10_pr_registration *pr_reg; |
3833 | unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; | 3833 | unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; |
3834 | u32 add_len = 0, off = 8; | 3834 | u32 add_len = 0, off = 8; |
3835 | 3835 | ||
3836 | if (cmd->data_length < 8) { | 3836 | if (cmd->data_length < 8) { |
@@ -3839,13 +3839,13 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) | |||
3839 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | 3839 | return PYX_TRANSPORT_INVALID_CDB_FIELD; |
3840 | } | 3840 | } |
3841 | 3841 | ||
3842 | buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff); | 3842 | buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); |
3843 | buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff); | 3843 | buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); |
3844 | buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff); | 3844 | buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); |
3845 | buf[3] = (T10_RES(su_dev)->pr_generation & 0xff); | 3845 | buf[3] = (su_dev->t10_pr.pr_generation & 0xff); |
3846 | 3846 | ||
3847 | spin_lock(&T10_RES(su_dev)->registration_lock); | 3847 | spin_lock(&su_dev->t10_pr.registration_lock); |
3848 | list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list, | 3848 | list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list, |
3849 | pr_reg_list) { | 3849 | pr_reg_list) { |
3850 | /* | 3850 | /* |
3851 | * Check for overflow of 8byte PRI READ_KEYS payload and | 3851 | * Check for overflow of 8byte PRI READ_KEYS payload and |
@@ -3865,7 +3865,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) | |||
3865 | 3865 | ||
3866 | add_len += 8; | 3866 | add_len += 8; |
3867 | } | 3867 | } |
3868 | spin_unlock(&T10_RES(su_dev)->registration_lock); | 3868 | spin_unlock(&su_dev->t10_pr.registration_lock); |
3869 | 3869 | ||
3870 | buf[4] = ((add_len >> 24) & 0xff); | 3870 | buf[4] = ((add_len >> 24) & 0xff); |
3871 | buf[5] = ((add_len >> 16) & 0xff); | 3871 | buf[5] = ((add_len >> 16) & 0xff); |
@@ -3882,10 +3882,10 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) | |||
3882 | */ | 3882 | */ |
3883 | static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) | 3883 | static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) |
3884 | { | 3884 | { |
3885 | struct se_device *se_dev = SE_DEV(cmd); | 3885 | struct se_device *se_dev = cmd->se_lun->lun_se_dev; |
3886 | struct se_subsystem_dev *su_dev = SU_DEV(se_dev); | 3886 | struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; |
3887 | struct t10_pr_registration *pr_reg; | 3887 | struct t10_pr_registration *pr_reg; |
3888 | unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; | 3888 | unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; |
3889 | u64 pr_res_key; | 3889 | u64 pr_res_key; |
3890 | u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */ | 3890 | u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */ |
3891 | 3891 | ||
@@ -3895,10 +3895,10 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) | |||
3895 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | 3895 | return PYX_TRANSPORT_INVALID_CDB_FIELD; |
3896 | } | 3896 | } |
3897 | 3897 | ||
3898 | buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff); | 3898 | buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); |
3899 | buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff); | 3899 | buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); |
3900 | buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff); | 3900 | buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); |
3901 | buf[3] = (T10_RES(su_dev)->pr_generation & 0xff); | 3901 | buf[3] = (su_dev->t10_pr.pr_generation & 0xff); |
3902 | 3902 | ||
3903 | spin_lock(&se_dev->dev_reservation_lock); | 3903 | spin_lock(&se_dev->dev_reservation_lock); |
3904 | pr_reg = se_dev->dev_pr_res_holder; | 3904 | pr_reg = se_dev->dev_pr_res_holder; |
@@ -3963,9 +3963,9 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) | |||
3963 | */ | 3963 | */ |
3964 | static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) | 3964 | static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) |
3965 | { | 3965 | { |
3966 | struct se_device *dev = SE_DEV(cmd); | 3966 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
3967 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | 3967 | struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; |
3968 | unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; | 3968 | unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; |
3969 | u16 add_len = 8; /* Hardcoded to 8. */ | 3969 | u16 add_len = 8; /* Hardcoded to 8. */ |
3970 | 3970 | ||
3971 | if (cmd->data_length < 6) { | 3971 | if (cmd->data_length < 6) { |
@@ -4014,13 +4014,13 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) | |||
4014 | */ | 4014 | */ |
4015 | static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) | 4015 | static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) |
4016 | { | 4016 | { |
4017 | struct se_device *se_dev = SE_DEV(cmd); | 4017 | struct se_device *se_dev = cmd->se_lun->lun_se_dev; |
4018 | struct se_node_acl *se_nacl; | 4018 | struct se_node_acl *se_nacl; |
4019 | struct se_subsystem_dev *su_dev = SU_DEV(se_dev); | 4019 | struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; |
4020 | struct se_portal_group *se_tpg; | 4020 | struct se_portal_group *se_tpg; |
4021 | struct t10_pr_registration *pr_reg, *pr_reg_tmp; | 4021 | struct t10_pr_registration *pr_reg, *pr_reg_tmp; |
4022 | struct t10_reservation_template *pr_tmpl = &SU_DEV(se_dev)->t10_reservation; | 4022 | struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr; |
4023 | unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; | 4023 | unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; |
4024 | u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len; | 4024 | u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len; |
4025 | u32 off = 8; /* off into first Full Status descriptor */ | 4025 | u32 off = 8; /* off into first Full Status descriptor */ |
4026 | int format_code = 0; | 4026 | int format_code = 0; |
@@ -4031,10 +4031,10 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) | |||
4031 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | 4031 | return PYX_TRANSPORT_INVALID_CDB_FIELD; |
4032 | } | 4032 | } |
4033 | 4033 | ||
4034 | buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff); | 4034 | buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); |
4035 | buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff); | 4035 | buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); |
4036 | buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff); | 4036 | buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); |
4037 | buf[3] = (T10_RES(su_dev)->pr_generation & 0xff); | 4037 | buf[3] = (su_dev->t10_pr.pr_generation & 0xff); |
4038 | 4038 | ||
4039 | spin_lock(&pr_tmpl->registration_lock); | 4039 | spin_lock(&pr_tmpl->registration_lock); |
4040 | list_for_each_entry_safe(pr_reg, pr_reg_tmp, | 4040 | list_for_each_entry_safe(pr_reg, pr_reg_tmp, |
@@ -4051,7 +4051,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) | |||
4051 | * Determine expected length of $FABRIC_MOD specific | 4051 | * Determine expected length of $FABRIC_MOD specific |
4052 | * TransportID full status descriptor.. | 4052 | * TransportID full status descriptor.. |
4053 | */ | 4053 | */ |
4054 | exp_desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id_len( | 4054 | exp_desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id_len( |
4055 | se_tpg, se_nacl, pr_reg, &format_code); | 4055 | se_tpg, se_nacl, pr_reg, &format_code); |
4056 | 4056 | ||
4057 | if ((exp_desc_len + add_len) > cmd->data_length) { | 4057 | if ((exp_desc_len + add_len) > cmd->data_length) { |
@@ -4116,7 +4116,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) | |||
4116 | /* | 4116 | /* |
4117 | * Now, have the $FABRIC_MOD fill in the protocol identifier | 4117 | * Now, have the $FABRIC_MOD fill in the protocol identifier |
4118 | */ | 4118 | */ |
4119 | desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id(se_tpg, | 4119 | desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id(se_tpg, |
4120 | se_nacl, pr_reg, &format_code, &buf[off+4]); | 4120 | se_nacl, pr_reg, &format_code, &buf[off+4]); |
4121 | 4121 | ||
4122 | spin_lock(&pr_tmpl->registration_lock); | 4122 | spin_lock(&pr_tmpl->registration_lock); |
@@ -4174,7 +4174,7 @@ static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb) | |||
4174 | 4174 | ||
4175 | int core_scsi3_emulate_pr(struct se_cmd *cmd) | 4175 | int core_scsi3_emulate_pr(struct se_cmd *cmd) |
4176 | { | 4176 | { |
4177 | unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0]; | 4177 | unsigned char *cdb = &cmd->t_task->t_task_cdb[0]; |
4178 | struct se_device *dev = cmd->se_dev; | 4178 | struct se_device *dev = cmd->se_dev; |
4179 | /* | 4179 | /* |
4180 | * Following spc2r20 5.5.1 Reservations overview: | 4180 | * Following spc2r20 5.5.1 Reservations overview: |
@@ -4213,39 +4213,39 @@ static int core_pt_seq_non_holder( | |||
4213 | int core_setup_reservations(struct se_device *dev, int force_pt) | 4213 | int core_setup_reservations(struct se_device *dev, int force_pt) |
4214 | { | 4214 | { |
4215 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; | 4215 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
4216 | struct t10_reservation_template *rest = &su_dev->t10_reservation; | 4216 | struct t10_reservation *rest = &su_dev->t10_pr; |
4217 | /* | 4217 | /* |
4218 | * If this device is from Target_Core_Mod/pSCSI, use the reservations | 4218 | * If this device is from Target_Core_Mod/pSCSI, use the reservations |
4219 | * of the Underlying SCSI hardware. In Linux/SCSI terms, this can | 4219 | * of the Underlying SCSI hardware. In Linux/SCSI terms, this can |
4220 | * cause a problem because libata and some SATA RAID HBAs appear | 4220 | * cause a problem because libata and some SATA RAID HBAs appear |
4221 | * under Linux/SCSI, but emulate reservations themselves. | 4221 | * under Linux/SCSI, but emulate reservations themselves. |
4222 | */ | 4222 | */ |
4223 | if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) && | 4223 | if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) && |
4224 | !(DEV_ATTRIB(dev)->emulate_reservations)) || force_pt) { | 4224 | !(dev->se_sub_dev->se_dev_attrib.emulate_reservations)) || force_pt) { |
4225 | rest->res_type = SPC_PASSTHROUGH; | 4225 | rest->res_type = SPC_PASSTHROUGH; |
4226 | rest->pr_ops.t10_reservation_check = &core_pt_reservation_check; | 4226 | rest->pr_ops.t10_reservation_check = &core_pt_reservation_check; |
4227 | rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder; | 4227 | rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder; |
4228 | printk(KERN_INFO "%s: Using SPC_PASSTHROUGH, no reservation" | 4228 | printk(KERN_INFO "%s: Using SPC_PASSTHROUGH, no reservation" |
4229 | " emulation\n", TRANSPORT(dev)->name); | 4229 | " emulation\n", dev->transport->name); |
4230 | return 0; | 4230 | return 0; |
4231 | } | 4231 | } |
4232 | /* | 4232 | /* |
4233 | * If SPC-3 or above is reported by real or emulated struct se_device, | 4233 | * If SPC-3 or above is reported by real or emulated struct se_device, |
4234 | * use emulated Persistent Reservations. | 4234 | * use emulated Persistent Reservations. |
4235 | */ | 4235 | */ |
4236 | if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) { | 4236 | if (dev->transport->get_device_rev(dev) >= SCSI_3) { |
4237 | rest->res_type = SPC3_PERSISTENT_RESERVATIONS; | 4237 | rest->res_type = SPC3_PERSISTENT_RESERVATIONS; |
4238 | rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check; | 4238 | rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check; |
4239 | rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder; | 4239 | rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder; |
4240 | printk(KERN_INFO "%s: Using SPC3_PERSISTENT_RESERVATIONS" | 4240 | printk(KERN_INFO "%s: Using SPC3_PERSISTENT_RESERVATIONS" |
4241 | " emulation\n", TRANSPORT(dev)->name); | 4241 | " emulation\n", dev->transport->name); |
4242 | } else { | 4242 | } else { |
4243 | rest->res_type = SPC2_RESERVATIONS; | 4243 | rest->res_type = SPC2_RESERVATIONS; |
4244 | rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check; | 4244 | rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check; |
4245 | rest->pr_ops.t10_seq_non_holder = | 4245 | rest->pr_ops.t10_seq_non_holder = |
4246 | &core_scsi2_reservation_seq_non_holder; | 4246 | &core_scsi2_reservation_seq_non_holder; |
4247 | printk(KERN_INFO "%s: Using SPC2_RESERVATIONS emulation\n", | 4247 | printk(KERN_INFO "%s: Using SPC2_RESERVATIONS emulation\n", |
4248 | TRANSPORT(dev)->name); | 4248 | dev->transport->name); |
4249 | } | 4249 | } |
4250 | 4250 | ||
4251 | return 0; | 4251 | return 0; |
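A note on the shape of core_setup_reservations() above: the reservation flavor is chosen once at setup time and the matching handlers are wired into the pr_ops function-pointer table, so later callers dispatch through the pointers without knowing which flavor is active. A minimal userspace sketch of that dispatch, with illustrative stand-in types and values rather than the kernel's:

    #include <stdio.h>

    /* Illustrative stand-in for the kernel's pr_ops table. */
    struct pr_ops {
        int (*reservation_check)(unsigned long key);
    };

    struct t10_res {
        int res_type;
        struct pr_ops pr_ops;
    };

    static int passthrough_check(unsigned long key) { return 0; }
    static int spc3_check(unsigned long key) { return key ? 0 : -1; }

    /* Mirrors the setup above: choose handlers once, dispatch later. */
    static void setup_reservations(struct t10_res *rest, int spc3_capable)
    {
        rest->res_type = spc3_capable ? 1 : 0;   /* illustrative values */
        rest->pr_ops.reservation_check =
            spc3_capable ? spc3_check : passthrough_check;
    }

    int main(void)
    {
        struct t10_res rest;

        setup_reservations(&rest, 1);
        printf("check: %d\n", rest.pr_ops.reservation_check(42));
        return 0;
    }

(C assigns function pointers with or without a leading '&', so the '&core_pt_reservation_check' spelling in the hunk and the bare names in this sketch are equivalent.)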
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h index 5603bcfd86d3..c8f47d064584 100644 --- a/drivers/target/target_core_pr.h +++ b/drivers/target/target_core_pr.h | |||
@@ -49,7 +49,7 @@ extern int core_pr_dump_initiator_port(struct t10_pr_registration *, | |||
49 | char *, u32); | 49 | char *, u32); |
50 | extern int core_scsi2_emulate_crh(struct se_cmd *); | 50 | extern int core_scsi2_emulate_crh(struct se_cmd *); |
51 | extern int core_scsi3_alloc_aptpl_registration( | 51 | extern int core_scsi3_alloc_aptpl_registration( |
52 | struct t10_reservation_template *, u64, | 52 | struct t10_reservation *, u64, |
53 | unsigned char *, unsigned char *, u32, | 53 | unsigned char *, unsigned char *, u32, |
54 | unsigned char *, u16, u32, int, int, u8); | 54 | unsigned char *, u16, u32, int, int, u8); |
55 | extern int core_scsi3_check_aptpl_registration(struct se_device *, | 55 | extern int core_scsi3_check_aptpl_registration(struct se_device *, |
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 331d423fd0e0..44a79a5c6d32 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c | |||
@@ -55,24 +55,6 @@ static struct se_subsystem_api pscsi_template; | |||
55 | 55 | ||
56 | static void pscsi_req_done(struct request *, int); | 56 | static void pscsi_req_done(struct request *, int); |
57 | 57 | ||
58 | /* pscsi_get_sh(): | ||
59 | * | ||
60 | * ||
61 | */ | ||
62 | static struct Scsi_Host *pscsi_get_sh(u32 host_no) | ||
63 | { | ||
64 | struct Scsi_Host *sh = NULL; | ||
65 | |||
66 | sh = scsi_host_lookup(host_no); | ||
67 | if (IS_ERR(sh)) { | ||
68 | printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:" | ||
69 | " %u\n", host_no); | ||
70 | return NULL; | ||
71 | } | ||
72 | |||
73 | return sh; | ||
74 | } | ||
75 | |||
76 | /* pscsi_attach_hba(): | 58 | /* pscsi_attach_hba(): |
77 | * | 59 | * |
78 | * pscsi_get_sh() used scsi_host_lookup() to locate struct Scsi_Host. | 60 | * pscsi_get_sh() used scsi_host_lookup() to locate struct Scsi_Host. |
@@ -80,28 +62,23 @@ static struct Scsi_Host *pscsi_get_sh(u32 host_no) | |||
80 | */ | 62 | */ |
81 | static int pscsi_attach_hba(struct se_hba *hba, u32 host_id) | 63 | static int pscsi_attach_hba(struct se_hba *hba, u32 host_id) |
82 | { | 64 | { |
83 | int hba_depth; | ||
84 | struct pscsi_hba_virt *phv; | 65 | struct pscsi_hba_virt *phv; |
85 | 66 | ||
86 | phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL); | 67 | phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL); |
87 | if (!(phv)) { | 68 | if (!(phv)) { |
88 | printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n"); | 69 | printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n"); |
89 | return -1; | 70 | return -ENOMEM; |
90 | } | 71 | } |
91 | phv->phv_host_id = host_id; | 72 | phv->phv_host_id = host_id; |
92 | phv->phv_mode = PHV_VIRUTAL_HOST_ID; | 73 | phv->phv_mode = PHV_VIRUTAL_HOST_ID; |
93 | hba_depth = PSCSI_VIRTUAL_HBA_DEPTH; | ||
94 | atomic_set(&hba->left_queue_depth, hba_depth); | ||
95 | atomic_set(&hba->max_queue_depth, hba_depth); | ||
96 | 74 | ||
97 | hba->hba_ptr = (void *)phv; | 75 | hba->hba_ptr = (void *)phv; |
98 | 76 | ||
99 | printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on" | 77 | printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on" |
100 | " Generic Target Core Stack %s\n", hba->hba_id, | 78 | " Generic Target Core Stack %s\n", hba->hba_id, |
101 | PSCSI_VERSION, TARGET_CORE_MOD_VERSION); | 79 | PSCSI_VERSION, TARGET_CORE_MOD_VERSION); |
102 | printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic" | 80 | printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic\n", |
103 | " Target Core with TCQ Depth: %d\n", hba->hba_id, | 81 | hba->hba_id); |
104 | atomic_read(&hba->max_queue_depth)); | ||
105 | 82 | ||
106 | return 0; | 83 | return 0; |
107 | } | 84 | } |
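The pscsi_attach_hba() hunk is a small instance of the series-wide errno cleanup: allocation failure now returns -ENOMEM rather than a bare -1, which callers could only misreport as a generic failure. A hedged sketch of the convention, with made-up names:

    #include <errno.h>
    #include <stdlib.h>

    /* Return 0 or a negative errno the caller can propagate as-is. */
    static int attach_hba(void **out)
    {
        void *phv = calloc(1, 64);

        if (!phv)
            return -ENOMEM;    /* was "return -1" before this cleanup */
        *out = phv;
        return 0;
    }

A caller then simply does "ret = attach_hba(&p); if (ret < 0) return ret;" and the original failure reason survives all the way up the stack.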
@@ -130,7 +107,6 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) | |||
130 | { | 107 | { |
131 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr; | 108 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr; |
132 | struct Scsi_Host *sh = phv->phv_lld_host; | 109 | struct Scsi_Host *sh = phv->phv_lld_host; |
133 | int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH; | ||
134 | /* | 110 | /* |
135 | * Release the struct Scsi_Host | 111 | * Release the struct Scsi_Host |
136 | */ | 112 | */ |
@@ -140,8 +116,6 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) | |||
140 | 116 | ||
141 | phv->phv_lld_host = NULL; | 117 | phv->phv_lld_host = NULL; |
142 | phv->phv_mode = PHV_VIRUTAL_HOST_ID; | 118 | phv->phv_mode = PHV_VIRUTAL_HOST_ID; |
143 | atomic_set(&hba->left_queue_depth, hba_depth); | ||
144 | atomic_set(&hba->max_queue_depth, hba_depth); | ||
145 | 119 | ||
146 | printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough" | 120 | printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough" |
147 | " %s\n", hba->hba_id, (sh->hostt->name) ? | 121 | " %s\n", hba->hba_id, (sh->hostt->name) ? |
@@ -154,22 +128,12 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) | |||
154 | * Otherwise, locate struct Scsi_Host from the original passed | 128 | * Otherwise, locate struct Scsi_Host from the original passed |
155 | * pSCSI Host ID and enable for phba mode | 129 | * pSCSI Host ID and enable for phba mode |
156 | */ | 130 | */ |
157 | sh = pscsi_get_sh(phv->phv_host_id); | 131 | sh = scsi_host_lookup(phv->phv_host_id); |
158 | if (!(sh)) { | 132 | if (IS_ERR(sh)) { |
159 | printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for" | 133 | printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for" |
160 | " phv_host_id: %d\n", phv->phv_host_id); | 134 | " phv_host_id: %d\n", phv->phv_host_id); |
161 | return -1; | 135 | return PTR_ERR(sh); |
162 | } | 136 | } |
163 | /* | ||
164 | * Usually the SCSI LLD will use the hostt->can_queue value to define | ||
165 | * its HBA TCQ depth. Some other drivers (like 2.6 megaraid) don't set | ||
166 | * this at all and set sh->can_queue at runtime. | ||
167 | */ | ||
168 | hba_depth = (sh->hostt->can_queue > sh->can_queue) ? | ||
169 | sh->hostt->can_queue : sh->can_queue; | ||
170 | |||
171 | atomic_set(&hba->left_queue_depth, hba_depth); | ||
172 | atomic_set(&hba->max_queue_depth, hba_depth); | ||
173 | 137 | ||
174 | phv->phv_lld_host = sh; | 138 | phv->phv_lld_host = sh; |
175 | phv->phv_mode = PHV_LLD_SCSI_HOST_NO; | 139 | phv->phv_mode = PHV_LLD_SCSI_HOST_NO; |
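With pscsi_get_sh() gone, callers test scsi_host_lookup()'s result with IS_ERR() and propagate PTR_ERR(), instead of flattening the error pointer into NULL and inventing a new return code. The helpers live in the kernel's <linux/err.h>; they are re-created below only so the sketch compiles standalone:

    #include <errno.h>
    #include <stdio.h>

    /* Userspace re-creation of <linux/err.h>, for a self-contained sketch. */
    #define MAX_ERRNO 4095
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* Hypothetical lookup that fails the way scsi_host_lookup() does. */
    static int dummy_host;
    static void *host_lookup(unsigned int host_no)
    {
        return host_no ? (void *)&dummy_host : ERR_PTR(-ENODEV);
    }

    int main(void)
    {
        void *sh = host_lookup(0);

        if (IS_ERR(sh)) {    /* note: an ERR_PTR is not NULL */
            printf("lookup failed: %ld\n", PTR_ERR(sh));
            return 1;
        }
        return 0;
    }

The dropped wrapper converted the ERR_PTR into NULL and lost the errno; testing IS_ERR() directly preserves it.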
@@ -236,7 +200,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn) | |||
236 | 200 | ||
237 | buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL); | 201 | buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL); |
238 | if (!buf) | 202 | if (!buf) |
239 | return -1; | 203 | return -ENOMEM; |
240 | 204 | ||
241 | memset(cdb, 0, MAX_COMMAND_SIZE); | 205 | memset(cdb, 0, MAX_COMMAND_SIZE); |
242 | cdb[0] = INQUIRY; | 206 | cdb[0] = INQUIRY; |
@@ -259,7 +223,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn) | |||
259 | 223 | ||
260 | out_free: | 224 | out_free: |
261 | kfree(buf); | 225 | kfree(buf); |
262 | return -1; | 226 | return -EPERM; |
263 | } | 227 | } |
264 | 228 | ||
265 | static void | 229 | static void |
@@ -601,11 +565,11 @@ static struct se_device *pscsi_create_virtdevice( | |||
601 | hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; | 565 | hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; |
602 | sh = phv->phv_lld_host; | 566 | sh = phv->phv_lld_host; |
603 | } else { | 567 | } else { |
604 | sh = pscsi_get_sh(pdv->pdv_host_id); | 568 | sh = scsi_host_lookup(pdv->pdv_host_id); |
605 | if (!(sh)) { | 569 | if (IS_ERR(sh)) { |
606 | printk(KERN_ERR "pSCSI: Unable to locate" | 570 | printk(KERN_ERR "pSCSI: Unable to locate" |
607 | " pdv_host_id: %d\n", pdv->pdv_host_id); | 571 | " pdv_host_id: %d\n", pdv->pdv_host_id); |
608 | return ERR_PTR(-ENODEV); | 572 | return (struct se_device *) sh; |
609 | } | 573 | } |
610 | } | 574 | } |
611 | } else { | 575 | } else { |
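The pscsi_create_virtdevice() hunk relies on a property of the same encoding: the error lives in the pointer's value, not its type, so an error-carrying struct Scsi_Host pointer can be cast to the function's struct se_device return type and still satisfy IS_ERR() in the caller. Reusing the helpers from the previous sketch:

    struct se_device;    /* opaque, for illustration */

    static struct se_device *create_virtdevice(unsigned int host_no)
    {
        void *sh = host_lookup(host_no);

        if (IS_ERR(sh))
            return (struct se_device *)sh;    /* error value rides along */
        /* ... normal device setup elided ... */
        return NULL;
    }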
@@ -728,13 +692,12 @@ static int pscsi_transport_complete(struct se_task *task) | |||
728 | */ | 692 | */ |
729 | if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && | 693 | if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && |
730 | (status_byte(result) << 1) == SAM_STAT_GOOD) { | 694 | (status_byte(result) << 1) == SAM_STAT_GOOD) { |
731 | if (!TASK_CMD(task)->se_deve) | 695 | if (!task->task_se_cmd->se_deve) |
732 | goto after_mode_sense; | 696 | goto after_mode_sense; |
733 | 697 | ||
734 | if (TASK_CMD(task)->se_deve->lun_flags & | 698 | if (task->task_se_cmd->se_deve->lun_flags & |
735 | TRANSPORT_LUNFLAGS_READ_ONLY) { | 699 | TRANSPORT_LUNFLAGS_READ_ONLY) { |
736 | unsigned char *buf = (unsigned char *) | 700 | unsigned char *buf = task->task_se_cmd->t_task->t_task_buf; |
737 | T_TASK(task->task_se_cmd)->t_task_buf; | ||
738 | 701 | ||
739 | if (cdb[0] == MODE_SENSE_10) { | 702 | if (cdb[0] == MODE_SENSE_10) { |
740 | if (!(buf[3] & 0x80)) | 703 | if (!(buf[3] & 0x80)) |
@@ -800,7 +763,7 @@ static struct se_task * | |||
800 | pscsi_alloc_task(struct se_cmd *cmd) | 763 | pscsi_alloc_task(struct se_cmd *cmd) |
801 | { | 764 | { |
802 | struct pscsi_plugin_task *pt; | 765 | struct pscsi_plugin_task *pt; |
803 | unsigned char *cdb = T_TASK(cmd)->t_task_cdb; | 766 | unsigned char *cdb = cmd->t_task->t_task_cdb; |
804 | 767 | ||
805 | pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL); | 768 | pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL); |
806 | if (!pt) { | 769 | if (!pt) { |
@@ -813,7 +776,7 @@ pscsi_alloc_task(struct se_cmd *cmd) | |||
813 | * allocate the extended CDB buffer for per struct se_task context | 776 | * allocate the extended CDB buffer for per struct se_task context |
814 | * pt->pscsi_cdb now. | 777 | * pt->pscsi_cdb now. |
815 | */ | 778 | */ |
816 | if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) { | 779 | if (cmd->t_task->t_task_cdb != cmd->t_task->__t_task_cdb) { |
817 | 780 | ||
818 | pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL); | 781 | pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL); |
819 | if (!(pt->pscsi_cdb)) { | 782 | if (!(pt->pscsi_cdb)) { |
@@ -926,7 +889,7 @@ static void pscsi_free_task(struct se_task *task) | |||
926 | * Release the extended CDB allocation from pscsi_alloc_task() | 889 | * Release the extended CDB allocation from pscsi_alloc_task() |
927 | * if one exists. | 890 | * if one exists. |
928 | */ | 891 | */ |
929 | if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) | 892 | if (cmd->t_task->t_task_cdb != cmd->t_task->__t_task_cdb) |
930 | kfree(pt->pscsi_cdb); | 893 | kfree(pt->pscsi_cdb); |
931 | /* | 894 | /* |
932 | * We do not release the bio(s) here associated with this task, as | 895 | * We do not release the bio(s) here associated with this task, as |
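Both pscsi hunks above key off the same comparison: t_task_cdb points at the inline __t_task_cdb array for ordinary CDBs and at a kmalloc'ed buffer only for extended ones, so the free must fire exactly when the two pointers differ. A compact sketch of that inline-or-heap idiom, with illustrative sizes and names:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    #define INLINE_CDB_LEN 16    /* illustrative inline capacity */

    struct cmd {
        unsigned char *cdb;                      /* inline or heap */
        unsigned char __cdb[INLINE_CDB_LEN];     /* inline fast path */
    };

    static int cmd_set_cdb(struct cmd *c, const unsigned char *cdb, size_t len)
    {
        if (len <= sizeof(c->__cdb)) {
            c->cdb = c->__cdb;      /* common case: no allocation */
        } else {
            c->cdb = malloc(len);   /* extended CDB */
            if (!c->cdb)
                return -ENOMEM;
        }
        memcpy(c->cdb, cdb, len);
        return 0;
    }

    static void cmd_free_cdb(struct cmd *c)
    {
        if (c->cdb != c->__cdb)     /* the same test the hunks use */
            free(c->cdb);
    }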
@@ -1030,7 +993,7 @@ static ssize_t pscsi_check_configfs_dev_params( | |||
1030 | !(pdv->pdv_flags & PDF_HAS_LUN_ID)) { | 993 | !(pdv->pdv_flags & PDF_HAS_LUN_ID)) { |
1031 | printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and" | 994 | printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and" |
1032 | " scsi_lun_id= parameters\n"); | 995 | " scsi_lun_id= parameters\n"); |
1033 | return -1; | 996 | return -EINVAL; |
1034 | } | 997 | } |
1035 | 998 | ||
1036 | return 0; | 999 | return 0; |
@@ -1291,7 +1254,7 @@ static int pscsi_map_task_SG(struct se_task *task) | |||
1291 | */ | 1254 | */ |
1292 | static int pscsi_map_task_non_SG(struct se_task *task) | 1255 | static int pscsi_map_task_non_SG(struct se_task *task) |
1293 | { | 1256 | { |
1294 | struct se_cmd *cmd = TASK_CMD(task); | 1257 | struct se_cmd *cmd = task->task_se_cmd; |
1295 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | 1258 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); |
1296 | struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; | 1259 | struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; |
1297 | int ret = 0; | 1260 | int ret = 0; |
@@ -1303,7 +1266,7 @@ static int pscsi_map_task_non_SG(struct se_task *task) | |||
1303 | return 0; | 1266 | return 0; |
1304 | 1267 | ||
1305 | ret = blk_rq_map_kern(pdv->pdv_sd->request_queue, | 1268 | ret = blk_rq_map_kern(pdv->pdv_sd->request_queue, |
1306 | pt->pscsi_req, T_TASK(cmd)->t_task_buf, | 1269 | pt->pscsi_req, cmd->t_task->t_task_buf, |
1307 | task->task_size, GFP_KERNEL); | 1270 | task->task_size, GFP_KERNEL); |
1308 | if (ret < 0) { | 1271 | if (ret < 0) { |
1309 | printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret); | 1272 | printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret); |
@@ -1400,13 +1363,11 @@ static inline void pscsi_process_SAM_status( | |||
1400 | pt->pscsi_result); | 1363 | pt->pscsi_result); |
1401 | task->task_scsi_status = SAM_STAT_CHECK_CONDITION; | 1364 | task->task_scsi_status = SAM_STAT_CHECK_CONDITION; |
1402 | task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | 1365 | task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; |
1403 | TASK_CMD(task)->transport_error_status = | 1366 | task->task_se_cmd->transport_error_status = |
1404 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | 1367 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; |
1405 | transport_complete_task(task, 0); | 1368 | transport_complete_task(task, 0); |
1406 | break; | 1369 | break; |
1407 | } | 1370 | } |
1408 | |||
1409 | return; | ||
1410 | } | 1371 | } |
1411 | 1372 | ||
1412 | static void pscsi_req_done(struct request *req, int uptodate) | 1373 | static void pscsi_req_done(struct request *req, int uptodate) |
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h index a4cd5d352c3a..280b689379c3 100644 --- a/drivers/target/target_core_pscsi.h +++ b/drivers/target/target_core_pscsi.h | |||
@@ -2,7 +2,6 @@ | |||
2 | #define TARGET_CORE_PSCSI_H | 2 | #define TARGET_CORE_PSCSI_H |
3 | 3 | ||
4 | #define PSCSI_VERSION "v4.0" | 4 | #define PSCSI_VERSION "v4.0" |
5 | #define PSCSI_VIRTUAL_HBA_DEPTH 2048 | ||
6 | 5 | ||
7 | /* used in pscsi_find_alloc_len() */ | 6 | /* used in pscsi_find_alloc_len() */ |
8 | #ifndef INQUIRY_DATA_SIZE | 7 | #ifndef INQUIRY_DATA_SIZE |
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 7837dd365a9d..fbf06c3994fd 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c | |||
@@ -66,17 +66,14 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id) | |||
66 | 66 | ||
67 | rd_host->rd_host_id = host_id; | 67 | rd_host->rd_host_id = host_id; |
68 | 68 | ||
69 | atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH); | ||
70 | atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH); | ||
71 | hba->hba_ptr = (void *) rd_host; | 69 | hba->hba_ptr = (void *) rd_host; |
72 | 70 | ||
73 | printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" | 71 | printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" |
74 | " Generic Target Core Stack %s\n", hba->hba_id, | 72 | " Generic Target Core Stack %s\n", hba->hba_id, |
75 | RD_HBA_VERSION, TARGET_CORE_MOD_VERSION); | 73 | RD_HBA_VERSION, TARGET_CORE_MOD_VERSION); |
76 | printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic" | 74 | printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic" |
77 | " Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id, | 75 | " MaxSectors: %u\n", hba->hba_id, |
78 | rd_host->rd_host_id, atomic_read(&hba->max_queue_depth), | 76 | rd_host->rd_host_id, RD_MAX_SECTORS); |
79 | RD_MAX_SECTORS); | ||
80 | 77 | ||
81 | return 0; | 78 | return 0; |
82 | } | 79 | } |
@@ -339,7 +336,7 @@ rd_alloc_task(struct se_cmd *cmd) | |||
339 | printk(KERN_ERR "Unable to allocate struct rd_request\n"); | 336 | printk(KERN_ERR "Unable to allocate struct rd_request\n"); |
340 | return NULL; | 337 | return NULL; |
341 | } | 338 | } |
342 | rd_req->rd_dev = SE_DEV(cmd)->dev_ptr; | 339 | rd_req->rd_dev = cmd->se_lun->lun_se_dev->dev_ptr; |
343 | 340 | ||
344 | return &rd_req->rd_task; | 341 | return &rd_req->rd_task; |
345 | } | 342 | } |
@@ -383,7 +380,7 @@ static int rd_MEMCPY_read(struct rd_request *req) | |||
383 | 380 | ||
384 | table = rd_get_sg_table(dev, req->rd_page); | 381 | table = rd_get_sg_table(dev, req->rd_page); |
385 | if (!(table)) | 382 | if (!(table)) |
386 | return -1; | 383 | return -EINVAL; |
387 | 384 | ||
388 | table_sg_end = (table->page_end_offset - req->rd_page); | 385 | table_sg_end = (table->page_end_offset - req->rd_page); |
389 | sg_d = task->task_sg; | 386 | sg_d = task->task_sg; |
@@ -481,7 +478,7 @@ static int rd_MEMCPY_read(struct rd_request *req) | |||
481 | #endif | 478 | #endif |
482 | table = rd_get_sg_table(dev, req->rd_page); | 479 | table = rd_get_sg_table(dev, req->rd_page); |
483 | if (!(table)) | 480 | if (!(table)) |
484 | return -1; | 481 | return -EINVAL; |
485 | 482 | ||
486 | sg_s = &table->sg_table[j = 0]; | 483 | sg_s = &table->sg_table[j = 0]; |
487 | } | 484 | } |
@@ -506,7 +503,7 @@ static int rd_MEMCPY_write(struct rd_request *req) | |||
506 | 503 | ||
507 | table = rd_get_sg_table(dev, req->rd_page); | 504 | table = rd_get_sg_table(dev, req->rd_page); |
508 | if (!(table)) | 505 | if (!(table)) |
509 | return -1; | 506 | return -EINVAL; |
510 | 507 | ||
511 | table_sg_end = (table->page_end_offset - req->rd_page); | 508 | table_sg_end = (table->page_end_offset - req->rd_page); |
512 | sg_d = &table->sg_table[req->rd_page - table->page_start_offset]; | 509 | sg_d = &table->sg_table[req->rd_page - table->page_start_offset]; |
@@ -604,7 +601,7 @@ static int rd_MEMCPY_write(struct rd_request *req) | |||
604 | #endif | 601 | #endif |
605 | table = rd_get_sg_table(dev, req->rd_page); | 602 | table = rd_get_sg_table(dev, req->rd_page); |
606 | if (!(table)) | 603 | if (!(table)) |
607 | return -1; | 604 | return -EINVAL; |
608 | 605 | ||
609 | sg_d = &table->sg_table[j = 0]; | 606 | sg_d = &table->sg_table[j = 0]; |
610 | } | 607 | } |
@@ -623,11 +620,11 @@ static int rd_MEMCPY_do_task(struct se_task *task) | |||
623 | unsigned long long lba; | 620 | unsigned long long lba; |
624 | int ret; | 621 | int ret; |
625 | 622 | ||
626 | req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE; | 623 | req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE; |
627 | lba = task->task_lba; | 624 | lba = task->task_lba; |
628 | req->rd_offset = (do_div(lba, | 625 | req->rd_offset = (do_div(lba, |
629 | (PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) * | 626 | (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) * |
630 | DEV_ATTRIB(dev)->block_size; | 627 | dev->se_sub_dev->se_dev_attrib.block_size; |
631 | req->rd_size = task->task_size; | 628 | req->rd_size = task->task_size; |
632 | 629 | ||
633 | if (task->task_data_direction == DMA_FROM_DEVICE) | 630 | if (task->task_data_direction == DMA_FROM_DEVICE) |
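The rd_MEMCPY_do_task() arithmetic above is worth unpacking: do_div() divides its 64-bit argument in place and returns the remainder, so rd_page is the backing page holding the LBA and rd_offset is the LBA's byte offset within that page. A userspace sketch with a simplified do_div(); the real kernel macro is arch-specific, and this stand-in only mimics its in-place/remainder contract:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for the kernel's do_div(): divide n in place,
     * hand back the remainder. (GNU C statement expression.) */
    #define do_div(n, base) ({ uint32_t __rem = (n) % (base); (n) /= (base); __rem; })

    int main(void)
    {
        uint64_t lba = 1027;                          /* illustrative LBA */
        uint32_t block_size = 512, page_size = 4096;

        uint32_t rd_page = (uint32_t)((lba * block_size) / page_size);
        uint32_t rd_offset = do_div(lba, page_size / block_size) * block_size;

        /* 1027 * 512 = 525824 -> page 128; 1027 % 8 = 3 -> offset 1536 */
        printf("rd_page=%u rd_offset=%u\n", rd_page, rd_offset);
        return 0;
    }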
@@ -664,7 +661,7 @@ static int rd_DIRECT_with_offset( | |||
664 | 661 | ||
665 | table = rd_get_sg_table(dev, req->rd_page); | 662 | table = rd_get_sg_table(dev, req->rd_page); |
666 | if (!(table)) | 663 | if (!(table)) |
667 | return -1; | 664 | return -EINVAL; |
668 | 665 | ||
669 | table_sg_end = (table->page_end_offset - req->rd_page); | 666 | table_sg_end = (table->page_end_offset - req->rd_page); |
670 | sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; | 667 | sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; |
@@ -678,7 +675,7 @@ static int rd_DIRECT_with_offset( | |||
678 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); | 675 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); |
679 | if (!(se_mem)) { | 676 | if (!(se_mem)) { |
680 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); | 677 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); |
681 | return -1; | 678 | return -ENOMEM; |
682 | } | 679 | } |
683 | INIT_LIST_HEAD(&se_mem->se_list); | 680 | INIT_LIST_HEAD(&se_mem->se_list); |
684 | 681 | ||
@@ -734,13 +731,13 @@ check_eot: | |||
734 | #endif | 731 | #endif |
735 | table = rd_get_sg_table(dev, req->rd_page); | 732 | table = rd_get_sg_table(dev, req->rd_page); |
736 | if (!(table)) | 733 | if (!(table)) |
737 | return -1; | 734 | return -EINVAL; |
738 | 735 | ||
739 | sg_s = &table->sg_table[j = 0]; | 736 | sg_s = &table->sg_table[j = 0]; |
740 | } | 737 | } |
741 | 738 | ||
742 | out: | 739 | out: |
743 | T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt; | 740 | task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt; |
744 | #ifdef DEBUG_RAMDISK_DR | 741 | #ifdef DEBUG_RAMDISK_DR |
745 | printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", | 742 | printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", |
746 | *se_mem_cnt); | 743 | *se_mem_cnt); |
@@ -767,7 +764,7 @@ static int rd_DIRECT_without_offset( | |||
767 | 764 | ||
768 | table = rd_get_sg_table(dev, req->rd_page); | 765 | table = rd_get_sg_table(dev, req->rd_page); |
769 | if (!(table)) | 766 | if (!(table)) |
770 | return -1; | 767 | return -EINVAL; |
771 | 768 | ||
772 | sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; | 769 | sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; |
773 | #ifdef DEBUG_RAMDISK_DR | 770 | #ifdef DEBUG_RAMDISK_DR |
@@ -780,7 +777,7 @@ static int rd_DIRECT_without_offset( | |||
780 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); | 777 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); |
781 | if (!(se_mem)) { | 778 | if (!(se_mem)) { |
782 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); | 779 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); |
783 | return -1; | 780 | return -ENOMEM; |
784 | } | 781 | } |
785 | INIT_LIST_HEAD(&se_mem->se_list); | 782 | INIT_LIST_HEAD(&se_mem->se_list); |
786 | 783 | ||
@@ -816,13 +813,13 @@ static int rd_DIRECT_without_offset( | |||
816 | #endif | 813 | #endif |
817 | table = rd_get_sg_table(dev, req->rd_page); | 814 | table = rd_get_sg_table(dev, req->rd_page); |
818 | if (!(table)) | 815 | if (!(table)) |
819 | return -1; | 816 | return -EINVAL; |
820 | 817 | ||
821 | sg_s = &table->sg_table[j = 0]; | 818 | sg_s = &table->sg_table[j = 0]; |
822 | } | 819 | } |
823 | 820 | ||
824 | out: | 821 | out: |
825 | T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt; | 822 | task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt; |
826 | #ifdef DEBUG_RAMDISK_DR | 823 | #ifdef DEBUG_RAMDISK_DR |
827 | printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", | 824 | printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", |
828 | *se_mem_cnt); | 825 | *se_mem_cnt); |
@@ -848,13 +845,11 @@ static int rd_DIRECT_do_se_mem_map( | |||
848 | u32 task_offset = *task_offset_in; | 845 | u32 task_offset = *task_offset_in; |
849 | unsigned long long lba; | 846 | unsigned long long lba; |
850 | int ret; | 847 | int ret; |
848 | int block_size = task->se_dev->se_sub_dev->se_dev_attrib.block_size; | ||
851 | 849 | ||
852 | req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) / | ||
853 | PAGE_SIZE); | ||
854 | lba = task->task_lba; | 850 | lba = task->task_lba; |
855 | req->rd_offset = (do_div(lba, | 851 | req->rd_page = ((task->task_lba * block_size) / PAGE_SIZE); |
856 | (PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) * | 852 | req->rd_offset = (do_div(lba, (PAGE_SIZE / block_size))) * block_size; |
857 | DEV_ATTRIB(task->se_dev)->block_size; | ||
858 | req->rd_size = task->task_size; | 853 | req->rd_size = task->task_size; |
859 | 854 | ||
860 | if (req->rd_offset) | 855 | if (req->rd_offset) |
@@ -867,7 +862,7 @@ static int rd_DIRECT_do_se_mem_map( | |||
867 | if (ret < 0) | 862 | if (ret < 0) |
868 | return ret; | 863 | return ret; |
869 | 864 | ||
870 | if (CMD_TFO(cmd)->task_sg_chaining == 0) | 865 | if (cmd->se_tfo->task_sg_chaining == 0) |
871 | return 0; | 866 | return 0; |
872 | /* | 867 | /* |
873 | * Currently prevent writers from multiple HW fabrics doing | 868 | * Currently prevent writers from multiple HW fabrics doing |
@@ -876,7 +871,7 @@ static int rd_DIRECT_do_se_mem_map( | |||
876 | if (cmd->data_direction == DMA_TO_DEVICE) { | 871 | if (cmd->data_direction == DMA_TO_DEVICE) { |
877 | printk(KERN_ERR "DMA_TO_DEVICE not supported for" | 872 | printk(KERN_ERR "DMA_TO_DEVICE not supported for" |
878 | " RAMDISK_DR with task_sg_chaining=1\n"); | 873 | " RAMDISK_DR with task_sg_chaining=1\n"); |
879 | return -1; | 874 | return -ENOSYS; |
880 | } | 875 | } |
881 | /* | 876 | /* |
882 | * Special case for if task_sg_chaining is enabled, then | 877 | * Special case for if task_sg_chaining is enabled, then |
@@ -884,14 +879,15 @@ static int rd_DIRECT_do_se_mem_map( | |||
884 | * transport_do_task_sg_chain() for creating chained SGLs | 879 | * transport_do_task_sg_chain() for creating chained SGLs |
885 | * across multiple struct se_task->task_sg[]. | 880 | * across multiple struct se_task->task_sg[]. |
886 | */ | 881 | */ |
887 | if (!(transport_calc_sg_num(task, | 882 | ret = transport_init_task_sg(task, |
888 | list_entry(T_TASK(cmd)->t_mem_list->next, | 883 | list_entry(cmd->t_task->t_mem_list->next, |
889 | struct se_mem, se_list), | 884 | struct se_mem, se_list), |
890 | task_offset))) | 885 | task_offset); |
891 | return -1; | 886 | if (ret <= 0) |
887 | return ret; | ||
892 | 888 | ||
893 | return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, | 889 | return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, |
894 | list_entry(T_TASK(cmd)->t_mem_list->next, | 890 | list_entry(cmd->t_task->t_mem_list->next, |
895 | struct se_mem, se_list), | 891 | struct se_mem, se_list), |
896 | out_se_mem, se_mem_cnt, task_offset_in); | 892 | out_se_mem, se_mem_cnt, task_offset_in); |
897 | } | 893 | } |
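The renamed transport_init_task_sg() also changes its contract: the old transport_calc_sg_num() was tested as a boolean, while the new helper returns a positive count on success and zero or a negative errno otherwise, hence the "ret <= 0" check in the hunk. The shape, with illustrative bodies:

    #include <errno.h>

    /* Contract sketch: >0 = number of SG entries set up, <=0 = failure. */
    static int init_task_sg(int nents_wanted)
    {
        if (nents_wanted <= 0)
            return -EINVAL;
        /* ... allocate and initialize the entries ... */
        return nents_wanted;
    }

    static int do_se_mem_map(int nents)
    {
        int ret = init_task_sg(nents);

        if (ret <= 0)    /* was: if (!transport_calc_sg_num(...)) return -1; */
            return ret;
        return 0;
    }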
@@ -975,7 +971,7 @@ static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys | |||
975 | 971 | ||
976 | if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) { | 972 | if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) { |
977 | printk(KERN_INFO "Missing rd_pages= parameter\n"); | 973 | printk(KERN_INFO "Missing rd_pages= parameter\n"); |
978 | return -1; | 974 | return -EINVAL; |
979 | } | 975 | } |
980 | 976 | ||
981 | return 0; | 977 | return 0; |
@@ -1021,7 +1017,7 @@ static sector_t rd_get_blocks(struct se_device *dev) | |||
1021 | { | 1017 | { |
1022 | struct rd_dev *rd_dev = dev->dev_ptr; | 1018 | struct rd_dev *rd_dev = dev->dev_ptr; |
1023 | unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) / | 1019 | unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) / |
1024 | DEV_ATTRIB(dev)->block_size) - 1; | 1020 | dev->se_sub_dev->se_dev_attrib.block_size) - 1; |
1025 | 1021 | ||
1026 | return blocks_long; | 1022 | return blocks_long; |
1027 | } | 1023 | } |
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h index 3ea19e29d8ec..bab93020a3a9 100644 --- a/drivers/target/target_core_rd.h +++ b/drivers/target/target_core_rd.h | |||
@@ -7,8 +7,6 @@ | |||
7 | 7 | ||
8 | /* Largest piece of memory kmalloc can allocate */ | 8 | /* Largest piece of memory kmalloc can allocate */ |
9 | #define RD_MAX_ALLOCATION_SIZE 65536 | 9 | #define RD_MAX_ALLOCATION_SIZE 65536 |
10 | /* Maximum queuedepth for the Ramdisk HBA */ | ||
11 | #define RD_HBA_QUEUE_DEPTH 256 | ||
12 | #define RD_DEVICE_QUEUE_DEPTH 32 | 10 | #define RD_DEVICE_QUEUE_DEPTH 32 |
13 | #define RD_MAX_DEVICE_QUEUE_DEPTH 128 | 11 | #define RD_MAX_DEVICE_QUEUE_DEPTH 128 |
14 | #define RD_BLOCKSIZE 512 | 12 | #define RD_BLOCKSIZE 512 |
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index 5e3a067a7475..a8d6e1dee938 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c | |||
@@ -402,8 +402,8 @@ static ssize_t target_stat_scsi_lu_show_attr_lu_name( | |||
402 | return -ENODEV; | 402 | return -ENODEV; |
403 | /* scsiLuWwnName */ | 403 | /* scsiLuWwnName */ |
404 | return snprintf(page, PAGE_SIZE, "%s\n", | 404 | return snprintf(page, PAGE_SIZE, "%s\n", |
405 | (strlen(DEV_T10_WWN(dev)->unit_serial)) ? | 405 | (strlen(dev->se_sub_dev->t10_wwn.unit_serial)) ? |
406 | (char *)&DEV_T10_WWN(dev)->unit_serial[0] : "None"); | 406 | dev->se_sub_dev->t10_wwn.unit_serial : "None"); |
407 | } | 407 | } |
408 | DEV_STAT_SCSI_LU_ATTR_RO(lu_name); | 408 | DEV_STAT_SCSI_LU_ATTR_RO(lu_name); |
409 | 409 | ||
@@ -413,17 +413,17 @@ static ssize_t target_stat_scsi_lu_show_attr_vend( | |||
413 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | 413 | struct se_subsystem_dev *se_subdev = container_of(sgrps, |
414 | struct se_subsystem_dev, dev_stat_grps); | 414 | struct se_subsystem_dev, dev_stat_grps); |
415 | struct se_device *dev = se_subdev->se_dev_ptr; | 415 | struct se_device *dev = se_subdev->se_dev_ptr; |
416 | int j; | 416 | int i; |
417 | char str[28]; | 417 | char str[sizeof(dev->se_sub_dev->t10_wwn.vendor)+1]; |
418 | 418 | ||
419 | if (!dev) | 419 | if (!dev) |
420 | return -ENODEV; | 420 | return -ENODEV; |
421 | |||
421 | /* scsiLuVendorId */ | 422 | /* scsiLuVendorId */ |
422 | memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); | 423 | for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++) |
423 | for (j = 0; j < 8; j++) | 424 | str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.vendor[i]) ? |
424 | str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ? | 425 | dev->se_sub_dev->t10_wwn.vendor[i] : ' '; |
425 | DEV_T10_WWN(dev)->vendor[j] : 0x20; | 426 | str[i] = '\0'; |
426 | str[8] = 0; | ||
427 | return snprintf(page, PAGE_SIZE, "%s\n", str); | 427 | return snprintf(page, PAGE_SIZE, "%s\n", str); |
428 | } | 428 | } |
429 | DEV_STAT_SCSI_LU_ATTR_RO(vend); | 429 | DEV_STAT_SCSI_LU_ATTR_RO(vend); |
@@ -434,18 +434,17 @@ static ssize_t target_stat_scsi_lu_show_attr_prod( | |||
434 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | 434 | struct se_subsystem_dev *se_subdev = container_of(sgrps, |
435 | struct se_subsystem_dev, dev_stat_grps); | 435 | struct se_subsystem_dev, dev_stat_grps); |
436 | struct se_device *dev = se_subdev->se_dev_ptr; | 436 | struct se_device *dev = se_subdev->se_dev_ptr; |
437 | int j; | 437 | int i; |
438 | char str[28]; | 438 | char str[sizeof(dev->se_sub_dev->t10_wwn.model)+1]; |
439 | 439 | ||
440 | if (!dev) | 440 | if (!dev) |
441 | return -ENODEV; | 441 | return -ENODEV; |
442 | 442 | ||
443 | /* scsiLuProductId */ | 443 | /* scsiLuProductId */ |
444 | memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); | 444 | for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++) |
445 | for (j = 0; j < 16; j++) | 445 | str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.model[i]) ? |
446 | str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ? | 446 | dev->se_sub_dev->t10_wwn.model[i] : ' '; |
447 | DEV_T10_WWN(dev)->model[j] : 0x20; | 447 | str[i] = '\0'; |
448 | str[16] = 0; | ||
449 | return snprintf(page, PAGE_SIZE, "%s\n", str); | 448 | return snprintf(page, PAGE_SIZE, "%s\n", str); |
450 | } | 449 | } |
451 | DEV_STAT_SCSI_LU_ATTR_RO(prod); | 450 | DEV_STAT_SCSI_LU_ATTR_RO(prod); |
@@ -456,18 +455,17 @@ static ssize_t target_stat_scsi_lu_show_attr_rev( | |||
456 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | 455 | struct se_subsystem_dev *se_subdev = container_of(sgrps, |
457 | struct se_subsystem_dev, dev_stat_grps); | 456 | struct se_subsystem_dev, dev_stat_grps); |
458 | struct se_device *dev = se_subdev->se_dev_ptr; | 457 | struct se_device *dev = se_subdev->se_dev_ptr; |
459 | int j; | 458 | int i; |
460 | char str[28]; | 459 | char str[sizeof(dev->se_sub_dev->t10_wwn.revision)+1]; |
461 | 460 | ||
462 | if (!dev) | 461 | if (!dev) |
463 | return -ENODEV; | 462 | return -ENODEV; |
464 | 463 | ||
465 | /* scsiLuRevisionId */ | 464 | /* scsiLuRevisionId */ |
466 | memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); | 465 | for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.revision); i++) |
467 | for (j = 0; j < 4; j++) | 466 | str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.revision[i]) ? |
468 | str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ? | 467 | dev->se_sub_dev->t10_wwn.revision[i] : ' '; |
469 | DEV_T10_WWN(dev)->revision[j] : 0x20; | 468 | str[i] = '\0'; |
470 | str[4] = 0; | ||
471 | return snprintf(page, PAGE_SIZE, "%s\n", str); | 469 | return snprintf(page, PAGE_SIZE, "%s\n", str); |
472 | } | 470 | } |
473 | DEV_STAT_SCSI_LU_ATTR_RO(rev); | 471 | DEV_STAT_SCSI_LU_ATTR_RO(rev); |
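The three stat handlers above (vend, prod, rev) replace a blind 28-byte memcpy out of the t10_wwn struct with loops that size the scratch buffer from the field itself, map unprintable bytes to spaces, and NUL-terminate explicitly. One hedged observation: the prod hunk still bounds its loop with sizeof(...vendor) while indexing model, which looks like a copy-paste slip; since vendor (8 bytes) is shorter than model (16), the output is truncated rather than overrun. The intended shape, in a standalone sketch:

    #include <ctype.h>
    #include <stdio.h>

    struct t10_wwn { char vendor[8]; char model[16]; char revision[4]; };

    /* Buffer sized from the field, unprintables mapped to ' ', explicit NUL. */
    static void show_model(const struct t10_wwn *wwn, char *page, size_t len)
    {
        char str[sizeof(wwn->model) + 1];
        size_t i;

        for (i = 0; i < sizeof(wwn->model); i++)
            str[i] = isprint((unsigned char)wwn->model[i]) ?
                     wwn->model[i] : ' ';
        str[i] = '\0';
        snprintf(page, len, "%s\n", str);
    }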
@@ -484,7 +482,7 @@ static ssize_t target_stat_scsi_lu_show_attr_dev_type( | |||
484 | 482 | ||
485 | /* scsiLuPeripheralType */ | 483 | /* scsiLuPeripheralType */ |
486 | return snprintf(page, PAGE_SIZE, "%u\n", | 484 | return snprintf(page, PAGE_SIZE, "%u\n", |
487 | TRANSPORT(dev)->get_device_type(dev)); | 485 | dev->transport->get_device_type(dev)); |
488 | } | 486 | } |
489 | DEV_STAT_SCSI_LU_ATTR_RO(dev_type); | 487 | DEV_STAT_SCSI_LU_ATTR_RO(dev_type); |
490 | 488 | ||
@@ -668,18 +666,18 @@ static struct config_item_type target_stat_scsi_lu_cit = { | |||
668 | */ | 666 | */ |
669 | void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev) | 667 | void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev) |
670 | { | 668 | { |
671 | struct config_group *dev_stat_grp = &DEV_STAT_GRP(se_subdev)->stat_group; | 669 | struct config_group *dev_stat_grp = &se_subdev->dev_stat_grps.stat_group; |
672 | 670 | ||
673 | config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_dev_group, | 671 | config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_dev_group, |
674 | "scsi_dev", &target_stat_scsi_dev_cit); | 672 | "scsi_dev", &target_stat_scsi_dev_cit); |
675 | config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group, | 673 | config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_tgt_dev_group, |
676 | "scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit); | 674 | "scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit); |
677 | config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_lu_group, | 675 | config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_lu_group, |
678 | "scsi_lu", &target_stat_scsi_lu_cit); | 676 | "scsi_lu", &target_stat_scsi_lu_cit); |
679 | 677 | ||
680 | dev_stat_grp->default_groups[0] = &DEV_STAT_GRP(se_subdev)->scsi_dev_group; | 678 | dev_stat_grp->default_groups[0] = &se_subdev->dev_stat_grps.scsi_dev_group; |
681 | dev_stat_grp->default_groups[1] = &DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group; | 679 | dev_stat_grp->default_groups[1] = &se_subdev->dev_stat_grps.scsi_tgt_dev_group; |
682 | dev_stat_grp->default_groups[2] = &DEV_STAT_GRP(se_subdev)->scsi_lu_group; | 680 | dev_stat_grp->default_groups[2] = &se_subdev->dev_stat_grps.scsi_lu_group; |
683 | dev_stat_grp->default_groups[3] = NULL; | 681 | dev_stat_grp->default_groups[3] = NULL; |
684 | } | 682 | } |
685 | 683 | ||
@@ -922,7 +920,7 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_name( | |||
922 | tpg = sep->sep_tpg; | 920 | tpg = sep->sep_tpg; |
923 | 921 | ||
924 | ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n", | 922 | ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n", |
925 | TPG_TFO(tpg)->get_fabric_name(), sep->sep_index); | 923 | tpg->se_tpg_tfo->get_fabric_name(), sep->sep_index); |
926 | spin_unlock(&lun->lun_sep_lock); | 924 | spin_unlock(&lun->lun_sep_lock); |
927 | return ret; | 925 | return ret; |
928 | } | 926 | } |
@@ -945,8 +943,8 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_port_index( | |||
945 | tpg = sep->sep_tpg; | 943 | tpg = sep->sep_tpg; |
946 | 944 | ||
947 | ret = snprintf(page, PAGE_SIZE, "%s%s%d\n", | 945 | ret = snprintf(page, PAGE_SIZE, "%s%s%d\n", |
948 | TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+", | 946 | tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+", |
949 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 947 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
950 | spin_unlock(&lun->lun_sep_lock); | 948 | spin_unlock(&lun->lun_sep_lock); |
951 | return ret; | 949 | return ret; |
952 | } | 950 | } |
@@ -1128,7 +1126,7 @@ static ssize_t target_stat_scsi_transport_show_attr_device( | |||
1128 | tpg = sep->sep_tpg; | 1126 | tpg = sep->sep_tpg; |
1129 | /* scsiTransportType */ | 1127 | /* scsiTransportType */ |
1130 | ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n", | 1128 | ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n", |
1131 | TPG_TFO(tpg)->get_fabric_name()); | 1129 | tpg->se_tpg_tfo->get_fabric_name()); |
1132 | spin_unlock(&lun->lun_sep_lock); | 1130 | spin_unlock(&lun->lun_sep_lock); |
1133 | return ret; | 1131 | return ret; |
1134 | } | 1132 | } |
@@ -1150,7 +1148,7 @@ static ssize_t target_stat_scsi_transport_show_attr_indx( | |||
1150 | } | 1148 | } |
1151 | tpg = sep->sep_tpg; | 1149 | tpg = sep->sep_tpg; |
1152 | ret = snprintf(page, PAGE_SIZE, "%u\n", | 1150 | ret = snprintf(page, PAGE_SIZE, "%u\n", |
1153 | TPG_TFO(tpg)->tpg_get_inst_index(tpg)); | 1151 | tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); |
1154 | spin_unlock(&lun->lun_sep_lock); | 1152 | spin_unlock(&lun->lun_sep_lock); |
1155 | return ret; | 1153 | return ret; |
1156 | } | 1154 | } |
@@ -1173,10 +1171,10 @@ static ssize_t target_stat_scsi_transport_show_attr_dev_name( | |||
1173 | return -ENODEV; | 1171 | return -ENODEV; |
1174 | } | 1172 | } |
1175 | tpg = sep->sep_tpg; | 1173 | tpg = sep->sep_tpg; |
1176 | wwn = DEV_T10_WWN(dev); | 1174 | wwn = &dev->se_sub_dev->t10_wwn; |
1177 | /* scsiTransportDevName */ | 1175 | /* scsiTransportDevName */ |
1178 | ret = snprintf(page, PAGE_SIZE, "%s+%s\n", | 1176 | ret = snprintf(page, PAGE_SIZE, "%s+%s\n", |
1179 | TPG_TFO(tpg)->tpg_get_wwn(tpg), | 1177 | tpg->se_tpg_tfo->tpg_get_wwn(tpg), |
1180 | (strlen(wwn->unit_serial)) ? wwn->unit_serial : | 1178 | (strlen(wwn->unit_serial)) ? wwn->unit_serial : |
1181 | wwn->vendor); | 1179 | wwn->vendor); |
1182 | spin_unlock(&lun->lun_sep_lock); | 1180 | spin_unlock(&lun->lun_sep_lock); |
@@ -1212,18 +1210,18 @@ static struct config_item_type target_stat_scsi_transport_cit = { | |||
1212 | */ | 1210 | */ |
1213 | void target_stat_setup_port_default_groups(struct se_lun *lun) | 1211 | void target_stat_setup_port_default_groups(struct se_lun *lun) |
1214 | { | 1212 | { |
1215 | struct config_group *port_stat_grp = &PORT_STAT_GRP(lun)->stat_group; | 1213 | struct config_group *port_stat_grp = &lun->port_stat_grps.stat_group; |
1216 | 1214 | ||
1217 | config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_port_group, | 1215 | config_group_init_type_name(&lun->port_stat_grps.scsi_port_group, |
1218 | "scsi_port", &target_stat_scsi_port_cit); | 1216 | "scsi_port", &target_stat_scsi_port_cit); |
1219 | config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_tgt_port_group, | 1217 | config_group_init_type_name(&lun->port_stat_grps.scsi_tgt_port_group, |
1220 | "scsi_tgt_port", &target_stat_scsi_tgt_port_cit); | 1218 | "scsi_tgt_port", &target_stat_scsi_tgt_port_cit); |
1221 | config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_transport_group, | 1219 | config_group_init_type_name(&lun->port_stat_grps.scsi_transport_group, |
1222 | "scsi_transport", &target_stat_scsi_transport_cit); | 1220 | "scsi_transport", &target_stat_scsi_transport_cit); |
1223 | 1221 | ||
1224 | port_stat_grp->default_groups[0] = &PORT_STAT_GRP(lun)->scsi_port_group; | 1222 | port_stat_grp->default_groups[0] = &lun->port_stat_grps.scsi_port_group; |
1225 | port_stat_grp->default_groups[1] = &PORT_STAT_GRP(lun)->scsi_tgt_port_group; | 1223 | port_stat_grp->default_groups[1] = &lun->port_stat_grps.scsi_tgt_port_group; |
1226 | port_stat_grp->default_groups[2] = &PORT_STAT_GRP(lun)->scsi_transport_group; | 1224 | port_stat_grp->default_groups[2] = &lun->port_stat_grps.scsi_transport_group; |
1227 | port_stat_grp->default_groups[3] = NULL; | 1225 | port_stat_grp->default_groups[3] = NULL; |
1228 | } | 1226 | } |
1229 | 1227 | ||
@@ -1264,7 +1262,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_inst( | |||
1264 | tpg = nacl->se_tpg; | 1262 | tpg = nacl->se_tpg; |
1265 | /* scsiInstIndex */ | 1263 | /* scsiInstIndex */ |
1266 | ret = snprintf(page, PAGE_SIZE, "%u\n", | 1264 | ret = snprintf(page, PAGE_SIZE, "%u\n", |
1267 | TPG_TFO(tpg)->tpg_get_inst_index(tpg)); | 1265 | tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); |
1268 | spin_unlock_irq(&nacl->device_list_lock); | 1266 | spin_unlock_irq(&nacl->device_list_lock); |
1269 | return ret; | 1267 | return ret; |
1270 | } | 1268 | } |
@@ -1314,7 +1312,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_port( | |||
1314 | } | 1312 | } |
1315 | tpg = nacl->se_tpg; | 1313 | tpg = nacl->se_tpg; |
1316 | /* scsiAuthIntrTgtPortIndex */ | 1314 | /* scsiAuthIntrTgtPortIndex */ |
1317 | ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1315 | ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
1318 | spin_unlock_irq(&nacl->device_list_lock); | 1316 | spin_unlock_irq(&nacl->device_list_lock); |
1319 | return ret; | 1317 | return ret; |
1320 | } | 1318 | } |
@@ -1632,7 +1630,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_inst( | |||
1632 | tpg = nacl->se_tpg; | 1630 | tpg = nacl->se_tpg; |
1633 | /* scsiInstIndex */ | 1631 | /* scsiInstIndex */ |
1634 | ret = snprintf(page, PAGE_SIZE, "%u\n", | 1632 | ret = snprintf(page, PAGE_SIZE, "%u\n", |
1635 | TPG_TFO(tpg)->tpg_get_inst_index(tpg)); | 1633 | tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); |
1636 | spin_unlock_irq(&nacl->device_list_lock); | 1634 | spin_unlock_irq(&nacl->device_list_lock); |
1637 | return ret; | 1635 | return ret; |
1638 | } | 1636 | } |
@@ -1682,7 +1680,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port( | |||
1682 | } | 1680 | } |
1683 | tpg = nacl->se_tpg; | 1681 | tpg = nacl->se_tpg; |
1684 | /* scsiPortIndex */ | 1682 | /* scsiPortIndex */ |
1685 | ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1683 | ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
1686 | spin_unlock_irq(&nacl->device_list_lock); | 1684 | spin_unlock_irq(&nacl->device_list_lock); |
1687 | return ret; | 1685 | return ret; |
1688 | } | 1686 | } |
@@ -1708,7 +1706,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_indx( | |||
1708 | tpg = nacl->se_tpg; | 1706 | tpg = nacl->se_tpg; |
1709 | /* scsiAttIntrPortIndex */ | 1707 | /* scsiAttIntrPortIndex */ |
1710 | ret = snprintf(page, PAGE_SIZE, "%u\n", | 1708 | ret = snprintf(page, PAGE_SIZE, "%u\n", |
1711 | TPG_TFO(tpg)->sess_get_index(se_sess)); | 1709 | tpg->se_tpg_tfo->sess_get_index(se_sess)); |
1712 | spin_unlock_irq(&nacl->nacl_sess_lock); | 1710 | spin_unlock_irq(&nacl->nacl_sess_lock); |
1713 | return ret; | 1711 | return ret; |
1714 | } | 1712 | } |
@@ -1757,8 +1755,8 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident( | |||
1757 | tpg = nacl->se_tpg; | 1755 | tpg = nacl->se_tpg; |
1758 | /* scsiAttIntrPortName+scsiAttIntrPortIdentifier */ | 1756 | /* scsiAttIntrPortName+scsiAttIntrPortIdentifier */ |
1759 | memset(buf, 0, 64); | 1757 | memset(buf, 0, 64); |
1760 | if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) | 1758 | if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) |
1761 | TPG_TFO(tpg)->sess_get_initiator_sid(se_sess, | 1759 | tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, |
1762 | (unsigned char *)&buf[0], 64); | 1760 | (unsigned char *)&buf[0], 64); |
1763 | 1761 | ||
1764 | ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf); | 1762 | ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf); |
@@ -1797,14 +1795,14 @@ static struct config_item_type target_stat_scsi_att_intr_port_cit = { | |||
1797 | */ | 1795 | */ |
1798 | void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl) | 1796 | void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl) |
1799 | { | 1797 | { |
1800 | struct config_group *ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group; | 1798 | struct config_group *ml_stat_grp = &lacl->ml_stat_grps.stat_group; |
1801 | 1799 | ||
1802 | config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_auth_intr_group, | 1800 | config_group_init_type_name(&lacl->ml_stat_grps.scsi_auth_intr_group, |
1803 | "scsi_auth_intr", &target_stat_scsi_auth_intr_cit); | 1801 | "scsi_auth_intr", &target_stat_scsi_auth_intr_cit); |
1804 | config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_att_intr_port_group, | 1802 | config_group_init_type_name(&lacl->ml_stat_grps.scsi_att_intr_port_group, |
1805 | "scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit); | 1803 | "scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit); |
1806 | 1804 | ||
1807 | ml_stat_grp->default_groups[0] = &ML_STAT_GRPS(lacl)->scsi_auth_intr_group; | 1805 | ml_stat_grp->default_groups[0] = &lacl->ml_stat_grps.scsi_auth_intr_group; |
1808 | ml_stat_grp->default_groups[1] = &ML_STAT_GRPS(lacl)->scsi_att_intr_port_group; | 1806 | ml_stat_grp->default_groups[1] = &lacl->ml_stat_grps.scsi_att_intr_port_group; |
1809 | ml_stat_grp->default_groups[2] = NULL; | 1807 | ml_stat_grp->default_groups[2] = NULL; |
1810 | } | 1808 | } |
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 179063d81cdd..2f73749b8151 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c | |||
@@ -117,7 +117,7 @@ int core_tmr_lun_reset( | |||
117 | struct se_queue_req *qr, *qr_tmp; | 117 | struct se_queue_req *qr, *qr_tmp; |
118 | struct se_node_acl *tmr_nacl = NULL; | 118 | struct se_node_acl *tmr_nacl = NULL; |
119 | struct se_portal_group *tmr_tpg = NULL; | 119 | struct se_portal_group *tmr_tpg = NULL; |
120 | struct se_queue_obj *qobj = dev->dev_queue_obj; | 120 | struct se_queue_obj *qobj = &dev->dev_queue_obj; |
121 | struct se_tmr_req *tmr_p, *tmr_pp; | 121 | struct se_tmr_req *tmr_p, *tmr_pp; |
122 | struct se_task *task, *task_tmp; | 122 | struct se_task *task, *task_tmp; |
123 | unsigned long flags; | 123 | unsigned long flags; |
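One structural change surfaces in the first core_tmr_lun_reset() hunk: dev_queue_obj is now embedded in struct se_device rather than separately allocated, so the function takes its address instead of copying a pointer. Embedding trades a kmalloc/kfree pair and a pointer chase for a lifetime tied to the owning device. In sketch form, with illustrative types:

    struct se_queue_obj { int queue_cnt; };

    struct se_device {
        struct se_queue_obj dev_queue_obj;    /* member, not pointer */
    };

    static void lun_reset(struct se_device *dev)
    {
        /* was: struct se_queue_obj *qobj = dev->dev_queue_obj; */
        struct se_queue_obj *qobj = &dev->dev_queue_obj;

        qobj->queue_cnt = 0;
        /* ... */
    }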
@@ -133,7 +133,7 @@ int core_tmr_lun_reset( | |||
133 | * which the command was received shall be completed with TASK ABORTED | 133 | * which the command was received shall be completed with TASK ABORTED |
134 | * status (see SAM-4). | 134 | * status (see SAM-4). |
135 | */ | 135 | */ |
136 | tas = DEV_ATTRIB(dev)->emulate_tas; | 136 | tas = dev->se_sub_dev->se_dev_attrib.emulate_tas; |
137 | /* | 137 | /* |
138 | * Determine if this se_tmr is coming from a $FABRIC_MOD | 138 | * Determine if this se_tmr is coming from a $FABRIC_MOD |
139 | * or struct se_device passthrough.. | 139 | * or struct se_device passthrough.. |
@@ -144,13 +144,13 @@ int core_tmr_lun_reset( | |||
144 | if (tmr_nacl && tmr_tpg) { | 144 | if (tmr_nacl && tmr_tpg) { |
145 | DEBUG_LR("LUN_RESET: TMR caller fabric: %s" | 145 | DEBUG_LR("LUN_RESET: TMR caller fabric: %s" |
146 | " initiator port %s\n", | 146 | " initiator port %s\n", |
147 | TPG_TFO(tmr_tpg)->get_fabric_name(), | 147 | tmr_tpg->se_tpg_tfo->get_fabric_name(), |
148 | tmr_nacl->initiatorname); | 148 | tmr_nacl->initiatorname); |
149 | } | 149 | } |
150 | } | 150 | } |
151 | DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n", | 151 | DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n", |
152 | (preempt_and_abort_list) ? "Preempt" : "TMR", | 152 | (preempt_and_abort_list) ? "Preempt" : "TMR", |
153 | TRANSPORT(dev)->name, tas); | 153 | dev->transport->name, tas); |
154 | /* | 154 | /* |
155 | * Release all pending and outgoing TMRs aside from the received | 155 | * Release all pending and outgoing TMRs aside from the received |
156 | * LUN_RESET tmr.. | 156 | * LUN_RESET tmr.. |
@@ -179,14 +179,14 @@ int core_tmr_lun_reset( | |||
179 | continue; | 179 | continue; |
180 | spin_unlock(&dev->se_tmr_lock); | 180 | spin_unlock(&dev->se_tmr_lock); |
181 | 181 | ||
182 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 182 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
183 | if (!(atomic_read(&T_TASK(cmd)->t_transport_active))) { | 183 | if (!(atomic_read(&cmd->t_task->t_transport_active))) { |
184 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 184 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
185 | spin_lock(&dev->se_tmr_lock); | 185 | spin_lock(&dev->se_tmr_lock); |
186 | continue; | 186 | continue; |
187 | } | 187 | } |
188 | if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { | 188 | if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { |
189 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 189 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
190 | spin_lock(&dev->se_tmr_lock); | 190 | spin_lock(&dev->se_tmr_lock); |
191 | continue; | 191 | continue; |
192 | } | 192 | } |
@@ -194,7 +194,7 @@ int core_tmr_lun_reset( | |||
194 | " Response: 0x%02x, t_state: %d\n", | 194 | " Response: 0x%02x, t_state: %d\n", |
195 | (preempt_and_abort_list) ? "Preempt" : "", tmr_p, | 195 | (preempt_and_abort_list) ? "Preempt" : "", tmr_p, |
196 | tmr_p->function, tmr_p->response, cmd->t_state); | 196 | tmr_p->function, tmr_p->response, cmd->t_state); |
197 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 197 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
198 | 198 | ||
199 | transport_cmd_finish_abort_tmr(cmd); | 199 | transport_cmd_finish_abort_tmr(cmd); |
200 | spin_lock(&dev->se_tmr_lock); | 200 | spin_lock(&dev->se_tmr_lock); |
@@ -224,16 +224,16 @@ int core_tmr_lun_reset( | |||
224 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 224 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
225 | list_for_each_entry_safe(task, task_tmp, &dev->state_task_list, | 225 | list_for_each_entry_safe(task, task_tmp, &dev->state_task_list, |
226 | t_state_list) { | 226 | t_state_list) { |
227 | if (!(TASK_CMD(task))) { | 227 | if (!task->task_se_cmd) { |
228 | printk(KERN_ERR "TASK_CMD(task) is NULL!\n"); | 228 | printk(KERN_ERR "task->task_se_cmd is NULL!\n"); |
229 | continue; | 229 | continue; |
230 | } | 230 | } |
231 | cmd = TASK_CMD(task); | 231 | cmd = task->task_se_cmd; |
232 | 232 | ||
233 | if (!T_TASK(cmd)) { | 233 | if (!cmd->t_task) { |
234 | printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:" | 234 | printk(KERN_ERR "cmd->t_task is NULL for task: %p cmd:" |
235 | " %p ITT: 0x%08x\n", task, cmd, | 235 | " %p ITT: 0x%08x\n", task, cmd, |
236 | CMD_TFO(cmd)->get_task_tag(cmd)); | 236 | cmd->se_tfo->get_task_tag(cmd)); |
237 | continue; | 237 | continue; |
238 | } | 238 | } |
239 | /* | 239 | /* |
@@ -254,38 +254,38 @@ int core_tmr_lun_reset( | |||
254 | atomic_set(&task->task_state_active, 0); | 254 | atomic_set(&task->task_state_active, 0); |
255 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | 255 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
256 | 256 | ||
257 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 257 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
258 | DEBUG_LR("LUN_RESET: %s cmd: %p task: %p" | 258 | DEBUG_LR("LUN_RESET: %s cmd: %p task: %p" |
259 | " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/" | 259 | " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/" |
260 | "def_t_state: %d/%d cdb: 0x%02x\n", | 260 | "def_t_state: %d/%d cdb: 0x%02x\n", |
261 | (preempt_and_abort_list) ? "Preempt" : "", cmd, task, | 261 | (preempt_and_abort_list) ? "Preempt" : "", cmd, task, |
262 | CMD_TFO(cmd)->get_task_tag(cmd), 0, | 262 | cmd->se_tfo->get_task_tag(cmd), 0, |
263 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state, | 263 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, |
264 | cmd->deferred_t_state, T_TASK(cmd)->t_task_cdb[0]); | 264 | cmd->deferred_t_state, cmd->t_task->t_task_cdb[0]); |
265 | DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" | 265 | DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" |
266 | " t_task_cdbs: %d t_task_cdbs_left: %d" | 266 | " t_task_cdbs: %d t_task_cdbs_left: %d" |
267 | " t_task_cdbs_sent: %d -- t_transport_active: %d" | 267 | " t_task_cdbs_sent: %d -- t_transport_active: %d" |
268 | " t_transport_stop: %d t_transport_sent: %d\n", | 268 | " t_transport_stop: %d t_transport_sent: %d\n", |
269 | CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key, | 269 | cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key, |
270 | T_TASK(cmd)->t_task_cdbs, | 270 | cmd->t_task->t_task_cdbs, |
271 | atomic_read(&T_TASK(cmd)->t_task_cdbs_left), | 271 | atomic_read(&cmd->t_task->t_task_cdbs_left), |
272 | atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), | 272 | atomic_read(&cmd->t_task->t_task_cdbs_sent), |
273 | atomic_read(&T_TASK(cmd)->t_transport_active), | 273 | atomic_read(&cmd->t_task->t_transport_active), |
274 | atomic_read(&T_TASK(cmd)->t_transport_stop), | 274 | atomic_read(&cmd->t_task->t_transport_stop), |
275 | atomic_read(&T_TASK(cmd)->t_transport_sent)); | 275 | atomic_read(&cmd->t_task->t_transport_sent)); |
276 | 276 | ||
277 | if (atomic_read(&task->task_active)) { | 277 | if (atomic_read(&task->task_active)) { |
278 | atomic_set(&task->task_stop, 1); | 278 | atomic_set(&task->task_stop, 1); |
279 | spin_unlock_irqrestore( | 279 | spin_unlock_irqrestore( |
280 | &T_TASK(cmd)->t_state_lock, flags); | 280 | &cmd->t_task->t_state_lock, flags); |
281 | 281 | ||
282 | DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown" | 282 | DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown" |
283 | " for dev: %p\n", task, dev); | 283 | " for dev: %p\n", task, dev); |
284 | wait_for_completion(&task->task_stop_comp); | 284 | wait_for_completion(&task->task_stop_comp); |
285 | DEBUG_LR("LUN_RESET Completed task: %p shutdown for" | 285 | DEBUG_LR("LUN_RESET Completed task: %p shutdown for" |
286 | " dev: %p\n", task, dev); | 286 | " dev: %p\n", task, dev); |
287 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 287 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
288 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); | 288 | atomic_dec(&cmd->t_task->t_task_cdbs_left); |
289 | 289 | ||
290 | atomic_set(&task->task_active, 0); | 290 | atomic_set(&task->task_active, 0); |
291 | atomic_set(&task->task_stop, 0); | 291 | atomic_set(&task->task_stop, 0); |
@@ -295,24 +295,24 @@ int core_tmr_lun_reset( | |||
295 | } | 295 | } |
296 | __transport_stop_task_timer(task, &flags); | 296 | __transport_stop_task_timer(task, &flags); |
297 | 297 | ||
298 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) { | 298 | if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_ex_left))) { |
299 | spin_unlock_irqrestore( | 299 | spin_unlock_irqrestore( |
300 | &T_TASK(cmd)->t_state_lock, flags); | 300 | &cmd->t_task->t_state_lock, flags); |
301 | DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for" | 301 | DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for" |
302 | " t_task_cdbs_ex_left: %d\n", task, dev, | 302 | " t_task_cdbs_ex_left: %d\n", task, dev, |
303 | atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left)); | 303 | atomic_read(&cmd->t_task->t_task_cdbs_ex_left)); |
304 | 304 | ||
305 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 305 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
306 | continue; | 306 | continue; |
307 | } | 307 | } |
308 | fe_count = atomic_read(&T_TASK(cmd)->t_fe_count); | 308 | fe_count = atomic_read(&cmd->t_task->t_fe_count); |
309 | 309 | ||
310 | if (atomic_read(&T_TASK(cmd)->t_transport_active)) { | 310 | if (atomic_read(&cmd->t_task->t_transport_active)) { |
311 | DEBUG_LR("LUN_RESET: got t_transport_active = 1 for" | 311 | DEBUG_LR("LUN_RESET: got t_transport_active = 1 for" |
312 | " task: %p, t_fe_count: %d dev: %p\n", task, | 312 | " task: %p, t_fe_count: %d dev: %p\n", task, |
313 | fe_count, dev); | 313 | fe_count, dev); |
314 | atomic_set(&T_TASK(cmd)->t_transport_aborted, 1); | 314 | atomic_set(&cmd->t_task->t_transport_aborted, 1); |
315 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | 315 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, |
316 | flags); | 316 | flags); |
317 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); | 317 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); |
318 | 318 | ||
@@ -321,8 +321,8 @@ int core_tmr_lun_reset( | |||
321 | } | 321 | } |
322 | DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p," | 322 | DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p," |
323 | " t_fe_count: %d dev: %p\n", task, fe_count, dev); | 323 | " t_fe_count: %d dev: %p\n", task, fe_count, dev); |
324 | atomic_set(&T_TASK(cmd)->t_transport_aborted, 1); | 324 | atomic_set(&cmd->t_task->t_transport_aborted, 1); |
325 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 325 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
326 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); | 326 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); |
327 | 327 | ||
328 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 328 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
@@ -365,7 +365,7 @@ int core_tmr_lun_reset( | |||
365 | if (prout_cmd == cmd) | 365 | if (prout_cmd == cmd) |
366 | continue; | 366 | continue; |
367 | 367 | ||
368 | atomic_dec(&T_TASK(cmd)->t_transport_queue_active); | 368 | atomic_dec(&cmd->t_task->t_transport_queue_active); |
369 | atomic_dec(&qobj->queue_cnt); | 369 | atomic_dec(&qobj->queue_cnt); |
370 | list_del(&qr->qr_list); | 370 | list_del(&qr->qr_list); |
371 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 371 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
@@ -376,7 +376,7 @@ int core_tmr_lun_reset( | |||
376 | DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:" | 376 | DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:" |
377 | " %d t_fe_count: %d\n", (preempt_and_abort_list) ? | 377 | " %d t_fe_count: %d\n", (preempt_and_abort_list) ? |
378 | "Preempt" : "", cmd, state, | 378 | "Preempt" : "", cmd, state, |
379 | atomic_read(&T_TASK(cmd)->t_fe_count)); | 379 | atomic_read(&cmd->t_task->t_fe_count)); |
380 | /* | 380 | /* |
381 | * Signal that the command has failed via cmd->se_cmd_flags, | 381 | * Signal that the command has failed via cmd->se_cmd_flags, |
382 | * and call TFO->new_cmd_failure() to wakeup any fabric | 382 | * and call TFO->new_cmd_failure() to wakeup any fabric |
@@ -388,7 +388,7 @@ int core_tmr_lun_reset( | |||
388 | transport_new_cmd_failure(cmd); | 388 | transport_new_cmd_failure(cmd); |
389 | 389 | ||
390 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, | 390 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, |
391 | atomic_read(&T_TASK(cmd)->t_fe_count)); | 391 | atomic_read(&cmd->t_task->t_fe_count)); |
392 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | 392 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); |
393 | } | 393 | } |
394 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 394 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
@@ -411,6 +411,6 @@ int core_tmr_lun_reset( | |||
411 | 411 | ||
412 | DEBUG_LR("LUN_RESET: %s for [%s] Complete\n", | 412 | DEBUG_LR("LUN_RESET: %s for [%s] Complete\n", |
413 | (preempt_and_abort_list) ? "Preempt" : "TMR", | 413 | (preempt_and_abort_list) ? "Preempt" : "TMR", |
414 | TRANSPORT(dev)->name); | 414 | dev->transport->name); |
415 | return 0; | 415 | return 0; |
416 | } | 416 | } |
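
[Note: the bulk of the churn in this file is mechanical: indirection-only accessor macros such as T_TASK(cmd) and TRANSPORT(dev) are expanded to the plain dereferences cmd->t_task and dev->transport they always hid. A standalone sketch of the pattern; the struct layout here is pared down and illustrative, not the kernel's:]

    #include <stdio.h>

    struct se_transport_task { int t_transport_aborted; };
    struct se_cmd { struct se_transport_task *t_task; };

    /* Old style: a macro that merely hides one pointer hop. */
    #define T_TASK(cmd) ((cmd)->t_task)

    int main(void)
    {
            struct se_transport_task tt = { 0 };
            struct se_cmd cmd = { &tt };

            T_TASK(&cmd)->t_transport_aborted = 1;  /* before the cleanup */
            cmd.t_task->t_transport_aborted = 1;    /* after: same access, no macro */
            printf("%d\n", cmd.t_task->t_transport_aborted);
            return 0;
    }
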
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 5ec745fed931..448129f74cf9 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c | |||
@@ -44,6 +44,12 @@ | |||
44 | #include <target/target_core_fabric_ops.h> | 44 | #include <target/target_core_fabric_ops.h> |
45 | 45 | ||
46 | #include "target_core_hba.h" | 46 | #include "target_core_hba.h" |
47 | #include "target_core_stat.h" | ||
48 | |||
49 | extern struct se_device *g_lun0_dev; | ||
50 | |||
51 | static DEFINE_SPINLOCK(tpg_lock); | ||
52 | static LIST_HEAD(tpg_list); | ||
47 | 53 | ||
48 | /* core_clear_initiator_node_from_tpg(): | 54 | /* core_clear_initiator_node_from_tpg(): |
49 | * | 55 | * |
@@ -68,7 +74,7 @@ static void core_clear_initiator_node_from_tpg( | |||
68 | if (!deve->se_lun) { | 74 | if (!deve->se_lun) { |
69 | printk(KERN_ERR "%s device entries device pointer is" | 75 | printk(KERN_ERR "%s device entries device pointer is" |
70 | " NULL, but Initiator has access.\n", | 76 | " NULL, but Initiator has access.\n", |
71 | TPG_TFO(tpg)->get_fabric_name()); | 77 | tpg->se_tpg_tfo->get_fabric_name()); |
72 | continue; | 78 | continue; |
73 | } | 79 | } |
74 | 80 | ||
@@ -171,7 +177,7 @@ void core_tpg_add_node_to_devs( | |||
171 | * By default in LIO-Target $FABRIC_MOD, | 177 | * By default in LIO-Target $FABRIC_MOD, |
172 | * demo_mode_write_protect is ON, or READ_ONLY; | 178 | * demo_mode_write_protect is ON, or READ_ONLY; |
173 | */ | 179 | */ |
174 | if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) { | 180 | if (!(tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg))) { |
175 | if (dev->dev_flags & DF_READ_ONLY) | 181 | if (dev->dev_flags & DF_READ_ONLY) |
176 | lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; | 182 | lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; |
177 | else | 183 | else |
@@ -181,7 +187,7 @@ void core_tpg_add_node_to_devs( | |||
181 | * Allow only optical drives to issue R/W in default RO | 187 | * Allow only optical drives to issue R/W in default RO |
182 | * demo mode. | 188 | * demo mode. |
183 | */ | 189 | */ |
184 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) | 190 | if (dev->transport->get_device_type(dev) == TYPE_DISK) |
185 | lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; | 191 | lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; |
186 | else | 192 | else |
187 | lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; | 193 | lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; |
@@ -189,8 +195,8 @@ void core_tpg_add_node_to_devs( | |||
189 | 195 | ||
190 | printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s" | 196 | printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s" |
191 | " access for LUN in Demo Mode\n", | 197 | " access for LUN in Demo Mode\n", |
192 | TPG_TFO(tpg)->get_fabric_name(), | 198 | tpg->se_tpg_tfo->get_fabric_name(), |
193 | TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun, | 199 | tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, |
194 | (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ? | 200 | (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ? |
195 | "READ-WRITE" : "READ-ONLY"); | 201 | "READ-WRITE" : "READ-ONLY"); |
196 | 202 | ||
@@ -211,7 +217,7 @@ static int core_set_queue_depth_for_node( | |||
211 | { | 217 | { |
212 | if (!acl->queue_depth) { | 218 | if (!acl->queue_depth) { |
213 | printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0," | 219 | printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0," |
214 | "defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(), | 220 | "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(), |
215 | acl->initiatorname); | 221 | acl->initiatorname); |
216 | acl->queue_depth = 1; | 222 | acl->queue_depth = 1; |
217 | } | 223 | } |
@@ -233,7 +239,7 @@ static int core_create_device_list_for_node(struct se_node_acl *nacl) | |||
233 | if (!(nacl->device_list)) { | 239 | if (!(nacl->device_list)) { |
234 | printk(KERN_ERR "Unable to allocate memory for" | 240 | printk(KERN_ERR "Unable to allocate memory for" |
235 | " struct se_node_acl->device_list\n"); | 241 | " struct se_node_acl->device_list\n"); |
236 | return -1; | 242 | return -ENOMEM; |
237 | } | 243 | } |
238 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 244 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
239 | deve = &nacl->device_list[i]; | 245 | deve = &nacl->device_list[i]; |
@@ -262,10 +268,10 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( | |||
262 | if ((acl)) | 268 | if ((acl)) |
263 | return acl; | 269 | return acl; |
264 | 270 | ||
265 | if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg))) | 271 | if (!(tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))) |
266 | return NULL; | 272 | return NULL; |
267 | 273 | ||
268 | acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg); | 274 | acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg); |
269 | if (!(acl)) | 275 | if (!(acl)) |
270 | return NULL; | 276 | return NULL; |
271 | 277 | ||
@@ -274,23 +280,23 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( | |||
274 | spin_lock_init(&acl->device_list_lock); | 280 | spin_lock_init(&acl->device_list_lock); |
275 | spin_lock_init(&acl->nacl_sess_lock); | 281 | spin_lock_init(&acl->nacl_sess_lock); |
276 | atomic_set(&acl->acl_pr_ref_count, 0); | 282 | atomic_set(&acl->acl_pr_ref_count, 0); |
277 | acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg); | 283 | acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg); |
278 | snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); | 284 | snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); |
279 | acl->se_tpg = tpg; | 285 | acl->se_tpg = tpg; |
280 | acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); | 286 | acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); |
281 | spin_lock_init(&acl->stats_lock); | 287 | spin_lock_init(&acl->stats_lock); |
282 | acl->dynamic_node_acl = 1; | 288 | acl->dynamic_node_acl = 1; |
283 | 289 | ||
284 | TPG_TFO(tpg)->set_default_node_attributes(acl); | 290 | tpg->se_tpg_tfo->set_default_node_attributes(acl); |
285 | 291 | ||
286 | if (core_create_device_list_for_node(acl) < 0) { | 292 | if (core_create_device_list_for_node(acl) < 0) { |
287 | TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); | 293 | tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); |
288 | return NULL; | 294 | return NULL; |
289 | } | 295 | } |
290 | 296 | ||
291 | if (core_set_queue_depth_for_node(tpg, acl) < 0) { | 297 | if (core_set_queue_depth_for_node(tpg, acl) < 0) { |
292 | core_free_device_list_for_node(acl, tpg); | 298 | core_free_device_list_for_node(acl, tpg); |
293 | TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); | 299 | tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); |
294 | return NULL; | 300 | return NULL; |
295 | } | 301 | } |
296 | 302 | ||
@@ -302,9 +308,9 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( | |||
302 | spin_unlock_bh(&tpg->acl_node_lock); | 308 | spin_unlock_bh(&tpg->acl_node_lock); |
303 | 309 | ||
304 | printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" | 310 | printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" |
305 | " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(), | 311 | " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
306 | TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth, | 312 | tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, |
307 | TPG_TFO(tpg)->get_fabric_name(), initiatorname); | 313 | tpg->se_tpg_tfo->get_fabric_name(), initiatorname); |
308 | 314 | ||
309 | return acl; | 315 | return acl; |
310 | } | 316 | } |
@@ -355,8 +361,8 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( | |||
355 | if (acl->dynamic_node_acl) { | 361 | if (acl->dynamic_node_acl) { |
356 | acl->dynamic_node_acl = 0; | 362 | acl->dynamic_node_acl = 0; |
357 | printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL" | 363 | printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL" |
358 | " for %s\n", TPG_TFO(tpg)->get_fabric_name(), | 364 | " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
359 | TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname); | 365 | tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); |
360 | spin_unlock_bh(&tpg->acl_node_lock); | 366 | spin_unlock_bh(&tpg->acl_node_lock); |
361 | /* | 367 | /* |
362 | * Release the locally allocated struct se_node_acl | 368 | * Release the locally allocated struct se_node_acl |
@@ -364,15 +370,15 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( | |||
364 | * a pointer to an existing demo mode node ACL. | 370 | * a pointer to an existing demo mode node ACL. |
365 | */ | 371 | */ |
366 | if (se_nacl) | 372 | if (se_nacl) |
367 | TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, | 373 | tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, |
368 | se_nacl); | 374 | se_nacl); |
369 | goto done; | 375 | goto done; |
370 | } | 376 | } |
371 | 377 | ||
372 | printk(KERN_ERR "ACL entry for %s Initiator" | 378 | printk(KERN_ERR "ACL entry for %s Initiator" |
373 | " Node %s already exists for TPG %u, ignoring" | 379 | " Node %s already exists for TPG %u, ignoring" |
374 | " request.\n", TPG_TFO(tpg)->get_fabric_name(), | 380 | " request.\n", tpg->se_tpg_tfo->get_fabric_name(), |
375 | initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg)); | 381 | initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
376 | spin_unlock_bh(&tpg->acl_node_lock); | 382 | spin_unlock_bh(&tpg->acl_node_lock); |
377 | return ERR_PTR(-EEXIST); | 383 | return ERR_PTR(-EEXIST); |
378 | } | 384 | } |
@@ -400,16 +406,16 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( | |||
400 | acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); | 406 | acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); |
401 | spin_lock_init(&acl->stats_lock); | 407 | spin_lock_init(&acl->stats_lock); |
402 | 408 | ||
403 | TPG_TFO(tpg)->set_default_node_attributes(acl); | 409 | tpg->se_tpg_tfo->set_default_node_attributes(acl); |
404 | 410 | ||
405 | if (core_create_device_list_for_node(acl) < 0) { | 411 | if (core_create_device_list_for_node(acl) < 0) { |
406 | TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); | 412 | tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); |
407 | return ERR_PTR(-ENOMEM); | 413 | return ERR_PTR(-ENOMEM); |
408 | } | 414 | } |
409 | 415 | ||
410 | if (core_set_queue_depth_for_node(tpg, acl) < 0) { | 416 | if (core_set_queue_depth_for_node(tpg, acl) < 0) { |
411 | core_free_device_list_for_node(acl, tpg); | 417 | core_free_device_list_for_node(acl, tpg); |
412 | TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); | 418 | tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); |
413 | return ERR_PTR(-EINVAL); | 419 | return ERR_PTR(-EINVAL); |
414 | } | 420 | } |
415 | 421 | ||
@@ -420,9 +426,9 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( | |||
420 | 426 | ||
421 | done: | 427 | done: |
422 | printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" | 428 | printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" |
423 | " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(), | 429 | " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
424 | TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth, | 430 | tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, |
425 | TPG_TFO(tpg)->get_fabric_name(), initiatorname); | 431 | tpg->se_tpg_tfo->get_fabric_name(), initiatorname); |
426 | 432 | ||
427 | return acl; | 433 | return acl; |
428 | } | 434 | } |
@@ -457,7 +463,7 @@ int core_tpg_del_initiator_node_acl( | |||
457 | /* | 463 | /* |
458 | * Determine if the session needs to be closed by our context. | 464 | * Determine if the session needs to be closed by our context. |
459 | */ | 465 | */ |
460 | if (!(TPG_TFO(tpg)->shutdown_session(sess))) | 466 | if (!(tpg->se_tpg_tfo->shutdown_session(sess))) |
461 | continue; | 467 | continue; |
462 | 468 | ||
463 | spin_unlock_bh(&tpg->session_lock); | 469 | spin_unlock_bh(&tpg->session_lock); |
@@ -465,7 +471,7 @@ int core_tpg_del_initiator_node_acl( | |||
465 | * If the $FABRIC_MOD session for the Initiator Node ACL exists, | 471 | * If the $FABRIC_MOD session for the Initiator Node ACL exists, |
466 | * forcefully shutdown the $FABRIC_MOD session/nexus. | 472 | * forcefully shutdown the $FABRIC_MOD session/nexus. |
467 | */ | 473 | */ |
468 | TPG_TFO(tpg)->close_session(sess); | 474 | tpg->se_tpg_tfo->close_session(sess); |
469 | 475 | ||
470 | spin_lock_bh(&tpg->session_lock); | 476 | spin_lock_bh(&tpg->session_lock); |
471 | } | 477 | } |
@@ -476,9 +482,9 @@ int core_tpg_del_initiator_node_acl( | |||
476 | core_free_device_list_for_node(acl, tpg); | 482 | core_free_device_list_for_node(acl, tpg); |
477 | 483 | ||
478 | printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s" | 484 | printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s" |
479 | " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(), | 485 | " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
480 | TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth, | 486 | tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, |
481 | TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname); | 487 | tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname); |
482 | 488 | ||
483 | return 0; | 489 | return 0; |
484 | } | 490 | } |
@@ -503,8 +509,8 @@ int core_tpg_set_initiator_node_queue_depth( | |||
503 | if (!(acl)) { | 509 | if (!(acl)) { |
504 | printk(KERN_ERR "Access Control List entry for %s Initiator" | 510 | printk(KERN_ERR "Access Control List entry for %s Initiator" |
505 | " Node %s does not exists for TPG %hu, ignoring" | 511 | " Node %s does not exists for TPG %hu, ignoring" |
506 | " request.\n", TPG_TFO(tpg)->get_fabric_name(), | 512 | " request.\n", tpg->se_tpg_tfo->get_fabric_name(), |
507 | initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg)); | 513 | initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
508 | spin_unlock_bh(&tpg->acl_node_lock); | 514 | spin_unlock_bh(&tpg->acl_node_lock); |
509 | return -ENODEV; | 515 | return -ENODEV; |
510 | } | 516 | } |
@@ -525,7 +531,7 @@ int core_tpg_set_initiator_node_queue_depth( | |||
525 | " operational. To forcefully change the queue" | 531 | " operational. To forcefully change the queue" |
526 | " depth and force session reinstatement" | 532 | " depth and force session reinstatement" |
527 | " use the \"force=1\" parameter.\n", | 533 | " use the \"force=1\" parameter.\n", |
528 | TPG_TFO(tpg)->get_fabric_name(), initiatorname); | 534 | tpg->se_tpg_tfo->get_fabric_name(), initiatorname); |
529 | spin_unlock_bh(&tpg->session_lock); | 535 | spin_unlock_bh(&tpg->session_lock); |
530 | 536 | ||
531 | spin_lock_bh(&tpg->acl_node_lock); | 537 | spin_lock_bh(&tpg->acl_node_lock); |
@@ -537,7 +543,7 @@ int core_tpg_set_initiator_node_queue_depth( | |||
537 | /* | 543 | /* |
538 | * Determine if the session needs to be closed by our context. | 544 | * Determine if the session needs to be closed by our context. |
539 | */ | 545 | */ |
540 | if (!(TPG_TFO(tpg)->shutdown_session(sess))) | 546 | if (!(tpg->se_tpg_tfo->shutdown_session(sess))) |
541 | continue; | 547 | continue; |
542 | 548 | ||
543 | init_sess = sess; | 549 | init_sess = sess; |
@@ -549,7 +555,7 @@ int core_tpg_set_initiator_node_queue_depth( | |||
549 | * Change the value in the Node's struct se_node_acl, and call | 555 | * Change the value in the Node's struct se_node_acl, and call |
550 | * core_set_queue_depth_for_node() to add the requested queue depth. | 556 | * core_set_queue_depth_for_node() to add the requested queue depth. |
551 | * | 557 | * |
552 | * Finally call TPG_TFO(tpg)->close_session() to force session | 558 | * Finally call tpg->se_tpg_tfo->close_session() to force session |
553 | * reinstatement to occur if there is an active session for the | 559 | * reinstatement to occur if there is an active session for the |
554 | * $FABRIC_MOD Initiator Node in question. | 560 | * $FABRIC_MOD Initiator Node in question. |
555 | */ | 561 | */ |
@@ -561,10 +567,10 @@ int core_tpg_set_initiator_node_queue_depth( | |||
561 | * Force session reinstatement if | 567 | * Force session reinstatement if |
562 | * core_set_queue_depth_for_node() failed, because we assume | 568 | * core_set_queue_depth_for_node() failed, because we assume |
563 | * the $FABRIC_MOD has already the set session reinstatement | 569 | * the $FABRIC_MOD has already the set session reinstatement |
564 | * bit from TPG_TFO(tpg)->shutdown_session() called above. | 570 | * bit from tpg->se_tpg_tfo->shutdown_session() called above. |
565 | */ | 571 | */ |
566 | if (init_sess) | 572 | if (init_sess) |
567 | TPG_TFO(tpg)->close_session(init_sess); | 573 | tpg->se_tpg_tfo->close_session(init_sess); |
568 | 574 | ||
569 | spin_lock_bh(&tpg->acl_node_lock); | 575 | spin_lock_bh(&tpg->acl_node_lock); |
570 | if (dynamic_acl) | 576 | if (dynamic_acl) |
@@ -578,12 +584,12 @@ int core_tpg_set_initiator_node_queue_depth( | |||
578 | * forcefully shutdown the $FABRIC_MOD session/nexus. | 584 | * forcefully shutdown the $FABRIC_MOD session/nexus. |
579 | */ | 585 | */ |
580 | if (init_sess) | 586 | if (init_sess) |
581 | TPG_TFO(tpg)->close_session(init_sess); | 587 | tpg->se_tpg_tfo->close_session(init_sess); |
582 | 588 | ||
583 | printk(KERN_INFO "Successfuly changed queue depth to: %d for Initiator" | 589 | printk(KERN_INFO "Successfuly changed queue depth to: %d for Initiator" |
584 | " Node: %s on %s Target Portal Group: %u\n", queue_depth, | 590 | " Node: %s on %s Target Portal Group: %u\n", queue_depth, |
585 | initiatorname, TPG_TFO(tpg)->get_fabric_name(), | 591 | initiatorname, tpg->se_tpg_tfo->get_fabric_name(), |
586 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 592 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
587 | 593 | ||
588 | spin_lock_bh(&tpg->acl_node_lock); | 594 | spin_lock_bh(&tpg->acl_node_lock); |
589 | if (dynamic_acl) | 595 | if (dynamic_acl) |
@@ -597,7 +603,7 @@ EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth); | |||
597 | static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) | 603 | static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) |
598 | { | 604 | { |
599 | /* Set in core_dev_setup_virtual_lun0() */ | 605 | /* Set in core_dev_setup_virtual_lun0() */ |
600 | struct se_device *dev = se_global->g_lun0_dev; | 606 | struct se_device *dev = g_lun0_dev; |
601 | struct se_lun *lun = &se_tpg->tpg_virt_lun0; | 607 | struct se_lun *lun = &se_tpg->tpg_virt_lun0; |
602 | u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; | 608 | u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; |
603 | int ret; | 609 | int ret; |
@@ -614,7 +620,7 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) | |||
614 | 620 | ||
615 | ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); | 621 | ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); |
616 | if (ret < 0) | 622 | if (ret < 0) |
617 | return -1; | 623 | return ret; |
618 | 624 | ||
619 | return 0; | 625 | return 0; |
620 | } | 626 | } |
@@ -663,7 +669,7 @@ int core_tpg_register( | |||
663 | se_tpg->se_tpg_wwn = se_wwn; | 669 | se_tpg->se_tpg_wwn = se_wwn; |
664 | atomic_set(&se_tpg->tpg_pr_ref_count, 0); | 670 | atomic_set(&se_tpg->tpg_pr_ref_count, 0); |
665 | INIT_LIST_HEAD(&se_tpg->acl_node_list); | 671 | INIT_LIST_HEAD(&se_tpg->acl_node_list); |
666 | INIT_LIST_HEAD(&se_tpg->se_tpg_list); | 672 | INIT_LIST_HEAD(&se_tpg->se_tpg_node); |
667 | INIT_LIST_HEAD(&se_tpg->tpg_sess_list); | 673 | INIT_LIST_HEAD(&se_tpg->tpg_sess_list); |
668 | spin_lock_init(&se_tpg->acl_node_lock); | 674 | spin_lock_init(&se_tpg->acl_node_lock); |
669 | spin_lock_init(&se_tpg->session_lock); | 675 | spin_lock_init(&se_tpg->session_lock); |
@@ -676,9 +682,9 @@ int core_tpg_register( | |||
676 | } | 682 | } |
677 | } | 683 | } |
678 | 684 | ||
679 | spin_lock_bh(&se_global->se_tpg_lock); | 685 | spin_lock_bh(&tpg_lock); |
680 | list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list); | 686 | list_add_tail(&se_tpg->se_tpg_node, &tpg_list); |
681 | spin_unlock_bh(&se_global->se_tpg_lock); | 687 | spin_unlock_bh(&tpg_lock); |
682 | 688 | ||
683 | printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for" | 689 | printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for" |
684 | " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(), | 690 | " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(), |
@@ -697,13 +703,13 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) | |||
697 | printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group" | 703 | printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group" |
698 | " for endpoint: %s Portal Tag %u\n", | 704 | " for endpoint: %s Portal Tag %u\n", |
699 | (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? | 705 | (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? |
700 | "Normal" : "Discovery", TPG_TFO(se_tpg)->get_fabric_name(), | 706 | "Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(), |
701 | TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg), | 707 | se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg), |
702 | TPG_TFO(se_tpg)->tpg_get_tag(se_tpg)); | 708 | se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); |
703 | 709 | ||
704 | spin_lock_bh(&se_global->se_tpg_lock); | 710 | spin_lock_bh(&tpg_lock); |
705 | list_del(&se_tpg->se_tpg_list); | 711 | list_del(&se_tpg->se_tpg_node); |
706 | spin_unlock_bh(&se_global->se_tpg_lock); | 712 | spin_unlock_bh(&tpg_lock); |
707 | 713 | ||
708 | while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) | 714 | while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) |
709 | cpu_relax(); | 715 | cpu_relax(); |
@@ -721,7 +727,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) | |||
721 | 727 | ||
722 | core_tpg_wait_for_nacl_pr_ref(nacl); | 728 | core_tpg_wait_for_nacl_pr_ref(nacl); |
723 | core_free_device_list_for_node(nacl, se_tpg); | 729 | core_free_device_list_for_node(nacl, se_tpg); |
724 | TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl); | 730 | se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl); |
725 | 731 | ||
726 | spin_lock_bh(&se_tpg->acl_node_lock); | 732 | spin_lock_bh(&se_tpg->acl_node_lock); |
727 | } | 733 | } |
@@ -745,9 +751,9 @@ struct se_lun *core_tpg_pre_addlun( | |||
745 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | 751 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { |
746 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" | 752 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" |
747 | "-1: %u for Target Portal Group: %u\n", | 753 | "-1: %u for Target Portal Group: %u\n", |
748 | TPG_TFO(tpg)->get_fabric_name(), | 754 | tpg->se_tpg_tfo->get_fabric_name(), |
749 | unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, | 755 | unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, |
750 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 756 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
751 | return ERR_PTR(-EOVERFLOW); | 757 | return ERR_PTR(-EOVERFLOW); |
752 | } | 758 | } |
753 | 759 | ||
@@ -756,8 +762,8 @@ struct se_lun *core_tpg_pre_addlun( | |||
756 | if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) { | 762 | if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) { |
757 | printk(KERN_ERR "TPG Logical Unit Number: %u is already active" | 763 | printk(KERN_ERR "TPG Logical Unit Number: %u is already active" |
758 | " on %s Target Portal Group: %u, ignoring request.\n", | 764 | " on %s Target Portal Group: %u, ignoring request.\n", |
759 | unpacked_lun, TPG_TFO(tpg)->get_fabric_name(), | 765 | unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(), |
760 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 766 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
761 | spin_unlock(&tpg->tpg_lun_lock); | 767 | spin_unlock(&tpg->tpg_lun_lock); |
762 | return ERR_PTR(-EINVAL); | 768 | return ERR_PTR(-EINVAL); |
763 | } | 769 | } |
@@ -772,8 +778,11 @@ int core_tpg_post_addlun( | |||
772 | u32 lun_access, | 778 | u32 lun_access, |
773 | void *lun_ptr) | 779 | void *lun_ptr) |
774 | { | 780 | { |
775 | if (core_dev_export(lun_ptr, tpg, lun) < 0) | 781 | int ret; |
776 | return -1; | 782 | |
783 | ret = core_dev_export(lun_ptr, tpg, lun); | ||
784 | if (ret < 0) | ||
785 | return ret; | ||
777 | 786 | ||
778 | spin_lock(&tpg->tpg_lun_lock); | 787 | spin_lock(&tpg->tpg_lun_lock); |
779 | lun->lun_access = lun_access; | 788 | lun->lun_access = lun_access; |
@@ -801,9 +810,9 @@ struct se_lun *core_tpg_pre_dellun( | |||
801 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | 810 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { |
802 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" | 811 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" |
803 | "-1: %u for Target Portal Group: %u\n", | 812 | "-1: %u for Target Portal Group: %u\n", |
804 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 813 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, |
805 | TRANSPORT_MAX_LUNS_PER_TPG-1, | 814 | TRANSPORT_MAX_LUNS_PER_TPG-1, |
806 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 815 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
807 | return ERR_PTR(-EOVERFLOW); | 816 | return ERR_PTR(-EOVERFLOW); |
808 | } | 817 | } |
809 | 818 | ||
@@ -812,8 +821,8 @@ struct se_lun *core_tpg_pre_dellun( | |||
812 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { | 821 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { |
813 | printk(KERN_ERR "%s Logical Unit Number: %u is not active on" | 822 | printk(KERN_ERR "%s Logical Unit Number: %u is not active on" |
814 | " Target Portal Group: %u, ignoring request.\n", | 823 | " Target Portal Group: %u, ignoring request.\n", |
815 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 824 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, |
816 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 825 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
817 | spin_unlock(&tpg->tpg_lun_lock); | 826 | spin_unlock(&tpg->tpg_lun_lock); |
818 | return ERR_PTR(-ENODEV); | 827 | return ERR_PTR(-ENODEV); |
819 | } | 828 | } |
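
[Note: besides expanding TPG_TFO(tpg) to tpg->se_tpg_tfo throughout, this file converts bare return -1 sites to errno values (-ENOMEM in core_create_device_list_for_node()) and propagates the callee's result instead of flattening it (core_tpg_setup_virtual_lun0(), core_tpg_post_addlun()). A compilable sketch of that shape; core_dev_export_stub() is a stand-in for the real helper:]

    #include <errno.h>
    #include <stdio.h>

    static int core_dev_export_stub(void) { return -EINVAL; /* stand-in failure */ }

    static int post_addlun_old(void)
    {
            if (core_dev_export_stub() < 0)
                    return -1;              /* caller only learns "it failed" */
            return 0;
    }

    static int post_addlun_new(void)
    {
            int ret = core_dev_export_stub();
            if (ret < 0)
                    return ret;             /* caller sees the real errno */
            return 0;
    }

    int main(void)
    {
            printf("old=%d new=%d\n", post_addlun_old(), post_addlun_new());
            return 0;
    }
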
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 188225161a7e..e4406c9e66e0 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -184,7 +184,7 @@ | |||
184 | #define DEBUG_STA(x...) | 184 | #define DEBUG_STA(x...) |
185 | #endif | 185 | #endif |
186 | 186 | ||
187 | struct se_global *se_global; | 187 | static int sub_api_initialized; |
188 | 188 | ||
189 | static struct kmem_cache *se_cmd_cache; | 189 | static struct kmem_cache *se_cmd_cache; |
190 | static struct kmem_cache *se_sess_cache; | 190 | static struct kmem_cache *se_sess_cache; |
@@ -227,26 +227,8 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd, | |||
227 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); | 227 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); |
228 | static void transport_stop_all_task_timers(struct se_cmd *cmd); | 228 | static void transport_stop_all_task_timers(struct se_cmd *cmd); |
229 | 229 | ||
230 | int init_se_global(void) | 230 | int init_se_kmem_caches(void) |
231 | { | 231 | { |
232 | struct se_global *global; | ||
233 | |||
234 | global = kzalloc(sizeof(struct se_global), GFP_KERNEL); | ||
235 | if (!(global)) { | ||
236 | printk(KERN_ERR "Unable to allocate memory for struct se_global\n"); | ||
237 | return -1; | ||
238 | } | ||
239 | |||
240 | INIT_LIST_HEAD(&global->g_lu_gps_list); | ||
241 | INIT_LIST_HEAD(&global->g_se_tpg_list); | ||
242 | INIT_LIST_HEAD(&global->g_hba_list); | ||
243 | INIT_LIST_HEAD(&global->g_se_dev_list); | ||
244 | spin_lock_init(&global->g_device_lock); | ||
245 | spin_lock_init(&global->hba_lock); | ||
246 | spin_lock_init(&global->se_tpg_lock); | ||
247 | spin_lock_init(&global->lu_gps_lock); | ||
248 | spin_lock_init(&global->plugin_class_lock); | ||
249 | |||
250 | se_cmd_cache = kmem_cache_create("se_cmd_cache", | 232 | se_cmd_cache = kmem_cache_create("se_cmd_cache", |
251 | sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL); | 233 | sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL); |
252 | if (!(se_cmd_cache)) { | 234 | if (!(se_cmd_cache)) { |
@@ -325,8 +307,6 @@ int init_se_global(void) | |||
325 | goto out; | 307 | goto out; |
326 | } | 308 | } |
327 | 309 | ||
328 | se_global = global; | ||
329 | |||
330 | return 0; | 310 | return 0; |
331 | out: | 311 | out: |
332 | if (se_cmd_cache) | 312 | if (se_cmd_cache) |
@@ -349,18 +329,11 @@ out: | |||
349 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); | 329 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); |
350 | if (t10_alua_tg_pt_gp_mem_cache) | 330 | if (t10_alua_tg_pt_gp_mem_cache) |
351 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); | 331 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); |
352 | kfree(global); | 332 | return -ENOMEM; |
353 | return -1; | ||
354 | } | 333 | } |
355 | 334 | ||
356 | void release_se_global(void) | 335 | void release_se_kmem_caches(void) |
357 | { | 336 | { |
358 | struct se_global *global; | ||
359 | |||
360 | global = se_global; | ||
361 | if (!(global)) | ||
362 | return; | ||
363 | |||
364 | kmem_cache_destroy(se_cmd_cache); | 337 | kmem_cache_destroy(se_cmd_cache); |
365 | kmem_cache_destroy(se_tmr_req_cache); | 338 | kmem_cache_destroy(se_tmr_req_cache); |
366 | kmem_cache_destroy(se_sess_cache); | 339 | kmem_cache_destroy(se_sess_cache); |
@@ -371,23 +344,11 @@ void release_se_global(void) | |||
371 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); | 344 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); |
372 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); | 345 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); |
373 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); | 346 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); |
374 | kfree(global); | ||
375 | |||
376 | se_global = NULL; | ||
377 | } | 347 | } |
378 | 348 | ||
379 | /* SCSI statistics table index */ | 349 | /* This code ensures unique mib indexes are handed out. */ |
380 | static struct scsi_index_table scsi_index_table; | 350 | static DEFINE_SPINLOCK(scsi_mib_index_lock); |
381 | 351 | static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; | |
382 | /* | ||
383 | * Initialize the index table for allocating unique row indexes to various mib | ||
384 | * tables. | ||
385 | */ | ||
386 | void init_scsi_index_table(void) | ||
387 | { | ||
388 | memset(&scsi_index_table, 0, sizeof(struct scsi_index_table)); | ||
389 | spin_lock_init(&scsi_index_table.lock); | ||
390 | } | ||
391 | 352 | ||
392 | /* | 353 | /* |
393 | * Allocate a new row index for the entry type specified | 354 | * Allocate a new row index for the entry type specified |
@@ -396,16 +357,11 @@ u32 scsi_get_new_index(scsi_index_t type) | |||
396 | { | 357 | { |
397 | u32 new_index; | 358 | u32 new_index; |
398 | 359 | ||
399 | if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) { | 360 | BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); |
400 | printk(KERN_ERR "Invalid index type %d\n", type); | ||
401 | return -EINVAL; | ||
402 | } | ||
403 | 361 | ||
404 | spin_lock(&scsi_index_table.lock); | 362 | spin_lock(&scsi_mib_index_lock); |
405 | new_index = ++scsi_index_table.scsi_mib_index[type]; | 363 | new_index = ++scsi_mib_index[type]; |
406 | if (new_index == 0) | 364 | spin_unlock(&scsi_mib_index_lock); |
407 | new_index = ++scsi_index_table.scsi_mib_index[type]; | ||
408 | spin_unlock(&scsi_index_table.lock); | ||
409 | 365 | ||
410 | return new_index; | 366 | return new_index; |
411 | } | 367 | } |
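
[Note: the hunks above collapse the scsi_index_table wrapper into a static spinlock plus a bare counter array, turn the range check into a BUG_ON(), and drop both the separate init function and the wrap-to-nonzero retry. A userspace analogue of the surviving allocator, with a pthread mutex standing in for the spinlock; the enum's first member is hypothetical:]

    #include <pthread.h>
    #include <stdio.h>

    enum { SCSI_AUTH_INTR_INDEX, SCSI_INDEX_TYPE_MAX };

    static pthread_mutex_t scsi_mib_index_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int scsi_mib_index[SCSI_INDEX_TYPE_MAX];

    /* Hand out a unique, monotonically increasing index per type. */
    static unsigned int scsi_get_new_index(int type)
    {
            unsigned int new_index;

            pthread_mutex_lock(&scsi_mib_index_lock);
            new_index = ++scsi_mib_index[type];
            pthread_mutex_unlock(&scsi_mib_index_lock);
            return new_index;
    }

    int main(void)
    {
            printf("%u %u\n", scsi_get_new_index(SCSI_AUTH_INTR_INDEX),
                   scsi_get_new_index(SCSI_AUTH_INTR_INDEX));  /* prints 1 2 */
            return 0;
    }
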
@@ -444,15 +400,18 @@ static int transport_subsystem_reqmods(void) | |||
444 | 400 | ||
445 | int transport_subsystem_check_init(void) | 401 | int transport_subsystem_check_init(void) |
446 | { | 402 | { |
447 | if (se_global->g_sub_api_initialized) | 403 | int ret; |
404 | |||
405 | if (sub_api_initialized) | ||
448 | return 0; | 406 | return 0; |
449 | /* | 407 | /* |
450 | * Request the loading of known TCM subsystem plugins.. | 408 | * Request the loading of known TCM subsystem plugins.. |
451 | */ | 409 | */ |
452 | if (transport_subsystem_reqmods() < 0) | 410 | ret = transport_subsystem_reqmods(); |
453 | return -1; | 411 | if (ret < 0) |
412 | return ret; | ||
454 | 413 | ||
455 | se_global->g_sub_api_initialized = 1; | 414 | sub_api_initialized = 1; |
456 | return 0; | 415 | return 0; |
457 | } | 416 | } |
458 | 417 | ||
@@ -497,9 +456,9 @@ void __transport_register_session( | |||
497 | * If the fabric module supports an ISID based TransportID, | 456 | * If the fabric module supports an ISID based TransportID, |
498 | * save this value in binary from the fabric I_T Nexus now. | 457 | * save this value in binary from the fabric I_T Nexus now. |
499 | */ | 458 | */ |
500 | if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) { | 459 | if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { |
501 | memset(&buf[0], 0, PR_REG_ISID_LEN); | 460 | memset(&buf[0], 0, PR_REG_ISID_LEN); |
502 | TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, | 461 | se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, |
503 | &buf[0], PR_REG_ISID_LEN); | 462 | &buf[0], PR_REG_ISID_LEN); |
504 | se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); | 463 | se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); |
505 | } | 464 | } |
@@ -517,7 +476,7 @@ void __transport_register_session( | |||
517 | list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); | 476 | list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); |
518 | 477 | ||
519 | printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", | 478 | printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", |
520 | TPG_TFO(se_tpg)->get_fabric_name(), se_sess->fabric_sess_ptr); | 479 | se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); |
521 | } | 480 | } |
522 | EXPORT_SYMBOL(__transport_register_session); | 481 | EXPORT_SYMBOL(__transport_register_session); |
523 | 482 | ||
@@ -591,7 +550,7 @@ void transport_deregister_session(struct se_session *se_sess) | |||
591 | if ((se_nacl)) { | 550 | if ((se_nacl)) { |
592 | spin_lock_bh(&se_tpg->acl_node_lock); | 551 | spin_lock_bh(&se_tpg->acl_node_lock); |
593 | if (se_nacl->dynamic_node_acl) { | 552 | if (se_nacl->dynamic_node_acl) { |
594 | if (!(TPG_TFO(se_tpg)->tpg_check_demo_mode_cache( | 553 | if (!(se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( |
595 | se_tpg))) { | 554 | se_tpg))) { |
596 | list_del(&se_nacl->acl_list); | 555 | list_del(&se_nacl->acl_list); |
597 | se_tpg->num_node_acls--; | 556 | se_tpg->num_node_acls--; |
@@ -599,7 +558,7 @@ void transport_deregister_session(struct se_session *se_sess) | |||
599 | 558 | ||
600 | core_tpg_wait_for_nacl_pr_ref(se_nacl); | 559 | core_tpg_wait_for_nacl_pr_ref(se_nacl); |
601 | core_free_device_list_for_node(se_nacl, se_tpg); | 560 | core_free_device_list_for_node(se_nacl, se_tpg); |
602 | TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, | 561 | se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, |
603 | se_nacl); | 562 | se_nacl); |
604 | spin_lock_bh(&se_tpg->acl_node_lock); | 563 | spin_lock_bh(&se_tpg->acl_node_lock); |
605 | } | 564 | } |
@@ -610,12 +569,12 @@ void transport_deregister_session(struct se_session *se_sess) | |||
610 | transport_free_session(se_sess); | 569 | transport_free_session(se_sess); |
611 | 570 | ||
612 | printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n", | 571 | printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n", |
613 | TPG_TFO(se_tpg)->get_fabric_name()); | 572 | se_tpg->se_tpg_tfo->get_fabric_name()); |
614 | } | 573 | } |
615 | EXPORT_SYMBOL(transport_deregister_session); | 574 | EXPORT_SYMBOL(transport_deregister_session); |
616 | 575 | ||
617 | /* | 576 | /* |
618 | * Called with T_TASK(cmd)->t_state_lock held. | 577 | * Called with cmd->t_task->t_state_lock held. |
619 | */ | 578 | */ |
620 | static void transport_all_task_dev_remove_state(struct se_cmd *cmd) | 579 | static void transport_all_task_dev_remove_state(struct se_cmd *cmd) |
621 | { | 580 | { |
@@ -623,10 +582,10 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) | |||
623 | struct se_task *task; | 582 | struct se_task *task; |
624 | unsigned long flags; | 583 | unsigned long flags; |
625 | 584 | ||
626 | if (!T_TASK(cmd)) | 585 | if (!cmd->t_task) |
627 | return; | 586 | return; |
628 | 587 | ||
629 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { | 588 | list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) { |
630 | dev = task->se_dev; | 589 | dev = task->se_dev; |
631 | if (!(dev)) | 590 | if (!(dev)) |
632 | continue; | 591 | continue; |
@@ -640,11 +599,11 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) | |||
640 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 599 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
641 | list_del(&task->t_state_list); | 600 | list_del(&task->t_state_list); |
642 | DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n", | 601 | DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n", |
643 | CMD_TFO(cmd)->tfo_get_task_tag(cmd), dev, task); | 602 | cmd->se_tfo->tfo_get_task_tag(cmd), dev, task); |
644 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | 603 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
645 | 604 | ||
646 | atomic_set(&task->task_state_active, 0); | 605 | atomic_set(&task->task_state_active, 0); |
647 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_ex_left); | 606 | atomic_dec(&cmd->t_task->t_task_cdbs_ex_left); |
648 | } | 607 | } |
649 | } | 608 | } |
650 | 609 | ||
@@ -663,34 +622,34 @@ static int transport_cmd_check_stop( | |||
663 | { | 622 | { |
664 | unsigned long flags; | 623 | unsigned long flags; |
665 | 624 | ||
666 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 625 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
667 | /* | 626 | /* |
668 | * Determine if IOCTL context caller in requesting the stopping of this | 627 | * Determine if IOCTL context caller in requesting the stopping of this |
669 | * command for LUN shutdown purposes. | 628 | * command for LUN shutdown purposes. |
670 | */ | 629 | */ |
671 | if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) { | 630 | if (atomic_read(&cmd->t_task->transport_lun_stop)) { |
672 | DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->transport_lun_stop)" | 631 | DEBUG_CS("%s:%d atomic_read(&cmd->t_task->transport_lun_stop)" |
673 | " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, | 632 | " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, |
674 | CMD_TFO(cmd)->get_task_tag(cmd)); | 633 | cmd->se_tfo->get_task_tag(cmd)); |
675 | 634 | ||
676 | cmd->deferred_t_state = cmd->t_state; | 635 | cmd->deferred_t_state = cmd->t_state; |
677 | cmd->t_state = TRANSPORT_DEFERRED_CMD; | 636 | cmd->t_state = TRANSPORT_DEFERRED_CMD; |
678 | atomic_set(&T_TASK(cmd)->t_transport_active, 0); | 637 | atomic_set(&cmd->t_task->t_transport_active, 0); |
679 | if (transport_off == 2) | 638 | if (transport_off == 2) |
680 | transport_all_task_dev_remove_state(cmd); | 639 | transport_all_task_dev_remove_state(cmd); |
681 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 640 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
682 | 641 | ||
683 | complete(&T_TASK(cmd)->transport_lun_stop_comp); | 642 | complete(&cmd->t_task->transport_lun_stop_comp); |
684 | return 1; | 643 | return 1; |
685 | } | 644 | } |
686 | /* | 645 | /* |
687 | * Determine if frontend context caller is requesting the stopping of | 646 | * Determine if frontend context caller is requesting the stopping of |
688 | * this command for frontend excpections. | 647 | * this command for frontend exceptions. |
689 | */ | 648 | */ |
690 | if (atomic_read(&T_TASK(cmd)->t_transport_stop)) { | 649 | if (atomic_read(&cmd->t_task->t_transport_stop)) { |
691 | DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->t_transport_stop) ==" | 650 | DEBUG_CS("%s:%d atomic_read(&cmd->t_task->t_transport_stop) ==" |
692 | " TRUE for ITT: 0x%08x\n", __func__, __LINE__, | 651 | " TRUE for ITT: 0x%08x\n", __func__, __LINE__, |
693 | CMD_TFO(cmd)->get_task_tag(cmd)); | 652 | cmd->se_tfo->get_task_tag(cmd)); |
694 | 653 | ||
695 | cmd->deferred_t_state = cmd->t_state; | 654 | cmd->deferred_t_state = cmd->t_state; |
696 | cmd->t_state = TRANSPORT_DEFERRED_CMD; | 655 | cmd->t_state = TRANSPORT_DEFERRED_CMD; |
@@ -703,13 +662,13 @@ static int transport_cmd_check_stop( | |||
703 | */ | 662 | */ |
704 | if (transport_off == 2) | 663 | if (transport_off == 2) |
705 | cmd->se_lun = NULL; | 664 | cmd->se_lun = NULL; |
706 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 665 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
707 | 666 | ||
708 | complete(&T_TASK(cmd)->t_transport_stop_comp); | 667 | complete(&cmd->t_task->t_transport_stop_comp); |
709 | return 1; | 668 | return 1; |
710 | } | 669 | } |
711 | if (transport_off) { | 670 | if (transport_off) { |
712 | atomic_set(&T_TASK(cmd)->t_transport_active, 0); | 671 | atomic_set(&cmd->t_task->t_transport_active, 0); |
713 | if (transport_off == 2) { | 672 | if (transport_off == 2) { |
714 | transport_all_task_dev_remove_state(cmd); | 673 | transport_all_task_dev_remove_state(cmd); |
715 | /* | 674 | /* |
@@ -722,20 +681,20 @@ static int transport_cmd_check_stop( | |||
722 | * their internally allocated I/O reference now and | 681 | * their internally allocated I/O reference now and |
723 | * struct se_cmd now. | 682 | * struct se_cmd now. |
724 | */ | 683 | */ |
725 | if (CMD_TFO(cmd)->check_stop_free != NULL) { | 684 | if (cmd->se_tfo->check_stop_free != NULL) { |
726 | spin_unlock_irqrestore( | 685 | spin_unlock_irqrestore( |
727 | &T_TASK(cmd)->t_state_lock, flags); | 686 | &cmd->t_task->t_state_lock, flags); |
728 | 687 | ||
729 | CMD_TFO(cmd)->check_stop_free(cmd); | 688 | cmd->se_tfo->check_stop_free(cmd); |
730 | return 1; | 689 | return 1; |
731 | } | 690 | } |
732 | } | 691 | } |
733 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 692 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
734 | 693 | ||
735 | return 0; | 694 | return 0; |
736 | } else if (t_state) | 695 | } else if (t_state) |
737 | cmd->t_state = t_state; | 696 | cmd->t_state = t_state; |
738 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 697 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
739 | 698 | ||
740 | return 0; | 699 | return 0; |
741 | } | 700 | } |
@@ -747,30 +706,30 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) | |||
747 | 706 | ||
748 | static void transport_lun_remove_cmd(struct se_cmd *cmd) | 707 | static void transport_lun_remove_cmd(struct se_cmd *cmd) |
749 | { | 708 | { |
750 | struct se_lun *lun = SE_LUN(cmd); | 709 | struct se_lun *lun = cmd->se_lun; |
751 | unsigned long flags; | 710 | unsigned long flags; |
752 | 711 | ||
753 | if (!lun) | 712 | if (!lun) |
754 | return; | 713 | return; |
755 | 714 | ||
756 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 715 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
757 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { | 716 | if (!(atomic_read(&cmd->t_task->transport_dev_active))) { |
758 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 717 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
759 | goto check_lun; | 718 | goto check_lun; |
760 | } | 719 | } |
761 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); | 720 | atomic_set(&cmd->t_task->transport_dev_active, 0); |
762 | transport_all_task_dev_remove_state(cmd); | 721 | transport_all_task_dev_remove_state(cmd); |
763 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 722 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
764 | 723 | ||
765 | 724 | ||
766 | check_lun: | 725 | check_lun: |
767 | spin_lock_irqsave(&lun->lun_cmd_lock, flags); | 726 | spin_lock_irqsave(&lun->lun_cmd_lock, flags); |
768 | if (atomic_read(&T_TASK(cmd)->transport_lun_active)) { | 727 | if (atomic_read(&cmd->t_task->transport_lun_active)) { |
769 | list_del(&cmd->se_lun_list); | 728 | list_del(&cmd->se_lun_list); |
770 | atomic_set(&T_TASK(cmd)->transport_lun_active, 0); | 729 | atomic_set(&cmd->t_task->transport_lun_active, 0); |
771 | #if 0 | 730 | #if 0 |
772 | printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n" | 731 | printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n" |
773 | CMD_TFO(cmd)->get_task_tag(cmd), lun->unpacked_lun); | 732 | cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun); |
774 | #endif | 733 | #endif |
775 | } | 734 | } |
776 | spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); | 735 | spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); |
@@ -778,7 +737,7 @@ check_lun: | |||
778 | 737 | ||
779 | void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) | 738 | void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) |
780 | { | 739 | { |
781 | transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); | 740 | transport_remove_cmd_from_queue(cmd, &cmd->se_lun->lun_se_dev->dev_queue_obj); |
782 | transport_lun_remove_cmd(cmd); | 741 | transport_lun_remove_cmd(cmd); |
783 | 742 | ||
784 | if (transport_cmd_check_stop_to_fabric(cmd)) | 743 | if (transport_cmd_check_stop_to_fabric(cmd)) |
@@ -789,7 +748,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) | |||
789 | 748 | ||
790 | void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) | 749 | void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) |
791 | { | 750 | { |
792 | transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); | 751 | transport_remove_cmd_from_queue(cmd, &cmd->se_lun->lun_se_dev->dev_queue_obj); |
793 | 752 | ||
794 | if (transport_cmd_check_stop_to_fabric(cmd)) | 753 | if (transport_cmd_check_stop_to_fabric(cmd)) |
795 | return; | 754 | return; |
@@ -802,7 +761,7 @@ static int transport_add_cmd_to_queue( | |||
802 | int t_state) | 761 | int t_state) |
803 | { | 762 | { |
804 | struct se_device *dev = cmd->se_dev; | 763 | struct se_device *dev = cmd->se_dev; |
805 | struct se_queue_obj *qobj = dev->dev_queue_obj; | 764 | struct se_queue_obj *qobj = &dev->dev_queue_obj; |
806 | struct se_queue_req *qr; | 765 | struct se_queue_req *qr; |
807 | unsigned long flags; | 766 | unsigned long flags; |
808 | 767 | ||
@@ -810,23 +769,23 @@ static int transport_add_cmd_to_queue( | |||
810 | if (!(qr)) { | 769 | if (!(qr)) { |
811 | printk(KERN_ERR "Unable to allocate memory for" | 770 | printk(KERN_ERR "Unable to allocate memory for" |
812 | " struct se_queue_req\n"); | 771 | " struct se_queue_req\n"); |
813 | return -1; | 772 | return -ENOMEM; |
814 | } | 773 | } |
815 | INIT_LIST_HEAD(&qr->qr_list); | 774 | INIT_LIST_HEAD(&qr->qr_list); |
816 | 775 | ||
817 | qr->cmd = (void *)cmd; | 776 | qr->cmd = cmd; |
818 | qr->state = t_state; | 777 | qr->state = t_state; |
819 | 778 | ||
820 | if (t_state) { | 779 | if (t_state) { |
821 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 780 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
822 | cmd->t_state = t_state; | 781 | cmd->t_state = t_state; |
823 | atomic_set(&T_TASK(cmd)->t_transport_active, 1); | 782 | atomic_set(&cmd->t_task->t_transport_active, 1); |
824 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 783 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
825 | } | 784 | } |
826 | 785 | ||
827 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | 786 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); |
828 | list_add_tail(&qr->qr_list, &qobj->qobj_list); | 787 | list_add_tail(&qr->qr_list, &qobj->qobj_list); |
829 | atomic_inc(&T_TASK(cmd)->t_transport_queue_active); | 788 | atomic_inc(&cmd->t_task->t_transport_queue_active); |
830 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 789 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
831 | 790 | ||
832 | atomic_inc(&qobj->queue_cnt); | 791 | atomic_inc(&qobj->queue_cnt); |
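
[Note: with se_queue_req.cmd now declared struct se_cmd * instead of void *, the enqueue above and the dequeue paths below lose their (void *) and (struct se_cmd *) casts. A pared-down sketch of what the type change buys; the struct bodies are illustrative:]

    #include <stdio.h>

    struct se_cmd { int t_state; };
    struct se_queue_req_old { void *cmd; };            /* before the patch */
    struct se_queue_req_new { struct se_cmd *cmd; };   /* after the patch */

    int main(void)
    {
            struct se_cmd cmd = { 7 };
            struct se_queue_req_old qo = { &cmd };          /* implicitly becomes void * */
            struct se_queue_req_new qn = { &cmd };          /* typed: no casts anywhere */
            struct se_cmd *back = (struct se_cmd *)qo.cmd;  /* old dequeue needed this */

            printf("%d %d\n", back->t_state, qn.cmd->t_state);
            return 0;
    }
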
@@ -838,31 +797,8 @@ static int transport_add_cmd_to_queue( | |||
838 | * Called with struct se_queue_obj->cmd_queue_lock held. | 797 | * Called with struct se_queue_obj->cmd_queue_lock held. |
839 | */ | 798 | */ |
840 | static struct se_queue_req * | 799 | static struct se_queue_req * |
841 | __transport_get_qr_from_queue(struct se_queue_obj *qobj) | ||
842 | { | ||
843 | struct se_cmd *cmd; | ||
844 | struct se_queue_req *qr = NULL; | ||
845 | |||
846 | if (list_empty(&qobj->qobj_list)) | ||
847 | return NULL; | ||
848 | |||
849 | list_for_each_entry(qr, &qobj->qobj_list, qr_list) | ||
850 | break; | ||
851 | |||
852 | if (qr->cmd) { | ||
853 | cmd = (struct se_cmd *)qr->cmd; | ||
854 | atomic_dec(&T_TASK(cmd)->t_transport_queue_active); | ||
855 | } | ||
856 | list_del(&qr->qr_list); | ||
857 | atomic_dec(&qobj->queue_cnt); | ||
858 | |||
859 | return qr; | ||
860 | } | ||
861 | |||
862 | static struct se_queue_req * | ||
863 | transport_get_qr_from_queue(struct se_queue_obj *qobj) | 800 | transport_get_qr_from_queue(struct se_queue_obj *qobj) |
864 | { | 801 | { |
865 | struct se_cmd *cmd; | ||
866 | struct se_queue_req *qr; | 802 | struct se_queue_req *qr; |
867 | unsigned long flags; | 803 | unsigned long flags; |
868 | 804 | ||
@@ -875,10 +811,9 @@ transport_get_qr_from_queue(struct se_queue_obj *qobj) | |||
875 | list_for_each_entry(qr, &qobj->qobj_list, qr_list) | 811 | list_for_each_entry(qr, &qobj->qobj_list, qr_list) |
876 | break; | 812 | break; |
877 | 813 | ||
878 | if (qr->cmd) { | 814 | if (qr->cmd) |
879 | cmd = (struct se_cmd *)qr->cmd; | 815 | atomic_dec(&qr->cmd->t_task->t_transport_queue_active); |
880 | atomic_dec(&T_TASK(cmd)->t_transport_queue_active); | 816 | |
881 | } | ||
882 | list_del(&qr->qr_list); | 817 | list_del(&qr->qr_list); |
883 | atomic_dec(&qobj->queue_cnt); | 818 | atomic_dec(&qobj->queue_cnt); |
884 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 819 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
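
[Note: __transport_get_qr_from_queue() duplicated its locking caller almost line for line, so it is deleted and transport_get_qr_from_queue() keeps the single locked pop-the-head path. A userspace sketch of the surviving shape; a plain singly linked list and a pthread mutex stand in for list_head and the spinlock:]

    #include <pthread.h>
    #include <stdio.h>

    struct queue_req { int state; struct queue_req *next; };

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct queue_req *queue_head;

    /* Pop the oldest entry under the lock, as the one remaining
     * getter now does, with no unlocked twin to keep in sync. */
    static struct queue_req *get_req_from_queue(void)
    {
            struct queue_req *qr;

            pthread_mutex_lock(&queue_lock);
            qr = queue_head;
            if (qr)
                    queue_head = qr->next;
            pthread_mutex_unlock(&queue_lock);
            return qr;
    }

    int main(void)
    {
            struct queue_req a = { 1, NULL };

            queue_head = &a;
            printf("%d\n", get_req_from_queue()->state);
            return 0;
    }
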
@@ -889,32 +824,30 @@ transport_get_qr_from_queue(struct se_queue_obj *qobj) | |||
889 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd, | 824 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd, |
890 | struct se_queue_obj *qobj) | 825 | struct se_queue_obj *qobj) |
891 | { | 826 | { |
892 | struct se_cmd *q_cmd; | ||
893 | struct se_queue_req *qr = NULL, *qr_p = NULL; | 827 | struct se_queue_req *qr = NULL, *qr_p = NULL; |
894 | unsigned long flags; | 828 | unsigned long flags; |
895 | 829 | ||
896 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | 830 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); |
897 | if (!(atomic_read(&T_TASK(cmd)->t_transport_queue_active))) { | 831 | if (!(atomic_read(&cmd->t_task->t_transport_queue_active))) { |
898 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 832 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
899 | return; | 833 | return; |
900 | } | 834 | } |
901 | 835 | ||
902 | list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) { | 836 | list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) { |
903 | q_cmd = (struct se_cmd *)qr->cmd; | 837 | if (qr->cmd != cmd) |
904 | if (q_cmd != cmd) | ||
905 | continue; | 838 | continue; |
906 | 839 | ||
907 | atomic_dec(&T_TASK(q_cmd)->t_transport_queue_active); | 840 | atomic_dec(&qr->cmd->t_task->t_transport_queue_active); |
908 | atomic_dec(&qobj->queue_cnt); | 841 | atomic_dec(&qobj->queue_cnt); |
909 | list_del(&qr->qr_list); | 842 | list_del(&qr->qr_list); |
910 | kfree(qr); | 843 | kfree(qr); |
911 | } | 844 | } |
912 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 845 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
913 | 846 | ||
914 | if (atomic_read(&T_TASK(cmd)->t_transport_queue_active)) { | 847 | if (atomic_read(&cmd->t_task->t_transport_queue_active)) { |
915 | printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n", | 848 | printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n", |
916 | CMD_TFO(cmd)->get_task_tag(cmd), | 849 | cmd->se_tfo->get_task_tag(cmd), |
917 | atomic_read(&T_TASK(cmd)->t_transport_queue_active)); | 850 | atomic_read(&cmd->t_task->t_transport_queue_active)); |
918 | } | 851 | } |
919 | } | 852 | } |
920 | 853 | ||
@@ -924,7 +857,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd, | |||
924 | */ | 857 | */ |
925 | void transport_complete_sync_cache(struct se_cmd *cmd, int good) | 858 | void transport_complete_sync_cache(struct se_cmd *cmd, int good) |
926 | { | 859 | { |
927 | struct se_task *task = list_entry(T_TASK(cmd)->t_task_list.next, | 860 | struct se_task *task = list_entry(cmd->t_task->t_task_list.next, |
928 | struct se_task, t_list); | 861 | struct se_task, t_list); |
929 | 862 | ||
930 | if (good) { | 863 | if (good) { |
@@ -933,7 +866,7 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good) | |||
933 | } else { | 866 | } else { |
934 | task->task_scsi_status = SAM_STAT_CHECK_CONDITION; | 867 | task->task_scsi_status = SAM_STAT_CHECK_CONDITION; |
935 | task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST; | 868 | task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST; |
936 | TASK_CMD(task)->transport_error_status = | 869 | task->task_se_cmd->transport_error_status = |
937 | PYX_TRANSPORT_ILLEGAL_REQUEST; | 870 | PYX_TRANSPORT_ILLEGAL_REQUEST; |
938 | } | 871 | } |
939 | 872 | ||
@@ -948,22 +881,18 @@ EXPORT_SYMBOL(transport_complete_sync_cache); | |||
948 | */ | 881 | */ |
949 | void transport_complete_task(struct se_task *task, int success) | 882 | void transport_complete_task(struct se_task *task, int success) |
950 | { | 883 | { |
951 | struct se_cmd *cmd = TASK_CMD(task); | 884 | struct se_cmd *cmd = task->task_se_cmd; |
952 | struct se_device *dev = task->se_dev; | 885 | struct se_device *dev = task->se_dev; |
953 | int t_state; | 886 | int t_state; |
954 | unsigned long flags; | 887 | unsigned long flags; |
955 | #if 0 | 888 | #if 0 |
956 | printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task, | 889 | printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task, |
957 | T_TASK(cmd)->t_task_cdb[0], dev); | 890 | cmd->t_task->t_task_cdb[0], dev); |
958 | #endif | 891 | #endif |
959 | if (dev) { | 892 | if (dev) |
960 | spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); | ||
961 | atomic_inc(&dev->depth_left); | 893 | atomic_inc(&dev->depth_left); |
962 | atomic_inc(&SE_HBA(dev)->left_queue_depth); | ||
963 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | ||
964 | } | ||
965 | 894 | ||
966 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 895 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
967 | atomic_set(&task->task_active, 0); | 896 | atomic_set(&task->task_active, 0); |
968 | 897 | ||
969 | /* | 898 | /* |
@@ -985,14 +914,14 @@ void transport_complete_task(struct se_task *task, int success) | |||
985 | */ | 914 | */ |
986 | if (atomic_read(&task->task_stop)) { | 915 | if (atomic_read(&task->task_stop)) { |
987 | /* | 916 | /* |
988 | * Decrement T_TASK(cmd)->t_se_count if this task had | 917 | * Decrement cmd->t_task->t_se_count if this task had |
989 | * previously thrown its timeout exception handler. | 918 | * previously thrown its timeout exception handler. |
990 | */ | 919 | */ |
991 | if (atomic_read(&task->task_timeout)) { | 920 | if (atomic_read(&task->task_timeout)) { |
992 | atomic_dec(&T_TASK(cmd)->t_se_count); | 921 | atomic_dec(&cmd->t_task->t_se_count); |
993 | atomic_set(&task->task_timeout, 0); | 922 | atomic_set(&task->task_timeout, 0); |
994 | } | 923 | } |
995 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 924 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
996 | 925 | ||
997 | complete(&task->task_stop_comp); | 926 | complete(&task->task_stop_comp); |
998 | return; | 927 | return; |
@@ -1004,33 +933,33 @@ void transport_complete_task(struct se_task *task, int success) | |||
1004 | */ | 933 | */ |
1005 | if (atomic_read(&task->task_timeout)) { | 934 | if (atomic_read(&task->task_timeout)) { |
1006 | if (!(atomic_dec_and_test( | 935 | if (!(atomic_dec_and_test( |
1007 | &T_TASK(cmd)->t_task_cdbs_timeout_left))) { | 936 | &cmd->t_task->t_task_cdbs_timeout_left))) { |
1008 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | 937 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, |
1009 | flags); | 938 | flags); |
1010 | return; | 939 | return; |
1011 | } | 940 | } |
1012 | t_state = TRANSPORT_COMPLETE_TIMEOUT; | 941 | t_state = TRANSPORT_COMPLETE_TIMEOUT; |
1013 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 942 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
1014 | 943 | ||
1015 | transport_add_cmd_to_queue(cmd, t_state); | 944 | transport_add_cmd_to_queue(cmd, t_state); |
1016 | return; | 945 | return; |
1017 | } | 946 | } |
1018 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_timeout_left); | 947 | atomic_dec(&cmd->t_task->t_task_cdbs_timeout_left); |
1019 | 948 | ||
1020 | /* | 949 | /* |
1021 | * Decrement the outstanding t_task_cdbs_left count. The last | 950 | * Decrement the outstanding t_task_cdbs_left count. The last |
1022 | * struct se_task from struct se_cmd will complete itself into the | 951 | * struct se_task from struct se_cmd will complete itself into the |
1023 | * device queue depending upon int success. | 952 | * device queue depending upon int success. |
1024 | */ | 953 | */ |
1025 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) { | 954 | if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_left))) { |
1026 | if (!success) | 955 | if (!success) |
1027 | T_TASK(cmd)->t_tasks_failed = 1; | 956 | cmd->t_task->t_tasks_failed = 1; |
1028 | 957 | ||
1029 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 958 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
1030 | return; | 959 | return; |
1031 | } | 960 | } |
1032 | 961 | ||
1033 | if (!success || T_TASK(cmd)->t_tasks_failed) { | 962 | if (!success || cmd->t_task->t_tasks_failed) { |
1034 | t_state = TRANSPORT_COMPLETE_FAILURE; | 963 | t_state = TRANSPORT_COMPLETE_FAILURE; |
1035 | if (!task->task_error_status) { | 964 | if (!task->task_error_status) { |
1036 | task->task_error_status = | 965 | task->task_error_status = |
@@ -1039,10 +968,10 @@ void transport_complete_task(struct se_task *task, int success) | |||
1039 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | 968 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; |
1040 | } | 969 | } |
1041 | } else { | 970 | } else { |
1042 | atomic_set(&T_TASK(cmd)->t_transport_complete, 1); | 971 | atomic_set(&cmd->t_task->t_transport_complete, 1); |
1043 | t_state = TRANSPORT_COMPLETE_OK; | 972 | t_state = TRANSPORT_COMPLETE_OK; |
1044 | } | 973 | } |
1045 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 974 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
1046 | 975 | ||
1047 | transport_add_cmd_to_queue(cmd, t_state); | 976 | transport_add_cmd_to_queue(cmd, t_state); |
1048 | } | 977 | } |
@@ -1125,7 +1054,7 @@ static void __transport_add_task_to_execute_queue( | |||
1125 | atomic_set(&task->task_state_active, 1); | 1054 | atomic_set(&task->task_state_active, 1); |
1126 | 1055 | ||
1127 | DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", | 1056 | DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", |
1128 | CMD_TFO(task->task_se_cmd)->get_task_tag(task->task_se_cmd), | 1057 | task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd), |
1129 | task, dev); | 1058 | task, dev); |
1130 | } | 1059 | } |
1131 | 1060 | ||
@@ -1135,8 +1064,8 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) | |||
1135 | struct se_task *task; | 1064 | struct se_task *task; |
1136 | unsigned long flags; | 1065 | unsigned long flags; |
1137 | 1066 | ||
1138 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 1067 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
1139 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { | 1068 | list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) { |
1140 | dev = task->se_dev; | 1069 | dev = task->se_dev; |
1141 | 1070 | ||
1142 | if (atomic_read(&task->task_state_active)) | 1071 | if (atomic_read(&task->task_state_active)) |
@@ -1147,22 +1076,22 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) | |||
1147 | atomic_set(&task->task_state_active, 1); | 1076 | atomic_set(&task->task_state_active, 1); |
1148 | 1077 | ||
1149 | DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", | 1078 | DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", |
1150 | CMD_TFO(task->task_se_cmd)->get_task_tag( | 1079 | task->task_se_cmd->se_tfo->get_task_tag( |
1151 | task->task_se_cmd), task, dev); | 1080 | task->task_se_cmd), task, dev); |
1152 | 1081 | ||
1153 | spin_unlock(&dev->execute_task_lock); | 1082 | spin_unlock(&dev->execute_task_lock); |
1154 | } | 1083 | } |
1155 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 1084 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
1156 | } | 1085 | } |
1157 | 1086 | ||
1158 | static void transport_add_tasks_from_cmd(struct se_cmd *cmd) | 1087 | static void transport_add_tasks_from_cmd(struct se_cmd *cmd) |
1159 | { | 1088 | { |
1160 | struct se_device *dev = SE_DEV(cmd); | 1089 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
1161 | struct se_task *task, *task_prev = NULL; | 1090 | struct se_task *task, *task_prev = NULL; |
1162 | unsigned long flags; | 1091 | unsigned long flags; |
1163 | 1092 | ||
1164 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 1093 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
1165 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { | 1094 | list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) { |
1166 | if (atomic_read(&task->task_execute_queue)) | 1095 | if (atomic_read(&task->task_execute_queue)) |
1167 | continue; | 1096 | continue; |
1168 | /* | 1097 | /* |
@@ -1174,30 +1103,6 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd) | |||
1174 | task_prev = task; | 1103 | task_prev = task; |
1175 | } | 1104 | } |
1176 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | 1105 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
1177 | |||
1178 | return; | ||
1179 | } | ||
1180 | |||
1181 | /* transport_get_task_from_execute_queue(): | ||
1182 | * | ||
1183 | * Called with dev->execute_task_lock held. | ||
1184 | */ | ||
1185 | static struct se_task * | ||
1186 | transport_get_task_from_execute_queue(struct se_device *dev) | ||
1187 | { | ||
1188 | struct se_task *task; | ||
1189 | |||
1190 | if (list_empty(&dev->execute_task_list)) | ||
1191 | return NULL; | ||
1192 | |||
1193 | list_for_each_entry(task, &dev->execute_task_list, t_execute_list) | ||
1194 | break; | ||
1195 | |||
1196 | list_del(&task->t_execute_list); | ||
1197 | atomic_set(&task->task_execute_queue, 0); | ||
1198 | atomic_dec(&dev->execute_tasks); | ||
1199 | |||
1200 | return task; | ||
1201 | } | 1106 | } |
1202 | 1107 | ||
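transport_get_task_from_execute_queue() is removed outright, with no replacement on the new side of this hunk, so its body presumably moves into its lone caller. A sketch of the equivalent inlined form, assuming dev->execute_task_lock is held as the deleted comment required:

	/* caller bails out first if the queue is empty, then: */
	task = list_first_entry(&dev->execute_task_list,
				struct se_task, t_execute_list);
	list_del(&task->t_execute_list);
	atomic_set(&task->task_execute_queue, 0);
	atomic_dec(&dev->execute_tasks);

list_first_entry() expresses directly what the helper spelled as a list_for_each_entry() that broke on the first iteration.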
1203 | /* transport_remove_task_from_execute_queue(): | 1108 | /* transport_remove_task_from_execute_queue(): |
@@ -1269,7 +1174,7 @@ void transport_dump_dev_state( | |||
1269 | atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left), | 1174 | atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left), |
1270 | dev->queue_depth); | 1175 | dev->queue_depth); |
1271 | *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n", | 1176 | *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n", |
1272 | DEV_ATTRIB(dev)->block_size, DEV_ATTRIB(dev)->max_sectors); | 1177 | dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors); |
1273 | *bl += sprintf(b + *bl, " "); | 1178 | *bl += sprintf(b + *bl, " "); |
1274 | } | 1179 | } |
1275 | 1180 | ||
@@ -1284,28 +1189,28 @@ static void transport_release_all_cmds(struct se_device *dev) | |||
1284 | int bug_out = 0, t_state; | 1189 | int bug_out = 0, t_state; |
1285 | unsigned long flags; | 1190 | unsigned long flags; |
1286 | 1191 | ||
1287 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); | 1192 | spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); |
1288 | list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list, | 1193 | list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj.qobj_list, |
1289 | qr_list) { | 1194 | qr_list) { |
1290 | 1195 | ||
1291 | cmd = (struct se_cmd *)qr->cmd; | 1196 | cmd = qr->cmd; |
1292 | t_state = qr->state; | 1197 | t_state = qr->state; |
1293 | list_del(&qr->qr_list); | 1198 | list_del(&qr->qr_list); |
1294 | kfree(qr); | 1199 | kfree(qr); |
1295 | spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, | 1200 | spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, |
1296 | flags); | 1201 | flags); |
1297 | 1202 | ||
1298 | printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u," | 1203 | printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u," |
1299 | " t_state: %u directly\n", | 1204 | " t_state: %u directly\n", |
1300 | CMD_TFO(cmd)->get_task_tag(cmd), | 1205 | cmd->se_tfo->get_task_tag(cmd), |
1301 | CMD_TFO(cmd)->get_cmd_state(cmd), t_state); | 1206 | cmd->se_tfo->get_cmd_state(cmd), t_state); |
1302 | 1207 | ||
1303 | transport_release_fe_cmd(cmd); | 1208 | transport_release_fe_cmd(cmd); |
1304 | bug_out = 1; | 1209 | bug_out = 1; |
1305 | 1210 | ||
1306 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); | 1211 | spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); |
1307 | } | 1212 | } |
1308 | spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags); | 1213 | spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags); |
1309 | #if 0 | 1214 | #if 0 |
1310 | if (bug_out) | 1215 | if (bug_out) |
1311 | BUG(); | 1216 | BUG(); |
@@ -1387,7 +1292,8 @@ int transport_dump_vpd_assoc( | |||
1387 | int p_buf_len) | 1292 | int p_buf_len) |
1388 | { | 1293 | { |
1389 | unsigned char buf[VPD_TMP_BUF_SIZE]; | 1294 | unsigned char buf[VPD_TMP_BUF_SIZE]; |
1390 | int ret = 0, len; | 1295 | int ret = 0; |
1296 | int len; | ||
1391 | 1297 | ||
1392 | memset(buf, 0, VPD_TMP_BUF_SIZE); | 1298 | memset(buf, 0, VPD_TMP_BUF_SIZE); |
1393 | len = sprintf(buf, "T10 VPD Identifier Association: "); | 1299 | len = sprintf(buf, "T10 VPD Identifier Association: "); |
@@ -1404,7 +1310,7 @@ int transport_dump_vpd_assoc( | |||
1404 | break; | 1310 | break; |
1405 | default: | 1311 | default: |
1406 | sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); | 1312 | sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); |
1407 | ret = -1; | 1313 | ret = -EINVAL; |
1408 | break; | 1314 | break; |
1409 | } | 1315 | } |
1410 | 1316 | ||
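Returning -EINVAL here instead of a bare -1 follows the kernel convention of negative errno return values, so a caller can propagate a specific failure rather than a generic one. A hypothetical caller:

	int ret = transport_dump_vpd_assoc(vpd, NULL, 0);
	if (ret < 0)
		return ret;	/* now -EINVAL for an unknown association */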
@@ -1434,7 +1340,8 @@ int transport_dump_vpd_ident_type( | |||
1434 | int p_buf_len) | 1340 | int p_buf_len) |
1435 | { | 1341 | { |
1436 | unsigned char buf[VPD_TMP_BUF_SIZE]; | 1342 | unsigned char buf[VPD_TMP_BUF_SIZE]; |
1437 | int ret = 0, len; | 1343 | int ret = 0; |
1344 | int len; | ||
1438 | 1345 | ||
1439 | memset(buf, 0, VPD_TMP_BUF_SIZE); | 1346 | memset(buf, 0, VPD_TMP_BUF_SIZE); |
1440 | len = sprintf(buf, "T10 VPD Identifier Type: "); | 1347 | len = sprintf(buf, "T10 VPD Identifier Type: "); |
@@ -1461,14 +1368,17 @@ int transport_dump_vpd_ident_type( | |||
1461 | default: | 1368 | default: |
1462 | sprintf(buf+len, "Unsupported: 0x%02x\n", | 1369 | sprintf(buf+len, "Unsupported: 0x%02x\n", |
1463 | vpd->device_identifier_type); | 1370 | vpd->device_identifier_type); |
1464 | ret = -1; | 1371 | ret = -EINVAL; |
1465 | break; | 1372 | break; |
1466 | } | 1373 | } |
1467 | 1374 | ||
1468 | if (p_buf) | 1375 | if (p_buf) { |
1376 | if (p_buf_len < strlen(buf)+1) | ||
1377 | return -EINVAL; | ||
1469 | strncpy(p_buf, buf, p_buf_len); | 1378 | strncpy(p_buf, buf, p_buf_len); |
1470 | else | 1379 | } else { |
1471 | printk("%s", buf); | 1380 | printk("%s", buf); |
1381 | } | ||
1472 | 1382 | ||
1473 | return ret; | 1383 | return ret; |
1474 | } | 1384 | } |
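The new p_buf_len check matters because strncpy() does not NUL-terminate the destination when the source is at least as long as the limit. A minimal illustration with a hypothetical 8-byte destination:

	char p_buf[8];

	strncpy(p_buf, "T10 VPD Identifier Type: NAA\n", sizeof(p_buf));
	/* p_buf now holds "T10 VPD " with no terminating '\0';
	 * a later "%s" format would read past the buffer. */

Rejecting the copy with -EINVAL when the formatted string cannot fit guarantees the strncpy() above always includes the terminator.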
@@ -1511,7 +1421,7 @@ int transport_dump_vpd_ident( | |||
1511 | default: | 1421 | default: |
1512 | sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" | 1422 | sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" |
1513 | " 0x%02x", vpd->device_identifier_code_set); | 1423 | " 0x%02x", vpd->device_identifier_code_set); |
1514 | ret = -1; | 1424 | ret = -EINVAL; |
1515 | break; | 1425 | break; |
1516 | } | 1426 | } |
1517 | 1427 | ||
@@ -1569,20 +1479,20 @@ static void core_setup_task_attr_emulation(struct se_device *dev) | |||
1569 | * This is currently not available in upstream Linux/SCSI Target | 1479 | * This is currently not available in upstream Linux/SCSI Target |
1570 | * mode code, and is assumed to be disabled while using TCM/pSCSI. | 1480 | * mode code, and is assumed to be disabled while using TCM/pSCSI. |
1571 | */ | 1481 | */ |
1572 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | 1482 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
1573 | dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH; | 1483 | dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH; |
1574 | return; | 1484 | return; |
1575 | } | 1485 | } |
1576 | 1486 | ||
1577 | dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; | 1487 | dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; |
1578 | DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" | 1488 | DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" |
1579 | " device\n", TRANSPORT(dev)->name, | 1489 | " device\n", dev->transport->name, |
1580 | TRANSPORT(dev)->get_device_rev(dev)); | 1490 | dev->transport->get_device_rev(dev)); |
1581 | } | 1491 | } |
1582 | 1492 | ||
1583 | static void scsi_dump_inquiry(struct se_device *dev) | 1493 | static void scsi_dump_inquiry(struct se_device *dev) |
1584 | { | 1494 | { |
1585 | struct t10_wwn *wwn = DEV_T10_WWN(dev); | 1495 | struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; |
1586 | int i, device_type; | 1496 | int i, device_type; |
1587 | /* | 1497 | /* |
1588 | * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer | 1498 | * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer |
@@ -1610,10 +1520,10 @@ static void scsi_dump_inquiry(struct se_device *dev) | |||
1610 | 1520 | ||
1611 | printk("\n"); | 1521 | printk("\n"); |
1612 | 1522 | ||
1613 | device_type = TRANSPORT(dev)->get_device_type(dev); | 1523 | device_type = dev->transport->get_device_type(dev); |
1614 | printk(" Type: %s ", scsi_device_type(device_type)); | 1524 | printk(" Type: %s ", scsi_device_type(device_type)); |
1615 | printk(" ANSI SCSI revision: %02x\n", | 1525 | printk(" ANSI SCSI revision: %02x\n", |
1616 | TRANSPORT(dev)->get_device_rev(dev)); | 1526 | dev->transport->get_device_rev(dev)); |
1617 | } | 1527 | } |
1618 | 1528 | ||
1619 | struct se_device *transport_add_device_to_core_hba( | 1529 | struct se_device *transport_add_device_to_core_hba( |
@@ -1634,26 +1544,8 @@ struct se_device *transport_add_device_to_core_hba( | |||
1634 | printk(KERN_ERR "Unable to allocate memory for se_dev_t\n"); | 1544 | printk(KERN_ERR "Unable to allocate memory for se_dev_t\n"); |
1635 | return NULL; | 1545 | return NULL; |
1636 | } | 1546 | } |
1637 | dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL); | ||
1638 | if (!(dev->dev_queue_obj)) { | ||
1639 | printk(KERN_ERR "Unable to allocate memory for" | ||
1640 | " dev->dev_queue_obj\n"); | ||
1641 | kfree(dev); | ||
1642 | return NULL; | ||
1643 | } | ||
1644 | transport_init_queue_obj(dev->dev_queue_obj); | ||
1645 | |||
1646 | dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj), | ||
1647 | GFP_KERNEL); | ||
1648 | if (!(dev->dev_status_queue_obj)) { | ||
1649 | printk(KERN_ERR "Unable to allocate memory for" | ||
1650 | " dev->dev_status_queue_obj\n"); | ||
1651 | kfree(dev->dev_queue_obj); | ||
1652 | kfree(dev); | ||
1653 | return NULL; | ||
1654 | } | ||
1655 | transport_init_queue_obj(dev->dev_status_queue_obj); | ||
1656 | 1547 | ||
1548 | transport_init_queue_obj(&dev->dev_queue_obj); | ||
1657 | dev->dev_flags = device_flags; | 1549 | dev->dev_flags = device_flags; |
1658 | dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; | 1550 | dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; |
1659 | dev->dev_ptr = (void *) transport_dev; | 1551 | dev->dev_ptr = (void *) transport_dev; |
@@ -1715,10 +1607,10 @@ struct se_device *transport_add_device_to_core_hba( | |||
1715 | * Startup the struct se_device processing thread | 1607 | * Startup the struct se_device processing thread |
1716 | */ | 1608 | */ |
1717 | dev->process_thread = kthread_run(transport_processing_thread, dev, | 1609 | dev->process_thread = kthread_run(transport_processing_thread, dev, |
1718 | "LIO_%s", TRANSPORT(dev)->name); | 1610 | "LIO_%s", dev->transport->name); |
1719 | if (IS_ERR(dev->process_thread)) { | 1611 | if (IS_ERR(dev->process_thread)) { |
1720 | printk(KERN_ERR "Unable to create kthread: LIO_%s\n", | 1612 | printk(KERN_ERR "Unable to create kthread: LIO_%s\n", |
1721 | TRANSPORT(dev)->name); | 1613 | dev->transport->name); |
1722 | goto out; | 1614 | goto out; |
1723 | } | 1615 | } |
1724 | 1616 | ||
@@ -1730,16 +1622,16 @@ struct se_device *transport_add_device_to_core_hba( | |||
1730 | * originals once back into DEV_T10_WWN(dev) for the virtual device | 1622 | * originals once back into dev->se_sub_dev->t10_wwn for the virtual device |
1731 | * setup. | 1623 | * setup. |
1732 | */ | 1624 | */ |
1733 | if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { | 1625 | if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { |
1734 | if (!(inquiry_prod) || !(inquiry_rev)) { | 1626 | if (!(inquiry_prod) || !(inquiry_rev)) { |
1735 | printk(KERN_ERR "All non TCM/pSCSI plugins require" | 1627 | printk(KERN_ERR "All non TCM/pSCSI plugins require" |
1736 | " INQUIRY consts\n"); | 1628 | " INQUIRY consts\n"); |
1737 | goto out; | 1629 | goto out; |
1738 | } | 1630 | } |
1739 | 1631 | ||
1740 | strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8); | 1632 | strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8); |
1741 | strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16); | 1633 | strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16); |
1742 | strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4); | 1634 | strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4); |
1743 | } | 1635 | } |
1744 | scsi_dump_inquiry(dev); | 1636 | scsi_dump_inquiry(dev); |
1745 | 1637 | ||
@@ -1754,8 +1646,6 @@ out: | |||
1754 | 1646 | ||
1755 | se_release_vpd_for_dev(dev); | 1647 | se_release_vpd_for_dev(dev); |
1756 | 1648 | ||
1757 | kfree(dev->dev_status_queue_obj); | ||
1758 | kfree(dev->dev_queue_obj); | ||
1759 | kfree(dev); | 1649 | kfree(dev); |
1760 | 1650 | ||
1761 | return NULL; | 1651 | return NULL; |
@@ -1794,7 +1684,7 @@ transport_generic_get_task(struct se_cmd *cmd, | |||
1794 | enum dma_data_direction data_direction) | 1684 | enum dma_data_direction data_direction) |
1795 | { | 1685 | { |
1796 | struct se_task *task; | 1686 | struct se_task *task; |
1797 | struct se_device *dev = SE_DEV(cmd); | 1687 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
1798 | unsigned long flags; | 1688 | unsigned long flags; |
1799 | 1689 | ||
1800 | task = dev->transport->alloc_task(cmd); | 1690 | task = dev->transport->alloc_task(cmd); |
@@ -1807,14 +1697,14 @@ transport_generic_get_task(struct se_cmd *cmd, | |||
1807 | INIT_LIST_HEAD(&task->t_execute_list); | 1697 | INIT_LIST_HEAD(&task->t_execute_list); |
1808 | INIT_LIST_HEAD(&task->t_state_list); | 1698 | INIT_LIST_HEAD(&task->t_state_list); |
1809 | init_completion(&task->task_stop_comp); | 1699 | init_completion(&task->task_stop_comp); |
1810 | task->task_no = T_TASK(cmd)->t_tasks_no++; | 1700 | task->task_no = cmd->t_task->t_tasks_no++; |
1811 | task->task_se_cmd = cmd; | 1701 | task->task_se_cmd = cmd; |
1812 | task->se_dev = dev; | 1702 | task->se_dev = dev; |
1813 | task->task_data_direction = data_direction; | 1703 | task->task_data_direction = data_direction; |
1814 | 1704 | ||
1815 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 1705 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
1816 | list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list); | 1706 | list_add_tail(&task->t_list, &cmd->t_task->t_task_list); |
1817 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 1707 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
1818 | 1708 | ||
1819 | return task; | 1709 | return task; |
1820 | } | 1710 | } |
@@ -1823,7 +1713,7 @@ static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); | |||
1823 | 1713 | ||
1824 | void transport_device_setup_cmd(struct se_cmd *cmd) | 1714 | void transport_device_setup_cmd(struct se_cmd *cmd) |
1825 | { | 1715 | { |
1826 | cmd->se_dev = SE_LUN(cmd)->lun_se_dev; | 1716 | cmd->se_dev = cmd->se_lun->lun_se_dev; |
1827 | } | 1717 | } |
1828 | EXPORT_SYMBOL(transport_device_setup_cmd); | 1718 | EXPORT_SYMBOL(transport_device_setup_cmd); |
1829 | 1719 | ||
@@ -1848,12 +1738,12 @@ void transport_init_se_cmd( | |||
1848 | */ | 1738 | */ |
1849 | cmd->t_task = &cmd->t_task_backstore; | 1739 | cmd->t_task = &cmd->t_task_backstore; |
1850 | 1740 | ||
1851 | INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list); | 1741 | INIT_LIST_HEAD(&cmd->t_task->t_task_list); |
1852 | init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp); | 1742 | init_completion(&cmd->t_task->transport_lun_fe_stop_comp); |
1853 | init_completion(&T_TASK(cmd)->transport_lun_stop_comp); | 1743 | init_completion(&cmd->t_task->transport_lun_stop_comp); |
1854 | init_completion(&T_TASK(cmd)->t_transport_stop_comp); | 1744 | init_completion(&cmd->t_task->t_transport_stop_comp); |
1855 | spin_lock_init(&T_TASK(cmd)->t_state_lock); | 1745 | spin_lock_init(&cmd->t_task->t_state_lock); |
1856 | atomic_set(&T_TASK(cmd)->transport_dev_active, 1); | 1746 | atomic_set(&cmd->t_task->transport_dev_active, 1); |
1857 | 1747 | ||
1858 | cmd->se_tfo = tfo; | 1748 | cmd->se_tfo = tfo; |
1859 | cmd->se_sess = se_sess; | 1749 | cmd->se_sess = se_sess; |
@@ -1870,19 +1760,19 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd) | |||
1870 | * Check if SAM Task Attribute emulation is enabled for this | 1760 | * Check if SAM Task Attribute emulation is enabled for this |
1871 | * struct se_device storage object | 1761 | * struct se_device storage object |
1872 | */ | 1762 | */ |
1873 | if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) | 1763 | if (cmd->se_lun->lun_se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) |
1874 | return 0; | 1764 | return 0; |
1875 | 1765 | ||
1876 | if (cmd->sam_task_attr == MSG_ACA_TAG) { | 1766 | if (cmd->sam_task_attr == MSG_ACA_TAG) { |
1877 | DEBUG_STA("SAM Task Attribute ACA" | 1767 | DEBUG_STA("SAM Task Attribute ACA" |
1878 | " emulation is not supported\n"); | 1768 | " emulation is not supported\n"); |
1879 | return -1; | 1769 | return -EINVAL; |
1880 | } | 1770 | } |
1881 | /* | 1771 | /* |
1882 | * Used to determine when ORDERED commands should go from | 1772 | * Used to determine when ORDERED commands should go from |
1883 | * Dormant to Active status. | 1773 | * Dormant to Active status. |
1884 | */ | 1774 | */ |
1885 | cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id); | 1775 | cmd->se_ordered_id = atomic_inc_return(&cmd->se_lun->lun_se_dev->dev_ordered_id); |
1886 | smp_mb__after_atomic_inc(); | 1776 | smp_mb__after_atomic_inc(); |
1887 | DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", | 1777 | DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", |
1888 | cmd->se_ordered_id, cmd->sam_task_attr, | 1778 | cmd->se_ordered_id, cmd->sam_task_attr, |
@@ -1898,8 +1788,8 @@ void transport_free_se_cmd( | |||
1898 | /* | 1788 | /* |
1899 | * Check and free any extended CDB buffer that was allocated | 1789 | * Check and free any extended CDB buffer that was allocated |
1900 | */ | 1790 | */ |
1901 | if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb) | 1791 | if (se_cmd->t_task->t_task_cdb != se_cmd->t_task->__t_task_cdb) |
1902 | kfree(T_TASK(se_cmd)->t_task_cdb); | 1792 | kfree(se_cmd->t_task->t_task_cdb); |
1903 | } | 1793 | } |
1904 | EXPORT_SYMBOL(transport_free_se_cmd); | 1794 | EXPORT_SYMBOL(transport_free_se_cmd); |
1905 | 1795 | ||
@@ -1931,33 +1821,33 @@ int transport_generic_allocate_tasks( | |||
1931 | printk(KERN_ERR "Received SCSI CDB with command_size: %d that" | 1821 | printk(KERN_ERR "Received SCSI CDB with command_size: %d that" |
1932 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", | 1822 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", |
1933 | scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); | 1823 | scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); |
1934 | return -1; | 1824 | return -EINVAL; |
1935 | } | 1825 | } |
1936 | /* | 1826 | /* |
1937 | * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, | 1827 | * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, |
1938 | * allocate the additional extended CDB buffer now.. Otherwise | 1828 | * allocate the additional extended CDB buffer now.. Otherwise |
1939 | * setup the pointer from __t_task_cdb to t_task_cdb. | 1829 | * setup the pointer from __t_task_cdb to t_task_cdb. |
1940 | */ | 1830 | */ |
1941 | if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) { | 1831 | if (scsi_command_size(cdb) > sizeof(cmd->t_task->__t_task_cdb)) { |
1942 | T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb), | 1832 | cmd->t_task->t_task_cdb = kzalloc(scsi_command_size(cdb), |
1943 | GFP_KERNEL); | 1833 | GFP_KERNEL); |
1944 | if (!(T_TASK(cmd)->t_task_cdb)) { | 1834 | if (!(cmd->t_task->t_task_cdb)) { |
1945 | printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb" | 1835 | printk(KERN_ERR "Unable to allocate cmd->t_task->t_task_cdb" |
1946 | " %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n", | 1836 | " %u > sizeof(cmd->t_task->__t_task_cdb): %lu ops\n", |
1947 | scsi_command_size(cdb), | 1837 | scsi_command_size(cdb), |
1948 | (unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb)); | 1838 | (unsigned long)sizeof(cmd->t_task->__t_task_cdb)); |
1949 | return -1; | 1839 | return -ENOMEM; |
1950 | } | 1840 | } |
1951 | } else | 1841 | } else |
1952 | T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0]; | 1842 | cmd->t_task->t_task_cdb = &cmd->t_task->__t_task_cdb[0]; |
1953 | /* | 1843 | /* |
1954 | * Copy the original CDB into T_TASK(cmd). | 1844 | * Copy the original CDB into cmd->t_task. |
1955 | */ | 1845 | */ |
1956 | memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb)); | 1846 | memcpy(cmd->t_task->t_task_cdb, cdb, scsi_command_size(cdb)); |
1957 | /* | 1847 | /* |
1958 | * Setup the received CDB based on SCSI defined opcodes and | 1848 | * Setup the received CDB based on SCSI defined opcodes and |
1959 | * perform unit attention, persistent reservations and ALUA | 1849 | * perform unit attention, persistent reservations and ALUA |
1960 | * checks for virtual device backends. The T_TASK(cmd)->t_task_cdb | 1850 | * checks for virtual device backends. The cmd->t_task->t_task_cdb |
1961 | * pointer is expected to be setup before we reach this point. | 1851 | * pointer is expected to be setup before we reach this point. |
1962 | */ | 1852 | */ |
1963 | ret = transport_generic_cmd_sequencer(cmd, cdb); | 1853 | ret = transport_generic_cmd_sequencer(cmd, cdb); |
@@ -1986,10 +1876,10 @@ EXPORT_SYMBOL(transport_generic_allocate_tasks); | |||
1986 | int transport_generic_handle_cdb( | 1876 | int transport_generic_handle_cdb( |
1987 | struct se_cmd *cmd) | 1877 | struct se_cmd *cmd) |
1988 | { | 1878 | { |
1989 | if (!SE_LUN(cmd)) { | 1879 | if (!cmd->se_lun) { |
1990 | dump_stack(); | 1880 | dump_stack(); |
1991 | printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); | 1881 | printk(KERN_ERR "cmd->se_lun is NULL\n"); |
1992 | return -1; | 1882 | return -EINVAL; |
1993 | } | 1883 | } |
1994 | 1884 | ||
1995 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD); | 1885 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD); |
@@ -2005,10 +1895,10 @@ EXPORT_SYMBOL(transport_generic_handle_cdb); | |||
2005 | int transport_generic_handle_cdb_map( | 1895 | int transport_generic_handle_cdb_map( |
2006 | struct se_cmd *cmd) | 1896 | struct se_cmd *cmd) |
2007 | { | 1897 | { |
2008 | if (!SE_LUN(cmd)) { | 1898 | if (!cmd->se_lun) { |
2009 | dump_stack(); | 1899 | dump_stack(); |
2010 | printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); | 1900 | printk(KERN_ERR "cmd->se_lun is NULL\n"); |
2011 | return -1; | 1901 | return -EINVAL; |
2012 | } | 1902 | } |
2013 | 1903 | ||
2014 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP); | 1904 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP); |
@@ -2030,7 +1920,7 @@ int transport_generic_handle_data( | |||
2030 | * in interrupt code, the signal_pending() check is skipped. | 1920 | * in interrupt code, the signal_pending() check is skipped. |
2031 | */ | 1921 | */ |
2032 | if (!in_interrupt() && signal_pending(current)) | 1922 | if (!in_interrupt() && signal_pending(current)) |
2033 | return -1; | 1923 | return -EPERM; |
2034 | /* | 1924 | /* |
2035 | * If the received CDB has already been ABORTED by the generic | 1925 | * If the received CDB has already been ABORTED by the generic |
2036 | * target engine, we now call transport_check_aborted_status() | 1926 | * target engine, we now call transport_check_aborted_status() |
@@ -2078,14 +1968,14 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) | |||
2078 | int ret = 0; | 1968 | int ret = 0; |
2079 | 1969 | ||
2080 | DEBUG_TS("ITT[0x%08x] - Stopping tasks\n", | 1970 | DEBUG_TS("ITT[0x%08x] - Stopping tasks\n", |
2081 | CMD_TFO(cmd)->get_task_tag(cmd)); | 1971 | cmd->se_tfo->get_task_tag(cmd)); |
2082 | 1972 | ||
2083 | /* | 1973 | /* |
2084 | * No tasks remain in the execution queue | 1974 | * No tasks remain in the execution queue |
2085 | */ | 1975 | */ |
2086 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 1976 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
2087 | list_for_each_entry_safe(task, task_tmp, | 1977 | list_for_each_entry_safe(task, task_tmp, |
2088 | &T_TASK(cmd)->t_task_list, t_list) { | 1978 | &cmd->t_task->t_task_list, t_list) { |
2089 | DEBUG_TS("task_no[%d] - Processing task %p\n", | 1979 | DEBUG_TS("task_no[%d] - Processing task %p\n", |
2090 | task->task_no, task); | 1980 | task->task_no, task); |
2091 | /* | 1981 | /* |
@@ -2094,14 +1984,14 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) | |||
2094 | */ | 1984 | */ |
2095 | if (!atomic_read(&task->task_sent) && | 1985 | if (!atomic_read(&task->task_sent) && |
2096 | !atomic_read(&task->task_active)) { | 1986 | !atomic_read(&task->task_active)) { |
2097 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | 1987 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, |
2098 | flags); | 1988 | flags); |
2099 | transport_remove_task_from_execute_queue(task, | 1989 | transport_remove_task_from_execute_queue(task, |
2100 | task->se_dev); | 1990 | task->se_dev); |
2101 | 1991 | ||
2102 | DEBUG_TS("task_no[%d] - Removed from execute queue\n", | 1992 | DEBUG_TS("task_no[%d] - Removed from execute queue\n", |
2103 | task->task_no); | 1993 | task->task_no); |
2104 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 1994 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
2105 | continue; | 1995 | continue; |
2106 | } | 1996 | } |
2107 | 1997 | ||
@@ -2111,7 +2001,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) | |||
2111 | */ | 2001 | */ |
2112 | if (atomic_read(&task->task_active)) { | 2002 | if (atomic_read(&task->task_active)) { |
2113 | atomic_set(&task->task_stop, 1); | 2003 | atomic_set(&task->task_stop, 1); |
2114 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | 2004 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, |
2115 | flags); | 2005 | flags); |
2116 | 2006 | ||
2117 | DEBUG_TS("task_no[%d] - Waiting to complete\n", | 2007 | DEBUG_TS("task_no[%d] - Waiting to complete\n", |
@@ -2120,8 +2010,8 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) | |||
2120 | DEBUG_TS("task_no[%d] - Stopped successfully\n", | 2010 | DEBUG_TS("task_no[%d] - Stopped successfully\n", |
2121 | task->task_no); | 2011 | task->task_no); |
2122 | 2012 | ||
2123 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 2013 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
2124 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); | 2014 | atomic_dec(&cmd->t_task->t_task_cdbs_left); |
2125 | 2015 | ||
2126 | atomic_set(&task->task_active, 0); | 2016 | atomic_set(&task->task_active, 0); |
2127 | atomic_set(&task->task_stop, 0); | 2017 | atomic_set(&task->task_stop, 0); |
@@ -2132,21 +2022,11 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) | |||
2132 | 2022 | ||
2133 | __transport_stop_task_timer(task, &flags); | 2023 | __transport_stop_task_timer(task, &flags); |
2134 | } | 2024 | } |
2135 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2025 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
2136 | 2026 | ||
2137 | return ret; | 2027 | return ret; |
2138 | } | 2028 | } |
2139 | 2029 | ||
2140 | static void transport_failure_reset_queue_depth(struct se_device *dev) | ||
2141 | { | ||
2142 | unsigned long flags; | ||
2143 | |||
2144 | spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); | ||
2145 | atomic_inc(&dev->depth_left); | ||
2146 | atomic_inc(&SE_HBA(dev)->left_queue_depth); | ||
2147 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | ||
2148 | } | ||
2149 | |||
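With the spinlocked HBA-wide accounting gone, restoring a device credit on the failure path needs no helper at all; it reduces to a single lock-free operation on the remaining counter,

	atomic_inc(&dev->depth_left);

exactly as the replacement in transport_generic_request_failure() below shows.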
2150 | /* | 2030 | /* |
2151 | * Handle SAM-esque emulation for generic transport request failures. | 2031 | * Handle SAM-esque emulation for generic transport request failures. |
2152 | */ | 2032 | */ |
@@ -2157,28 +2037,28 @@ static void transport_generic_request_failure( | |||
2157 | int sc) | 2037 | int sc) |
2158 | { | 2038 | { |
2159 | DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" | 2039 | DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" |
2160 | " CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd), | 2040 | " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), |
2161 | T_TASK(cmd)->t_task_cdb[0]); | 2041 | cmd->t_task->t_task_cdb[0]); |
2162 | DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:" | 2042 | DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:" |
2163 | " %d/%d transport_error_status: %d\n", | 2043 | " %d/%d transport_error_status: %d\n", |
2164 | CMD_TFO(cmd)->get_cmd_state(cmd), | 2044 | cmd->se_tfo->get_cmd_state(cmd), |
2165 | cmd->t_state, cmd->deferred_t_state, | 2045 | cmd->t_state, cmd->deferred_t_state, |
2166 | cmd->transport_error_status); | 2046 | cmd->transport_error_status); |
2167 | DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d" | 2047 | DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d" |
2168 | " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" | 2048 | " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" |
2169 | " t_transport_active: %d t_transport_stop: %d" | 2049 | " t_transport_active: %d t_transport_stop: %d" |
2170 | " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs, | 2050 | " t_transport_sent: %d\n", cmd->t_task->t_task_cdbs, |
2171 | atomic_read(&T_TASK(cmd)->t_task_cdbs_left), | 2051 | atomic_read(&cmd->t_task->t_task_cdbs_left), |
2172 | atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), | 2052 | atomic_read(&cmd->t_task->t_task_cdbs_sent), |
2173 | atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left), | 2053 | atomic_read(&cmd->t_task->t_task_cdbs_ex_left), |
2174 | atomic_read(&T_TASK(cmd)->t_transport_active), | 2054 | atomic_read(&cmd->t_task->t_transport_active), |
2175 | atomic_read(&T_TASK(cmd)->t_transport_stop), | 2055 | atomic_read(&cmd->t_task->t_transport_stop), |
2176 | atomic_read(&T_TASK(cmd)->t_transport_sent)); | 2056 | atomic_read(&cmd->t_task->t_transport_sent)); |
2177 | 2057 | ||
2178 | transport_stop_all_task_timers(cmd); | 2058 | transport_stop_all_task_timers(cmd); |
2179 | 2059 | ||
2180 | if (dev) | 2060 | if (dev) |
2181 | transport_failure_reset_queue_depth(dev); | 2061 | atomic_inc(&dev->depth_left); |
2182 | /* | 2062 | /* |
2183 | * For SAM Task Attribute emulation for failed struct se_cmd | 2063 | * For SAM Task Attribute emulation for failed struct se_cmd |
2184 | */ | 2064 | */ |
@@ -2211,8 +2091,8 @@ static void transport_generic_request_failure( | |||
2211 | * we force this session to fall back to session | 2091 | * we force this session to fall back to session |
2212 | * recovery. | 2092 | * recovery. |
2213 | */ | 2093 | */ |
2214 | CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess); | 2094 | cmd->se_tfo->fall_back_to_erl0(cmd->se_sess); |
2215 | CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0); | 2095 | cmd->se_tfo->stop_session(cmd->se_sess, 0, 0); |
2216 | 2096 | ||
2217 | goto check_stop; | 2097 | goto check_stop; |
2218 | case PYX_TRANSPORT_LU_COMM_FAILURE: | 2098 | case PYX_TRANSPORT_LU_COMM_FAILURE: |
@@ -2240,13 +2120,13 @@ static void transport_generic_request_failure( | |||
2240 | * | 2120 | * |
2241 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | 2121 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 |
2242 | */ | 2122 | */ |
2243 | if (SE_SESS(cmd) && | 2123 | if (cmd->se_sess && |
2244 | DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2) | 2124 | cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) |
2245 | core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl, | 2125 | core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, |
2246 | cmd->orig_fe_lun, 0x2C, | 2126 | cmd->orig_fe_lun, 0x2C, |
2247 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | 2127 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); |
2248 | 2128 | ||
2249 | CMD_TFO(cmd)->queue_status(cmd); | 2129 | cmd->se_tfo->queue_status(cmd); |
2250 | goto check_stop; | 2130 | goto check_stop; |
2251 | case PYX_TRANSPORT_USE_SENSE_REASON: | 2131 | case PYX_TRANSPORT_USE_SENSE_REASON: |
2252 | /* | 2132 | /* |
@@ -2255,7 +2135,7 @@ static void transport_generic_request_failure( | |||
2255 | break; | 2135 | break; |
2256 | default: | 2136 | default: |
2257 | printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n", | 2137 | printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n", |
2258 | T_TASK(cmd)->t_task_cdb[0], | 2138 | cmd->t_task->t_task_cdb[0], |
2259 | cmd->transport_error_status); | 2139 | cmd->transport_error_status); |
2260 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | 2140 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; |
2261 | break; | 2141 | break; |
@@ -2276,19 +2156,19 @@ static void transport_direct_request_timeout(struct se_cmd *cmd) | |||
2276 | { | 2156 | { |
2277 | unsigned long flags; | 2157 | unsigned long flags; |
2278 | 2158 | ||
2279 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 2159 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
2280 | if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) { | 2160 | if (!(atomic_read(&cmd->t_task->t_transport_timeout))) { |
2281 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2161 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
2282 | return; | 2162 | return; |
2283 | } | 2163 | } |
2284 | if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) { | 2164 | if (atomic_read(&cmd->t_task->t_task_cdbs_timeout_left)) { |
2285 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2165 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
2286 | return; | 2166 | return; |
2287 | } | 2167 | } |
2288 | 2168 | ||
2289 | atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout), | 2169 | atomic_sub(atomic_read(&cmd->t_task->t_transport_timeout), |
2290 | &T_TASK(cmd)->t_se_count); | 2170 | &cmd->t_task->t_se_count); |
2291 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2171 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
2292 | } | 2172 | } |
2293 | 2173 | ||
2294 | static void transport_generic_request_timeout(struct se_cmd *cmd) | 2174 | static void transport_generic_request_timeout(struct se_cmd *cmd) |
@@ -2296,16 +2176,16 @@ static void transport_generic_request_timeout(struct se_cmd *cmd) | |||
2296 | unsigned long flags; | 2176 | unsigned long flags; |
2297 | 2177 | ||
2298 | /* | 2178 | /* |
2299 | * Reset T_TASK(cmd)->t_se_count to allow transport_generic_remove() | 2179 | * Reset cmd->t_task->t_se_count to allow transport_generic_remove() |
2300 | * to allow last call to free memory resources. | 2180 | * to allow last call to free memory resources. |
2301 | */ | 2181 | */ |
2302 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 2182 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
2303 | if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) { | 2183 | if (atomic_read(&cmd->t_task->t_transport_timeout) > 1) { |
2304 | int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1); | 2184 | int tmp = (atomic_read(&cmd->t_task->t_transport_timeout) - 1); |
2305 | 2185 | ||
2306 | atomic_sub(tmp, &T_TASK(cmd)->t_se_count); | 2186 | atomic_sub(tmp, &cmd->t_task->t_se_count); |
2307 | } | 2187 | } |
2308 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2188 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
2309 | 2189 | ||
2310 | transport_generic_remove(cmd, 0, 0); | 2190 | transport_generic_remove(cmd, 0, 0); |
2311 | } | 2191 | } |
@@ -2318,11 +2198,11 @@ transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length) | |||
2318 | buf = kzalloc(data_length, GFP_KERNEL); | 2198 | buf = kzalloc(data_length, GFP_KERNEL); |
2319 | if (!(buf)) { | 2199 | if (!(buf)) { |
2320 | printk(KERN_ERR "Unable to allocate memory for buffer\n"); | 2200 | printk(KERN_ERR "Unable to allocate memory for buffer\n"); |
2321 | return -1; | 2201 | return -ENOMEM; |
2322 | } | 2202 | } |
2323 | 2203 | ||
2324 | T_TASK(cmd)->t_tasks_se_num = 0; | 2204 | cmd->t_task->t_tasks_se_num = 0; |
2325 | T_TASK(cmd)->t_task_buf = buf; | 2205 | cmd->t_task->t_task_buf = buf; |
2326 | 2206 | ||
2327 | return 0; | 2207 | return 0; |
2328 | } | 2208 | } |
@@ -2364,9 +2244,9 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) | |||
2364 | { | 2244 | { |
2365 | unsigned long flags; | 2245 | unsigned long flags; |
2366 | 2246 | ||
2367 | spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags); | 2247 | spin_lock_irqsave(&se_cmd->t_task->t_state_lock, flags); |
2368 | se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; | 2248 | se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; |
2369 | spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags); | 2249 | spin_unlock_irqrestore(&se_cmd->t_task->t_state_lock, flags); |
2370 | } | 2250 | } |
2371 | 2251 | ||
2372 | /* | 2252 | /* |
@@ -2375,14 +2255,14 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) | |||
2375 | static void transport_task_timeout_handler(unsigned long data) | 2255 | static void transport_task_timeout_handler(unsigned long data) |
2376 | { | 2256 | { |
2377 | struct se_task *task = (struct se_task *)data; | 2257 | struct se_task *task = (struct se_task *)data; |
2378 | struct se_cmd *cmd = TASK_CMD(task); | 2258 | struct se_cmd *cmd = task->task_se_cmd; |
2379 | unsigned long flags; | 2259 | unsigned long flags; |
2380 | 2260 | ||
2381 | DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd); | 2261 | DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd); |
2382 | 2262 | ||
2383 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 2263 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
2384 | if (task->task_flags & TF_STOP) { | 2264 | if (task->task_flags & TF_STOP) { |
2385 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2265 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
2386 | return; | 2266 | return; |
2387 | } | 2267 | } |
2388 | task->task_flags &= ~TF_RUNNING; | 2268 | task->task_flags &= ~TF_RUNNING; |
@@ -2393,13 +2273,13 @@ static void transport_task_timeout_handler(unsigned long data) | |||
2393 | if (!(atomic_read(&task->task_active))) { | 2273 | if (!(atomic_read(&task->task_active))) { |
2394 | DEBUG_TT("transport task: %p cmd: %p timeout task_active" | 2274 | DEBUG_TT("transport task: %p cmd: %p timeout task_active" |
2395 | " == 0\n", task, cmd); | 2275 | " == 0\n", task, cmd); |
2396 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2276 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
2397 | return; | 2277 | return; |
2398 | } | 2278 | } |
2399 | 2279 | ||
2400 | atomic_inc(&T_TASK(cmd)->t_se_count); | 2280 | atomic_inc(&cmd->t_task->t_se_count); |
2401 | atomic_inc(&T_TASK(cmd)->t_transport_timeout); | 2281 | atomic_inc(&cmd->t_task->t_transport_timeout); |
2402 | T_TASK(cmd)->t_tasks_failed = 1; | 2282 | cmd->t_task->t_tasks_failed = 1; |
2403 | 2283 | ||
2404 | atomic_set(&task->task_timeout, 1); | 2284 | atomic_set(&task->task_timeout, 1); |
2405 | task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; | 2285 | task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; |
@@ -2408,28 +2288,28 @@ static void transport_task_timeout_handler(unsigned long data) | |||
2408 | if (atomic_read(&task->task_stop)) { | 2288 | if (atomic_read(&task->task_stop)) { |
2409 | DEBUG_TT("transport task: %p cmd: %p timeout task_stop" | 2289 | DEBUG_TT("transport task: %p cmd: %p timeout task_stop" |
2410 | " == 1\n", task, cmd); | 2290 | " == 1\n", task, cmd); |
2411 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2291 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
2412 | complete(&task->task_stop_comp); | 2292 | complete(&task->task_stop_comp); |
2413 | return; | 2293 | return; |
2414 | } | 2294 | } |
2415 | 2295 | ||
2416 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) { | 2296 | if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_left))) { |
2417 | DEBUG_TT("transport task: %p cmd: %p timeout non zero" | 2297 | DEBUG_TT("transport task: %p cmd: %p timeout non zero" |
2418 | " t_task_cdbs_left\n", task, cmd); | 2298 | " t_task_cdbs_left\n", task, cmd); |
2419 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2299 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
2420 | return; | 2300 | return; |
2421 | } | 2301 | } |
2422 | DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", | 2302 | DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", |
2423 | task, cmd); | 2303 | task, cmd); |
2424 | 2304 | ||
2425 | cmd->t_state = TRANSPORT_COMPLETE_FAILURE; | 2305 | cmd->t_state = TRANSPORT_COMPLETE_FAILURE; |
2426 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2306 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
2427 | 2307 | ||
2428 | transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); | 2308 | transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); |
2429 | } | 2309 | } |
2430 | 2310 | ||
2431 | /* | 2311 | /* |
2432 | * Called with T_TASK(cmd)->t_state_lock held. | 2312 | * Called with cmd->t_task->t_state_lock held. |
2433 | */ | 2313 | */ |
2434 | static void transport_start_task_timer(struct se_task *task) | 2314 | static void transport_start_task_timer(struct se_task *task) |
2435 | { | 2315 | { |
@@ -2441,7 +2321,7 @@ static void transport_start_task_timer(struct se_task *task) | |||
2441 | /* | 2321 | /* |
2442 | * If the task_timeout is disabled, exit now. | 2322 | * If the task_timeout is disabled, exit now. |
2443 | */ | 2323 | */ |
2444 | timeout = DEV_ATTRIB(dev)->task_timeout; | 2324 | timeout = dev->se_sub_dev->se_dev_attrib.task_timeout; |
2445 | if (!(timeout)) | 2325 | if (!(timeout)) |
2446 | return; | 2326 | return; |
2447 | 2327 | ||
@@ -2459,21 +2339,21 @@ static void transport_start_task_timer(struct se_task *task) | |||
2459 | } | 2339 | } |
2460 | 2340 | ||
2461 | /* | 2341 | /* |
2462 | * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held. | 2342 | * Called with spin_lock_irq(&cmd->t_task->t_state_lock) held. |
2463 | */ | 2343 | */ |
2464 | void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) | 2344 | void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) |
2465 | { | 2345 | { |
2466 | struct se_cmd *cmd = TASK_CMD(task); | 2346 | struct se_cmd *cmd = task->task_se_cmd; |
2467 | 2347 | ||
2468 | if (!(task->task_flags & TF_RUNNING)) | 2348 | if (!(task->task_flags & TF_RUNNING)) |
2469 | return; | 2349 | return; |
2470 | 2350 | ||
2471 | task->task_flags |= TF_STOP; | 2351 | task->task_flags |= TF_STOP; |
2472 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags); | 2352 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, *flags); |
2473 | 2353 | ||
2474 | del_timer_sync(&task->task_timer); | 2354 | del_timer_sync(&task->task_timer); |
2475 | 2355 | ||
2476 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags); | 2356 | spin_lock_irqsave(&cmd->t_task->t_state_lock, *flags); |
2477 | task->task_flags &= ~TF_RUNNING; | 2357 | task->task_flags &= ~TF_RUNNING; |
2478 | task->task_flags &= ~TF_STOP; | 2358 | task->task_flags &= ~TF_STOP; |
2479 | } | 2359 | } |
@@ -2483,11 +2363,11 @@ static void transport_stop_all_task_timers(struct se_cmd *cmd) | |||
2483 | struct se_task *task = NULL, *task_tmp; | 2363 | struct se_task *task = NULL, *task_tmp; |
2484 | unsigned long flags; | 2364 | unsigned long flags; |
2485 | 2365 | ||
2486 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 2366 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
2487 | list_for_each_entry_safe(task, task_tmp, | 2367 | list_for_each_entry_safe(task, task_tmp, |
2488 | &T_TASK(cmd)->t_task_list, t_list) | 2368 | &cmd->t_task->t_task_list, t_list) |
2489 | __transport_stop_task_timer(task, &flags); | 2369 | __transport_stop_task_timer(task, &flags); |
2490 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2370 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
2491 | } | 2371 | } |
2492 | 2372 | ||
2493 | static inline int transport_tcq_window_closed(struct se_device *dev) | 2373 | static inline int transport_tcq_window_closed(struct se_device *dev) |
@@ -2498,7 +2378,7 @@ static inline int transport_tcq_window_closed(struct se_device *dev) | |||
2498 | } else | 2378 | } else |
2499 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG); | 2379 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG); |
2500 | 2380 | ||
2501 | wake_up_interruptible(&dev->dev_queue_obj->thread_wq); | 2381 | wake_up_interruptible(&dev->dev_queue_obj.thread_wq); |
2502 | return 0; | 2382 | return 0; |
2503 | } | 2383 | } |
2504 | 2384 | ||
@@ -2511,45 +2391,45 @@ static inline int transport_tcq_window_closed(struct se_device *dev) | |||
2511 | */ | 2391 | */ |
2512 | static inline int transport_execute_task_attr(struct se_cmd *cmd) | 2392 | static inline int transport_execute_task_attr(struct se_cmd *cmd) |
2513 | { | 2393 | { |
2514 | if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) | 2394 | if (cmd->se_lun->lun_se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) |
2515 | return 1; | 2395 | return 1; |
2516 | /* | 2396 | /* |
2517 | * Check for the existence of HEAD_OF_QUEUE, and if true return 1 | 2397 | * Check for the existence of HEAD_OF_QUEUE, and if true return 1 |
2518 | * to allow the passed struct se_cmd list of tasks to the front of the list. | 2398 | * to allow the passed struct se_cmd list of tasks to the front of the list. |
2519 | */ | 2399 | */ |
2520 | if (cmd->sam_task_attr == MSG_HEAD_TAG) { | 2400 | if (cmd->sam_task_attr == MSG_HEAD_TAG) { |
2521 | atomic_inc(&SE_DEV(cmd)->dev_hoq_count); | 2401 | atomic_inc(&cmd->se_lun->lun_se_dev->dev_hoq_count); |
2522 | smp_mb__after_atomic_inc(); | 2402 | smp_mb__after_atomic_inc(); |
2523 | DEBUG_STA("Added HEAD_OF_QUEUE for CDB:" | 2403 | DEBUG_STA("Added HEAD_OF_QUEUE for CDB:" |
2524 | " 0x%02x, se_ordered_id: %u\n", | 2404 | " 0x%02x, se_ordered_id: %u\n", |
2525 | T_TASK(cmd)->t_task_cdb[0], | 2405 | cmd->t_task->t_task_cdb[0], |
2526 | cmd->se_ordered_id); | 2406 | cmd->se_ordered_id); |
2527 | return 1; | 2407 | return 1; |
2528 | } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { | 2408 | } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { |
2529 | spin_lock(&SE_DEV(cmd)->ordered_cmd_lock); | 2409 | spin_lock(&cmd->se_lun->lun_se_dev->ordered_cmd_lock); |
2530 | list_add_tail(&cmd->se_ordered_list, | 2410 | list_add_tail(&cmd->se_ordered_list, |
2531 | &SE_DEV(cmd)->ordered_cmd_list); | 2411 | &cmd->se_lun->lun_se_dev->ordered_cmd_list); |
2532 | spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock); | 2412 | spin_unlock(&cmd->se_lun->lun_se_dev->ordered_cmd_lock); |
2533 | 2413 | ||
2534 | atomic_inc(&SE_DEV(cmd)->dev_ordered_sync); | 2414 | atomic_inc(&cmd->se_lun->lun_se_dev->dev_ordered_sync); |
2535 | smp_mb__after_atomic_inc(); | 2415 | smp_mb__after_atomic_inc(); |
2536 | 2416 | ||
2537 | DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered" | 2417 | DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered" |
2538 | " list, se_ordered_id: %u\n", | 2418 | " list, se_ordered_id: %u\n", |
2539 | T_TASK(cmd)->t_task_cdb[0], | 2419 | cmd->t_task->t_task_cdb[0], |
2540 | cmd->se_ordered_id); | 2420 | cmd->se_ordered_id); |
2541 | /* | 2421 | /* |
2542 | * Add ORDERED command to tail of execution queue if | 2422 | * Add ORDERED command to tail of execution queue if |
2543 | * no other older commands exist that need to be | 2423 | * no other older commands exist that need to be |
2544 | * completed first. | 2424 | * completed first. |
2545 | */ | 2425 | */ |
2546 | if (!(atomic_read(&SE_DEV(cmd)->simple_cmds))) | 2426 | if (!(atomic_read(&cmd->se_lun->lun_se_dev->simple_cmds))) |
2547 | return 1; | 2427 | return 1; |
2548 | } else { | 2428 | } else { |
2549 | /* | 2429 | /* |
2550 | * For SIMPLE and UNTAGGED Task Attribute commands | 2430 | * For SIMPLE and UNTAGGED Task Attribute commands |
2551 | */ | 2431 | */ |
2552 | atomic_inc(&SE_DEV(cmd)->simple_cmds); | 2432 | atomic_inc(&cmd->se_lun->lun_se_dev->simple_cmds); |
2553 | smp_mb__after_atomic_inc(); | 2433 | smp_mb__after_atomic_inc(); |
2554 | } | 2434 | } |
2555 | /* | 2435 | /* |
@@ -2557,20 +2437,20 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd) | |||
2557 | * add the dormant task(s) built for the passed struct se_cmd to the | 2437 | * add the dormant task(s) built for the passed struct se_cmd to the |
2558 | * execution queue and become in Active state for this struct se_device. | 2438 | * execution queue and become in Active state for this struct se_device. |
2559 | */ | 2439 | */ |
2560 | if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) { | 2440 | if (atomic_read(&cmd->se_lun->lun_se_dev->dev_ordered_sync) != 0) { |
2561 | /* | 2441 | /* |
2562 | * Otherwise, add cmd w/ tasks to delayed cmd queue that | 2442 | * Otherwise, add cmd w/ tasks to delayed cmd queue that |
2563 | * will be drained upon completion of HEAD_OF_QUEUE task. | 2443 | * will be drained upon completion of HEAD_OF_QUEUE task. |
2564 | */ | 2444 | */ |
2565 | spin_lock(&SE_DEV(cmd)->delayed_cmd_lock); | 2445 | spin_lock(&cmd->se_lun->lun_se_dev->delayed_cmd_lock); |
2566 | cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; | 2446 | cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; |
2567 | list_add_tail(&cmd->se_delayed_list, | 2447 | list_add_tail(&cmd->se_delayed_list, |
2568 | &SE_DEV(cmd)->delayed_cmd_list); | 2448 | &cmd->se_lun->lun_se_dev->delayed_cmd_list); |
2569 | spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock); | 2449 | spin_unlock(&cmd->se_lun->lun_se_dev->delayed_cmd_lock); |
2570 | 2450 | ||
2571 | DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to" | 2451 | DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to" |
2572 | " delayed CMD list, se_ordered_id: %u\n", | 2452 | " delayed CMD list, se_ordered_id: %u\n", |
2573 | T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr, | 2453 | cmd->t_task->t_task_cdb[0], cmd->sam_task_attr, |
2574 | cmd->se_ordered_id); | 2454 | cmd->se_ordered_id); |
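
While dev_ordered_sync is non-zero the command is parked on delayed_cmd_list instead of being executed, and the list is replayed once the ORDERED/HEAD_OF_QUEUE command completes. A toy FIFO sketch of that park/drain pair (the kernel uses struct list_head under delayed_cmd_lock; the demo_* names and fixed depth are hypothetical):

#include <stdio.h>

#define DEMO_DEPTH 8
static int delayed[DEMO_DEPTH];
static int d_head, d_tail;

static void demo_park(int cmd_id)       /* list_add_tail() analog */
{
	delayed[d_tail++ % DEMO_DEPTH] = cmd_id;
}

/* Drain runs when the blocking command completes and the device's
 * ordered-sync count drops back to zero. */
static void demo_drain(void)
{
	while (d_head != d_tail)
		printf("executing delayed cmd %d\n",
		       delayed[d_head++ % DEMO_DEPTH]);
}

int main(void)
{
	demo_park(11);
	demo_park(12);
	demo_drain();   /* replays parked commands in FIFO order */
	return 0;
}
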
2575 | /* | 2455 | /* |
2576 | * Return zero to let transport_execute_tasks() know | 2456 | * Return zero to let transport_execute_tasks() know |
@@ -2610,7 +2490,7 @@ static int transport_execute_tasks(struct se_cmd *cmd) | |||
2610 | * attribute for the tasks of the received struct se_cmd CDB | 2490 | * attribute for the tasks of the received struct se_cmd CDB |
2611 | */ | 2491 | */ |
2612 | add_tasks = transport_execute_task_attr(cmd); | 2492 | add_tasks = transport_execute_task_attr(cmd); |
2613 | if (add_tasks == 0) | 2493 | if (!add_tasks) |
2614 | goto execute_tasks; | 2494 | goto execute_tasks; |
2615 | /* | 2495 | /* |
2616 | * This calls transport_add_tasks_from_cmd() to handle | 2496 | * This calls transport_add_tasks_from_cmd() to handle |
@@ -2625,7 +2505,7 @@ static int transport_execute_tasks(struct se_cmd *cmd) | |||
2625 | * storage object. | 2505 | * storage object. |
2626 | */ | 2506 | */ |
2627 | execute_tasks: | 2507 | execute_tasks: |
2628 | __transport_execute_tasks(SE_DEV(cmd)); | 2508 | __transport_execute_tasks(cmd->se_lun->lun_se_dev); |
2629 | return 0; | 2509 | return 0; |
2630 | } | 2510 | } |
2631 | 2511 | ||
@@ -2639,7 +2519,7 @@ static int __transport_execute_tasks(struct se_device *dev) | |||
2639 | { | 2519 | { |
2640 | int error; | 2520 | int error; |
2641 | struct se_cmd *cmd = NULL; | 2521 | struct se_cmd *cmd = NULL; |
2642 | struct se_task *task; | 2522 | struct se_task *task = NULL; |
2643 | unsigned long flags; | 2523 | unsigned long flags; |
2644 | 2524 | ||
2645 | /* | 2525 | /* |
@@ -2647,43 +2527,41 @@ static int __transport_execute_tasks(struct se_device *dev) | |||
2647 | * struct se_transport_task's to the selected transport. | 2527 | * struct se_transport_task's to the selected transport. |
2648 | */ | 2528 | */ |
2649 | check_depth: | 2529 | check_depth: |
2650 | spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); | 2530 | if (!atomic_read(&dev->depth_left)) |
2651 | if (!(atomic_read(&dev->depth_left)) || | ||
2652 | !(atomic_read(&SE_HBA(dev)->left_queue_depth))) { | ||
2653 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | ||
2654 | return transport_tcq_window_closed(dev); | 2531 | return transport_tcq_window_closed(dev); |
2655 | } | ||
2656 | dev->dev_tcq_window_closed = 0; | ||
2657 | 2532 | ||
2658 | spin_lock(&dev->execute_task_lock); | 2533 | dev->dev_tcq_window_closed = 0; |
2659 | task = transport_get_task_from_execute_queue(dev); | ||
2660 | spin_unlock(&dev->execute_task_lock); | ||
2661 | 2534 | ||
2662 | if (!task) { | 2535 | spin_lock_irq(&dev->execute_task_lock); |
2663 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | 2536 | if (list_empty(&dev->execute_task_list)) { |
2537 | spin_unlock_irq(&dev->execute_task_lock); | ||
2664 | return 0; | 2538 | return 0; |
2665 | } | 2539 | } |
2540 | task = list_first_entry(&dev->execute_task_list, | ||
2541 | struct se_task, t_execute_list); | ||
2542 | list_del(&task->t_execute_list); | ||
2543 | atomic_set(&task->task_execute_queue, 0); | ||
2544 | atomic_dec(&dev->execute_tasks); | ||
2545 | spin_unlock_irq(&dev->execute_task_lock); | ||
2666 | 2546 | ||
2667 | atomic_dec(&dev->depth_left); | 2547 | atomic_dec(&dev->depth_left); |
2668 | atomic_dec(&SE_HBA(dev)->left_queue_depth); | ||
2669 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | ||
2670 | 2548 | ||
2671 | cmd = TASK_CMD(task); | 2549 | cmd = task->task_se_cmd; |
2672 | 2550 | ||
2673 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 2551 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
2674 | atomic_set(&task->task_active, 1); | 2552 | atomic_set(&task->task_active, 1); |
2675 | atomic_set(&task->task_sent, 1); | 2553 | atomic_set(&task->task_sent, 1); |
2676 | atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent); | 2554 | atomic_inc(&cmd->t_task->t_task_cdbs_sent); |
2677 | 2555 | ||
2678 | if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) == | 2556 | if (atomic_read(&cmd->t_task->t_task_cdbs_sent) == |
2679 | T_TASK(cmd)->t_task_cdbs) | 2557 | cmd->t_task->t_task_cdbs) |
2680 | atomic_set(&cmd->transport_sent, 1); | 2558 | atomic_set(&cmd->transport_sent, 1); |
2681 | 2559 | ||
2682 | transport_start_task_timer(task); | 2560 | transport_start_task_timer(task); |
2683 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2561 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
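
The rewrite above drops the HBA-wide queue-depth lock and open-codes what transport_get_task_from_execute_queue() used to do: take the device lock, pop the first task, clear its queued flag, drop the lock. A userspace sketch of that list_first_entry()/list_del() pair, with a pthread mutex standing in for the irq-disabled spinlock (demo_* names hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

/* Minimal intrusive list, standing in for the kernel's list_head. */
struct list_node { struct list_node *next, *prev; };

struct demo_task {
	int task_no;
	struct list_node entry;     /* analog of t_execute_list */
};

static struct list_node exec_list = { &exec_list, &exec_list };
static pthread_mutex_t exec_lock = PTHREAD_MUTEX_INITIALIZER;

static void list_add_tail_(struct list_node *n, struct list_node *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

/* Analog of the open-coded dequeue that replaced
 * transport_get_task_from_execute_queue() above. */
static struct demo_task *dequeue_first(void)
{
	struct demo_task *t = NULL;

	pthread_mutex_lock(&exec_lock);
	if (exec_list.next != &exec_list) {
		struct list_node *n = exec_list.next;
		n->prev->next = n->next;        /* list_del() */
		n->next->prev = n->prev;
		t = (struct demo_task *)((char *)n -
			offsetof(struct demo_task, entry));
	}
	pthread_mutex_unlock(&exec_lock);
	return t;
}

int main(void)
{
	struct demo_task a = { .task_no = 1 };
	list_add_tail_(&a.entry, &exec_list);
	printf("dequeued task %d\n", dequeue_first()->task_no);
	return 0;
}
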
2684 | /* | 2562 | /* |
2685 | * The struct se_cmd->transport_emulate_cdb() function pointer is used | 2563 | * The struct se_cmd->transport_emulate_cdb() function pointer is used |
2686 | * to grab REPORT_LUNS CDBs before they hit the | 2564 | * to grab REPORT_LUNS and other CDBs we want to handle before they hit the |
2687 | * struct se_subsystem_api->do_task() caller below. | 2565 | * struct se_subsystem_api->do_task() caller below. |
2688 | */ | 2566 | */ |
2689 | if (cmd->transport_emulate_cdb) { | 2567 | if (cmd->transport_emulate_cdb) { |
@@ -2718,11 +2596,11 @@ check_depth: | |||
2718 | * call ->do_task() directly and let the underlying TCM subsystem plugin | 2596 | * call ->do_task() directly and let the underlying TCM subsystem plugin |
2719 | * code handle the CDB emulation. | 2597 | * code handle the CDB emulation. |
2720 | */ | 2598 | */ |
2721 | if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) && | 2599 | if ((dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) && |
2722 | (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) | 2600 | (!(task->task_se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) |
2723 | error = transport_emulate_control_cdb(task); | 2601 | error = transport_emulate_control_cdb(task); |
2724 | else | 2602 | else |
2725 | error = TRANSPORT(dev)->do_task(task); | 2603 | error = dev->transport->do_task(task); |
2726 | 2604 | ||
2727 | if (error != 0) { | 2605 | if (error != 0) { |
2728 | cmd->transport_error_status = error; | 2606 | cmd->transport_error_status = error; |
@@ -2745,12 +2623,12 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd) | |||
2745 | * Any unsolicited data will get dumped for a failed command inside of | 2623 | * Any unsolicited data will get dumped for a failed command inside of |
2746 | * the fabric plugin | 2624 | * the fabric plugin |
2747 | */ | 2625 | */ |
2748 | spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags); | 2626 | spin_lock_irqsave(&se_cmd->t_task->t_state_lock, flags); |
2749 | se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; | 2627 | se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; |
2750 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 2628 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
2751 | spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags); | 2629 | spin_unlock_irqrestore(&se_cmd->t_task->t_state_lock, flags); |
2752 | 2630 | ||
2753 | CMD_TFO(se_cmd)->new_cmd_failure(se_cmd); | 2631 | se_cmd->se_tfo->new_cmd_failure(se_cmd); |
2754 | } | 2632 | } |
2755 | 2633 | ||
2756 | static void transport_nop_wait_for_tasks(struct se_cmd *, int, int); | 2634 | static void transport_nop_wait_for_tasks(struct se_cmd *, int, int); |
@@ -2760,7 +2638,7 @@ static inline u32 transport_get_sectors_6( | |||
2760 | struct se_cmd *cmd, | 2638 | struct se_cmd *cmd, |
2761 | int *ret) | 2639 | int *ret) |
2762 | { | 2640 | { |
2763 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | 2641 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
2764 | 2642 | ||
2765 | /* | 2643 | /* |
2766 | * Assume TYPE_DISK for non struct se_device objects. | 2644 | * Assume TYPE_DISK for non struct se_device objects. |
@@ -2772,7 +2650,7 @@ static inline u32 transport_get_sectors_6( | |||
2772 | /* | 2650 | /* |
2773 | * Use 24-bit allocation length for TYPE_TAPE. | 2651 | * Use 24-bit allocation length for TYPE_TAPE. |
2774 | */ | 2652 | */ |
2775 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) | 2653 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) |
2776 | return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4]; | 2654 | return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4]; |
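
The tape special case above reads a 24-bit allocation length spanning cdb[2..4], while the disk path (type_disk, not shown in this hunk) returns the single byte at cdb[4]. A small sketch of both rules:

#include <stdint.h>
#include <stdio.h>

/* 6-byte CDB transfer-length rules: disks use the byte at cdb[4],
 * tape (SSC) devices use the 24-bit length at cdb[2..4]. */
static uint32_t sectors_6(const uint8_t *cdb, int is_tape)
{
	if (is_tape)
		return ((uint32_t)cdb[2] << 16) | (cdb[3] << 8) | cdb[4];
	return cdb[4];
}

int main(void)
{
	uint8_t cdb[6] = { 0x08, 0, 0x01, 0x02, 0x03, 0 };
	printf("disk: %u tape: %u\n", sectors_6(cdb, 0), sectors_6(cdb, 1));
	return 0;
}
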
2777 | 2655 | ||
2778 | /* | 2656 | /* |
@@ -2788,7 +2666,7 @@ static inline u32 transport_get_sectors_10( | |||
2788 | struct se_cmd *cmd, | 2666 | struct se_cmd *cmd, |
2789 | int *ret) | 2667 | int *ret) |
2790 | { | 2668 | { |
2791 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | 2669 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
2792 | 2670 | ||
2793 | /* | 2671 | /* |
2794 | * Assume TYPE_DISK for non struct se_device objects. | 2672 | * Assume TYPE_DISK for non struct se_device objects. |
@@ -2800,8 +2678,8 @@ static inline u32 transport_get_sectors_10( | |||
2800 | /* | 2678 | /* |
2801 | * XXX_10 is not defined in SSC, throw an exception | 2679 | * XXX_10 is not defined in SSC, throw an exception |
2802 | */ | 2680 | */ |
2803 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { | 2681 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { |
2804 | *ret = -1; | 2682 | *ret = -EINVAL; |
2805 | return 0; | 2683 | return 0; |
2806 | } | 2684 | } |
2807 | 2685 | ||
@@ -2818,7 +2696,7 @@ static inline u32 transport_get_sectors_12( | |||
2818 | struct se_cmd *cmd, | 2696 | struct se_cmd *cmd, |
2819 | int *ret) | 2697 | int *ret) |
2820 | { | 2698 | { |
2821 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | 2699 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
2822 | 2700 | ||
2823 | /* | 2701 | /* |
2824 | * Assume TYPE_DISK for non struct se_device objects. | 2702 | * Assume TYPE_DISK for non struct se_device objects. |
@@ -2830,8 +2708,8 @@ static inline u32 transport_get_sectors_12( | |||
2830 | /* | 2708 | /* |
2831 | * XXX_12 is not defined in SSC, throw an exception | 2709 | * XXX_12 is not defined in SSC, throw an exception |
2832 | */ | 2710 | */ |
2833 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { | 2711 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { |
2834 | *ret = -1; | 2712 | *ret = -EINVAL; |
2835 | return 0; | 2713 | return 0; |
2836 | } | 2714 | } |
2837 | 2715 | ||
@@ -2848,7 +2726,7 @@ static inline u32 transport_get_sectors_16( | |||
2848 | struct se_cmd *cmd, | 2726 | struct se_cmd *cmd, |
2849 | int *ret) | 2727 | int *ret) |
2850 | { | 2728 | { |
2851 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | 2729 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
2852 | 2730 | ||
2853 | /* | 2731 | /* |
2854 | * Assume TYPE_DISK for non struct se_device objects. | 2732 | * Assume TYPE_DISK for non struct se_device objects. |
@@ -2860,7 +2738,7 @@ static inline u32 transport_get_sectors_16( | |||
2860 | /* | 2738 | /* |
2861 | * Use 24-bit allocation length for TYPE_TAPE. | 2739 | * Use 24-bit allocation length for TYPE_TAPE. |
2862 | */ | 2740 | */ |
2863 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) | 2741 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) |
2864 | return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14]; | 2742 | return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14]; |
2865 | 2743 | ||
2866 | type_disk: | 2744 | type_disk: |
@@ -2890,21 +2768,21 @@ static inline u32 transport_get_size( | |||
2890 | unsigned char *cdb, | 2768 | unsigned char *cdb, |
2891 | struct se_cmd *cmd) | 2769 | struct se_cmd *cmd) |
2892 | { | 2770 | { |
2893 | struct se_device *dev = SE_DEV(cmd); | 2771 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
2894 | 2772 | ||
2895 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { | 2773 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { |
2896 | if (cdb[1] & 1) { /* sectors */ | 2774 | if (cdb[1] & 1) { /* sectors */ |
2897 | return DEV_ATTRIB(dev)->block_size * sectors; | 2775 | return dev->se_sub_dev->se_dev_attrib.block_size * sectors; |
2898 | } else /* bytes */ | 2776 | } else /* bytes */ |
2899 | return sectors; | 2777 | return sectors; |
2900 | } | 2778 | } |
2901 | #if 0 | 2779 | #if 0 |
2902 | printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for" | 2780 | printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for" |
2903 | " %s object\n", DEV_ATTRIB(dev)->block_size, sectors, | 2781 | " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors, |
2904 | DEV_ATTRIB(dev)->block_size * sectors, | 2782 | dev->se_sub_dev->se_dev_attrib.block_size * sectors, |
2905 | TRANSPORT(dev)->name); | 2783 | dev->transport->name); |
2906 | #endif | 2784 | #endif |
2907 | return DEV_ATTRIB(dev)->block_size * sectors; | 2785 | return dev->se_sub_dev->se_dev_attrib.block_size * sectors; |
2908 | } | 2786 | } |
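
transport_get_size() above scales the sector count by the device block size, except for tape devices in byte mode: when the fixed-block bit (cdb[1] & 1) is clear, the CDB length field already counts bytes. A compact sketch of that decision (parameters are hypothetical stand-ins for the se_dev_attrib fields):

#include <stdint.h>
#include <stdio.h>

static uint32_t demo_get_size(uint32_t sectors, const uint8_t *cdb,
			      uint32_t block_size, int is_tape)
{
	if (is_tape && !(cdb[1] & 1))
		return sectors;                 /* byte-mode transfer */
	return block_size * sectors;            /* fixed blocks       */
}

int main(void)
{
	uint8_t cdb[6] = { 0x0a, 0x01, 0, 0, 8, 0 };
	printf("%u bytes\n", demo_get_size(8, cdb, 512, 0));
	return 0;
}
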
2909 | 2787 | ||
2910 | unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]) | 2788 | unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]) |
@@ -2958,17 +2836,17 @@ static void transport_xor_callback(struct se_cmd *cmd) | |||
2958 | return; | 2836 | return; |
2959 | } | 2837 | } |
2960 | /* | 2838 | /* |
2961 | * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list | 2839 | * Copy the scatterlist WRITE buffer located at cmd->t_task->t_mem_list |
2962 | * into the locally allocated *buf | 2840 | * into the locally allocated *buf |
2963 | */ | 2841 | */ |
2964 | transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list); | 2842 | transport_memcpy_se_mem_read_contig(cmd, buf, cmd->t_task->t_mem_list); |
2965 | /* | 2843 | /* |
2966 | * Now perform the XOR against the BIDI read memory located at | 2844 | * Now perform the XOR against the BIDI read memory located at |
2967 | * T_TASK(cmd)->t_mem_bidi_list | 2845 | * cmd->t_task->t_mem_bidi_list |
2968 | */ | 2846 | */ |
2969 | 2847 | ||
2970 | offset = 0; | 2848 | offset = 0; |
2971 | list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) { | 2849 | list_for_each_entry(se_mem, cmd->t_task->t_mem_bidi_list, se_list) { |
2972 | addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0); | 2850 | addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0); |
2973 | if (!(addr)) | 2851 | if (!(addr)) |
2974 | goto out; | 2852 | goto out; |
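
The XDWRITEREAD completion callback above XORs the just-written payload into the BIDI read payload; the kernel walks the se_mem page list with kmap_atomic() and a running offset, but the operation itself reduces to a byte-wise XOR. A flat-buffer sketch (a simplification of the page-list walk shown above):

#include <stdio.h>
#include <stddef.h>

/* XOR the WRITE payload into the BIDI READ payload in place. */
static void xor_into(unsigned char *rd, const unsigned char *wr, size_t len)
{
	for (size_t i = 0; i < len; i++)
		rd[i] ^= wr[i];
}

int main(void)
{
	unsigned char wr[4] = { 0xde, 0xad, 0xbe, 0xef };
	unsigned char rd[4] = { 0xde, 0xad, 0xbe, 0xef };

	xor_into(rd, wr, sizeof(rd));
	/* identical buffers XOR to all zeroes */
	printf("%02x %02x %02x %02x\n", rd[0], rd[1], rd[2], rd[3]);
	return 0;
}
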
@@ -2994,18 +2872,16 @@ static int transport_get_sense_data(struct se_cmd *cmd) | |||
2994 | unsigned long flags; | 2872 | unsigned long flags; |
2995 | u32 offset = 0; | 2873 | u32 offset = 0; |
2996 | 2874 | ||
2997 | if (!SE_LUN(cmd)) { | 2875 | WARN_ON(!cmd->se_lun); |
2998 | printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); | 2876 | |
2999 | return -1; | 2877 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
3000 | } | ||
3001 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
3002 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { | 2878 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { |
3003 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2879 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
3004 | return 0; | 2880 | return 0; |
3005 | } | 2881 | } |
3006 | 2882 | ||
3007 | list_for_each_entry_safe(task, task_tmp, | 2883 | list_for_each_entry_safe(task, task_tmp, |
3008 | &T_TASK(cmd)->t_task_list, t_list) { | 2884 | &cmd->t_task->t_task_list, t_list) { |
3009 | 2885 | ||
3010 | if (!task->task_sense) | 2886 | if (!task->task_sense) |
3011 | continue; | 2887 | continue; |
@@ -3014,22 +2890,22 @@ static int transport_get_sense_data(struct se_cmd *cmd) | |||
3014 | if (!(dev)) | 2890 | if (!(dev)) |
3015 | continue; | 2891 | continue; |
3016 | 2892 | ||
3017 | if (!TRANSPORT(dev)->get_sense_buffer) { | 2893 | if (!dev->transport->get_sense_buffer) { |
3018 | printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer" | 2894 | printk(KERN_ERR "dev->transport->get_sense_buffer" |
3019 | " is NULL\n"); | 2895 | " is NULL\n"); |
3020 | continue; | 2896 | continue; |
3021 | } | 2897 | } |
3022 | 2898 | ||
3023 | sense_buffer = TRANSPORT(dev)->get_sense_buffer(task); | 2899 | sense_buffer = dev->transport->get_sense_buffer(task); |
3024 | if (!(sense_buffer)) { | 2900 | if (!(sense_buffer)) { |
3025 | printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate" | 2901 | printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate" |
3026 | " sense buffer for task with sense\n", | 2902 | " sense buffer for task with sense\n", |
3027 | CMD_TFO(cmd)->get_task_tag(cmd), task->task_no); | 2903 | cmd->se_tfo->get_task_tag(cmd), task->task_no); |
3028 | continue; | 2904 | continue; |
3029 | } | 2905 | } |
3030 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2906 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
3031 | 2907 | ||
3032 | offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd, | 2908 | offset = cmd->se_tfo->set_fabric_sense_len(cmd, |
3033 | TRANSPORT_SENSE_BUFFER); | 2909 | TRANSPORT_SENSE_BUFFER); |
3034 | 2910 | ||
3035 | memcpy((void *)&buffer[offset], (void *)sense_buffer, | 2911 | memcpy((void *)&buffer[offset], (void *)sense_buffer, |
@@ -3041,11 +2917,11 @@ static int transport_get_sense_data(struct se_cmd *cmd) | |||
3041 | 2917 | ||
3042 | printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" | 2918 | printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" |
3043 | " and sense\n", | 2919 | " and sense\n", |
3044 | dev->se_hba->hba_id, TRANSPORT(dev)->name, | 2920 | dev->se_hba->hba_id, dev->transport->name, |
3045 | cmd->scsi_status); | 2921 | cmd->scsi_status); |
3046 | return 0; | 2922 | return 0; |
3047 | } | 2923 | } |
3048 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2924 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
3049 | 2925 | ||
3050 | return -1; | 2926 | return -1; |
3051 | } | 2927 | } |
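
The copy above lets the fabric decide where sense data lands in its response buffer: set_fabric_sense_len() returns a fabric-specific offset, and the task's sense bytes are memcpy()'d in at that offset before the SAM status is set. A minimal sketch, assuming an arbitrary offset of 2 and an illustrative buffer size (both hypothetical):

#include <stdio.h>
#include <string.h>

#define DEMO_SENSE_BUFFER 96   /* illustrative size only */

static void copy_sense(unsigned char *fabric_buf, size_t fabric_off,
		       const unsigned char *sense)
{
	/* sense bytes start after the fabric's own header region */
	memcpy(fabric_buf + fabric_off, sense, DEMO_SENSE_BUFFER);
}

int main(void)
{
	unsigned char fabric_buf[128] = { 0 };
	unsigned char sense[DEMO_SENSE_BUFFER] = { 0x70, 0, 0x02 };

	copy_sense(fabric_buf, 2, sense);   /* offset 2 is hypothetical */
	printf("sense key byte: 0x%02x\n", fabric_buf[2 + 2]);
	return 0;
}
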
@@ -3077,9 +2953,9 @@ transport_handle_reservation_conflict(struct se_cmd *cmd) | |||
3077 | * | 2953 | * |
3078 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | 2954 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 |
3079 | */ | 2955 | */ |
3080 | if (SE_SESS(cmd) && | 2956 | if (cmd->se_sess && |
3081 | DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2) | 2957 | cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) |
3082 | core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl, | 2958 | core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, |
3083 | cmd->orig_fe_lun, 0x2C, | 2959 | cmd->orig_fe_lun, 0x2C, |
3084 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | 2960 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); |
3085 | return -2; | 2961 | return -2; |
@@ -3099,7 +2975,7 @@ static int transport_generic_cmd_sequencer( | |||
3099 | struct se_cmd *cmd, | 2975 | struct se_cmd *cmd, |
3100 | unsigned char *cdb) | 2976 | unsigned char *cdb) |
3101 | { | 2977 | { |
3102 | struct se_device *dev = SE_DEV(cmd); | 2978 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
3103 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; | 2979 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
3104 | int ret = 0, sector_ret = 0, passthrough; | 2980 | int ret = 0, sector_ret = 0, passthrough; |
3105 | u32 sectors = 0, size = 0, pr_reg_type = 0; | 2981 | u32 sectors = 0, size = 0, pr_reg_type = 0; |
@@ -3118,7 +2994,7 @@ static int transport_generic_cmd_sequencer( | |||
3118 | /* | 2994 | /* |
3119 | * Check status of Asymmetric Logical Unit Assignment port | 2995 | * Check status of Asymmetric Logical Unit Assignment port |
3120 | */ | 2996 | */ |
3121 | ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq); | 2997 | ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); |
3122 | if (ret != 0) { | 2998 | if (ret != 0) { |
3123 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | 2999 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; |
3124 | /* | 3000 | /* |
@@ -3130,7 +3006,7 @@ static int transport_generic_cmd_sequencer( | |||
3130 | #if 0 | 3006 | #if 0 |
3131 | printk(KERN_INFO "[%s]: ALUA TG Port not available," | 3007 | printk(KERN_INFO "[%s]: ALUA TG Port not available," |
3132 | " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", | 3008 | " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", |
3133 | CMD_TFO(cmd)->get_fabric_name(), alua_ascq); | 3009 | cmd->se_tfo->get_fabric_name(), alua_ascq); |
3134 | #endif | 3010 | #endif |
3135 | transport_set_sense_codes(cmd, 0x04, alua_ascq); | 3011 | transport_set_sense_codes(cmd, 0x04, alua_ascq); |
3136 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 3012 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
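
With struct se_global and the T10_ALUA() macro gone, the ALUA check is reached directly through a function pointer embedded in the subsystem dev, as the new column shows. A sketch of that embedded-ops pattern (demo_* names hypothetical; it also illustrates this patch's point that a function name needs no '&' to become a pointer):

#include <stdio.h>

struct demo_alua_ops {
	int (*state_check)(int opcode, int *ascq);
};

static int demo_state_check(int opcode, int *ascq)
{
	(void)opcode;
	*ascq = 0x0b;       /* e.g. target port in standby */
	return 1;           /* non-zero: fail with NOT_READY */
}

struct demo_su_dev {
	struct demo_alua_ops t10_alua;  /* embedded, not a pointer */
};

int main(void)
{
	/* no '&' needed: the function name decays to a pointer */
	struct demo_su_dev su = { .t10_alua = { demo_state_check } };
	int ascq = 0;

	if (su.t10_alua.state_check(0x12, &ascq))
		printf("ALUA check failed, ASCQ 0x%02x\n", ascq);
	return 0;
}
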
@@ -3142,8 +3018,8 @@ static int transport_generic_cmd_sequencer( | |||
3142 | /* | 3018 | /* |
3143 | * Check status for SPC-3 Persistent Reservations | 3019 | * Check status for SPC-3 Persistent Reservations |
3144 | */ | 3020 | */ |
3145 | if (T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) { | 3021 | if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) { |
3146 | if (T10_PR_OPS(su_dev)->t10_seq_non_holder( | 3022 | if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( |
3147 | cmd, cdb, pr_reg_type) != 0) | 3023 | cmd, cdb, pr_reg_type) != 0) |
3148 | return transport_handle_reservation_conflict(cmd); | 3024 | return transport_handle_reservation_conflict(cmd); |
3149 | /* | 3025 | /* |
@@ -3160,7 +3036,7 @@ static int transport_generic_cmd_sequencer( | |||
3160 | goto out_unsupported_cdb; | 3036 | goto out_unsupported_cdb; |
3161 | size = transport_get_size(sectors, cdb, cmd); | 3037 | size = transport_get_size(sectors, cdb, cmd); |
3162 | cmd->transport_split_cdb = &split_cdb_XX_6; | 3038 | cmd->transport_split_cdb = &split_cdb_XX_6; |
3163 | T_TASK(cmd)->t_task_lba = transport_lba_21(cdb); | 3039 | cmd->t_task->t_task_lba = transport_lba_21(cdb); |
3164 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 3040 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3165 | break; | 3041 | break; |
3166 | case READ_10: | 3042 | case READ_10: |
@@ -3169,7 +3045,7 @@ static int transport_generic_cmd_sequencer( | |||
3169 | goto out_unsupported_cdb; | 3045 | goto out_unsupported_cdb; |
3170 | size = transport_get_size(sectors, cdb, cmd); | 3046 | size = transport_get_size(sectors, cdb, cmd); |
3171 | cmd->transport_split_cdb = &split_cdb_XX_10; | 3047 | cmd->transport_split_cdb = &split_cdb_XX_10; |
3172 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | 3048 | cmd->t_task->t_task_lba = transport_lba_32(cdb); |
3173 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 3049 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3174 | break; | 3050 | break; |
3175 | case READ_12: | 3051 | case READ_12: |
@@ -3178,7 +3054,7 @@ static int transport_generic_cmd_sequencer( | |||
3178 | goto out_unsupported_cdb; | 3054 | goto out_unsupported_cdb; |
3179 | size = transport_get_size(sectors, cdb, cmd); | 3055 | size = transport_get_size(sectors, cdb, cmd); |
3180 | cmd->transport_split_cdb = &split_cdb_XX_12; | 3056 | cmd->transport_split_cdb = &split_cdb_XX_12; |
3181 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | 3057 | cmd->t_task->t_task_lba = transport_lba_32(cdb); |
3182 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 3058 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3183 | break; | 3059 | break; |
3184 | case READ_16: | 3060 | case READ_16: |
@@ -3187,7 +3063,7 @@ static int transport_generic_cmd_sequencer( | |||
3187 | goto out_unsupported_cdb; | 3063 | goto out_unsupported_cdb; |
3188 | size = transport_get_size(sectors, cdb, cmd); | 3064 | size = transport_get_size(sectors, cdb, cmd); |
3189 | cmd->transport_split_cdb = &split_cdb_XX_16; | 3065 | cmd->transport_split_cdb = &split_cdb_XX_16; |
3190 | T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); | 3066 | cmd->t_task->t_task_lba = transport_lba_64(cdb); |
3191 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 3067 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3192 | break; | 3068 | break; |
3193 | case WRITE_6: | 3069 | case WRITE_6: |
@@ -3196,7 +3072,7 @@ static int transport_generic_cmd_sequencer( | |||
3196 | goto out_unsupported_cdb; | 3072 | goto out_unsupported_cdb; |
3197 | size = transport_get_size(sectors, cdb, cmd); | 3073 | size = transport_get_size(sectors, cdb, cmd); |
3198 | cmd->transport_split_cdb = &split_cdb_XX_6; | 3074 | cmd->transport_split_cdb = &split_cdb_XX_6; |
3199 | T_TASK(cmd)->t_task_lba = transport_lba_21(cdb); | 3075 | cmd->t_task->t_task_lba = transport_lba_21(cdb); |
3200 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 3076 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3201 | break; | 3077 | break; |
3202 | case WRITE_10: | 3078 | case WRITE_10: |
@@ -3205,8 +3081,8 @@ static int transport_generic_cmd_sequencer( | |||
3205 | goto out_unsupported_cdb; | 3081 | goto out_unsupported_cdb; |
3206 | size = transport_get_size(sectors, cdb, cmd); | 3082 | size = transport_get_size(sectors, cdb, cmd); |
3207 | cmd->transport_split_cdb = &split_cdb_XX_10; | 3083 | cmd->transport_split_cdb = &split_cdb_XX_10; |
3208 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | 3084 | cmd->t_task->t_task_lba = transport_lba_32(cdb); |
3209 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | 3085 | cmd->t_task->t_tasks_fua = (cdb[1] & 0x8); |
3210 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 3086 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3211 | break; | 3087 | break; |
3212 | case WRITE_12: | 3088 | case WRITE_12: |
@@ -3215,8 +3091,8 @@ static int transport_generic_cmd_sequencer( | |||
3215 | goto out_unsupported_cdb; | 3091 | goto out_unsupported_cdb; |
3216 | size = transport_get_size(sectors, cdb, cmd); | 3092 | size = transport_get_size(sectors, cdb, cmd); |
3217 | cmd->transport_split_cdb = &split_cdb_XX_12; | 3093 | cmd->transport_split_cdb = &split_cdb_XX_12; |
3218 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | 3094 | cmd->t_task->t_task_lba = transport_lba_32(cdb); |
3219 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | 3095 | cmd->t_task->t_tasks_fua = (cdb[1] & 0x8); |
3220 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 3096 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3221 | break; | 3097 | break; |
3222 | case WRITE_16: | 3098 | case WRITE_16: |
@@ -3225,22 +3101,22 @@ static int transport_generic_cmd_sequencer( | |||
3225 | goto out_unsupported_cdb; | 3101 | goto out_unsupported_cdb; |
3226 | size = transport_get_size(sectors, cdb, cmd); | 3102 | size = transport_get_size(sectors, cdb, cmd); |
3227 | cmd->transport_split_cdb = &split_cdb_XX_16; | 3103 | cmd->transport_split_cdb = &split_cdb_XX_16; |
3228 | T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); | 3104 | cmd->t_task->t_task_lba = transport_lba_64(cdb); |
3229 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | 3105 | cmd->t_task->t_tasks_fua = (cdb[1] & 0x8); |
3230 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 3106 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3231 | break; | 3107 | break; |
3232 | case XDWRITEREAD_10: | 3108 | case XDWRITEREAD_10: |
3233 | if ((cmd->data_direction != DMA_TO_DEVICE) || | 3109 | if ((cmd->data_direction != DMA_TO_DEVICE) || |
3234 | !(T_TASK(cmd)->t_tasks_bidi)) | 3110 | !(cmd->t_task->t_tasks_bidi)) |
3235 | goto out_invalid_cdb_field; | 3111 | goto out_invalid_cdb_field; |
3236 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); | 3112 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); |
3237 | if (sector_ret) | 3113 | if (sector_ret) |
3238 | goto out_unsupported_cdb; | 3114 | goto out_unsupported_cdb; |
3239 | size = transport_get_size(sectors, cdb, cmd); | 3115 | size = transport_get_size(sectors, cdb, cmd); |
3240 | cmd->transport_split_cdb = &split_cdb_XX_10; | 3116 | cmd->transport_split_cdb = &split_cdb_XX_10; |
3241 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | 3117 | cmd->t_task->t_task_lba = transport_lba_32(cdb); |
3242 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 3118 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3243 | passthrough = (TRANSPORT(dev)->transport_type == | 3119 | passthrough = (dev->transport->transport_type == |
3244 | TRANSPORT_PLUGIN_PHBA_PDEV); | 3120 | TRANSPORT_PLUGIN_PHBA_PDEV); |
3245 | /* | 3121 | /* |
3246 | * Skip the remaining assignments for TCM/PSCSI passthrough | 3122 | * Skip the remaining assignments for TCM/PSCSI passthrough |
@@ -3251,7 +3127,7 @@ static int transport_generic_cmd_sequencer( | |||
3251 | * Setup BIDI XOR callback to be run during transport_generic_complete_ok() | 3127 | * Setup BIDI XOR callback to be run during transport_generic_complete_ok() |
3252 | */ | 3128 | */ |
3253 | cmd->transport_complete_callback = &transport_xor_callback; | 3129 | cmd->transport_complete_callback = &transport_xor_callback; |
3254 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | 3130 | cmd->t_task->t_tasks_fua = (cdb[1] & 0x8); |
3255 | break; | 3131 | break; |
3256 | case VARIABLE_LENGTH_CMD: | 3132 | case VARIABLE_LENGTH_CMD: |
3257 | service_action = get_unaligned_be16(&cdb[8]); | 3133 | service_action = get_unaligned_be16(&cdb[8]); |
@@ -3259,7 +3135,7 @@ static int transport_generic_cmd_sequencer( | |||
3259 | * Determine if this is TCM/PSCSI device and we should disable | 3135 | * Determine if this is TCM/PSCSI device and we should disable |
3260 | * internal emulation for this CDB. | 3136 | * internal emulation for this CDB. |
3261 | */ | 3137 | */ |
3262 | passthrough = (TRANSPORT(dev)->transport_type == | 3138 | passthrough = (dev->transport->transport_type == |
3263 | TRANSPORT_PLUGIN_PHBA_PDEV); | 3139 | TRANSPORT_PLUGIN_PHBA_PDEV); |
3264 | 3140 | ||
3265 | switch (service_action) { | 3141 | switch (service_action) { |
@@ -3273,7 +3149,7 @@ static int transport_generic_cmd_sequencer( | |||
3273 | * XDWRITE_READ_32 logic. | 3149 | * XDWRITE_READ_32 logic. |
3274 | */ | 3150 | */ |
3275 | cmd->transport_split_cdb = &split_cdb_XX_32; | 3151 | cmd->transport_split_cdb = &split_cdb_XX_32; |
3276 | T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb); | 3152 | cmd->t_task->t_task_lba = transport_lba_64_ext(cdb); |
3277 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 3153 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3278 | 3154 | ||
3279 | /* | 3155 | /* |
@@ -3287,14 +3163,14 @@ static int transport_generic_cmd_sequencer( | |||
3287 | * transport_generic_complete_ok() | 3163 | * transport_generic_complete_ok() |
3288 | */ | 3164 | */ |
3289 | cmd->transport_complete_callback = &transport_xor_callback; | 3165 | cmd->transport_complete_callback = &transport_xor_callback; |
3290 | T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8); | 3166 | cmd->t_task->t_tasks_fua = (cdb[10] & 0x8); |
3291 | break; | 3167 | break; |
3292 | case WRITE_SAME_32: | 3168 | case WRITE_SAME_32: |
3293 | sectors = transport_get_sectors_32(cdb, cmd, §or_ret); | 3169 | sectors = transport_get_sectors_32(cdb, cmd, §or_ret); |
3294 | if (sector_ret) | 3170 | if (sector_ret) |
3295 | goto out_unsupported_cdb; | 3171 | goto out_unsupported_cdb; |
3296 | size = transport_get_size(sectors, cdb, cmd); | 3172 | size = transport_get_size(sectors, cdb, cmd); |
3297 | T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]); | 3173 | cmd->t_task->t_task_lba = get_unaligned_be64(&cdb[12]); |
3298 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | 3174 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3299 | 3175 | ||
3300 | /* | 3176 | /* |
@@ -3326,16 +3202,16 @@ static int transport_generic_cmd_sequencer( | |||
3326 | } | 3202 | } |
3327 | break; | 3203 | break; |
3328 | case MAINTENANCE_IN: | 3204 | case MAINTENANCE_IN: |
3329 | if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) { | 3205 | if (dev->transport->get_device_type(dev) != TYPE_ROM) { |
3330 | /* MAINTENANCE_IN from SCC-2 */ | 3206 | /* MAINTENANCE_IN from SCC-2 */ |
3331 | /* | 3207 | /* |
3332 | * Check for emulated MI_REPORT_TARGET_PGS. | 3208 | * Check for emulated MI_REPORT_TARGET_PGS. |
3333 | */ | 3209 | */ |
3334 | if (cdb[1] == MI_REPORT_TARGET_PGS) { | 3210 | if (cdb[1] == MI_REPORT_TARGET_PGS) { |
3335 | cmd->transport_emulate_cdb = | 3211 | cmd->transport_emulate_cdb = |
3336 | (T10_ALUA(su_dev)->alua_type == | 3212 | (su_dev->t10_alua.alua_type == |
3337 | SPC3_ALUA_EMULATED) ? | 3213 | SPC3_ALUA_EMULATED) ? |
3338 | &core_emulate_report_target_port_groups : | 3214 | core_emulate_report_target_port_groups : |
3339 | NULL; | 3215 | NULL; |
3340 | } | 3216 | } |
3341 | size = (cdb[6] << 24) | (cdb[7] << 16) | | 3217 | size = (cdb[6] << 24) | (cdb[7] << 16) | |
@@ -3380,9 +3256,9 @@ static int transport_generic_cmd_sequencer( | |||
3380 | case PERSISTENT_RESERVE_IN: | 3256 | case PERSISTENT_RESERVE_IN: |
3381 | case PERSISTENT_RESERVE_OUT: | 3257 | case PERSISTENT_RESERVE_OUT: |
3382 | cmd->transport_emulate_cdb = | 3258 | cmd->transport_emulate_cdb = |
3383 | (T10_RES(su_dev)->res_type == | 3259 | (su_dev->t10_pr.res_type == |
3384 | SPC3_PERSISTENT_RESERVATIONS) ? | 3260 | SPC3_PERSISTENT_RESERVATIONS) ? |
3385 | &core_scsi3_emulate_pr : NULL; | 3261 | core_scsi3_emulate_pr : NULL; |
3386 | size = (cdb[7] << 8) + cdb[8]; | 3262 | size = (cdb[7] << 8) + cdb[8]; |
3387 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3263 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3388 | break; | 3264 | break; |
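
The ternaries above pick the emulation entry point once, while sequencing the CDB: either a handler function (emulated mode) or NULL (passthrough, so ->do_task() runs the CDB unmodified). A sketch of that hook-selection idiom (demo_* names hypothetical):

#include <stdio.h>

enum res_type { DEMO_PASSTHROUGH, DEMO_PERSISTENT_RESERVATIONS };

static int demo_emulate_pr(void) { return 0; }

int main(void)
{
	enum res_type res_type = DEMO_PERSISTENT_RESERVATIONS;
	int (*emulate_cdb)(void);

	/* handler when emulating, NULL when passing through */
	emulate_cdb = (res_type == DEMO_PERSISTENT_RESERVATIONS) ?
			demo_emulate_pr : NULL;

	if (emulate_cdb)
		printf("emulated, ret=%d\n", emulate_cdb());
	else
		printf("passthrough\n");
	return 0;
}
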
@@ -3396,16 +3272,16 @@ static int transport_generic_cmd_sequencer( | |||
3396 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3272 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3397 | break; | 3273 | break; |
3398 | case MAINTENANCE_OUT: | 3274 | case MAINTENANCE_OUT: |
3399 | if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) { | 3275 | if (dev->transport->get_device_type(dev) != TYPE_ROM) { |
3400 | /* MAINTENANCE_OUT from SCC-2 | 3276 | /* MAINTENANCE_OUT from SCC-2 |
3401 | * | 3277 | * |
3402 | * Check for emulated MO_SET_TARGET_PGS. | 3278 | * Check for emulated MO_SET_TARGET_PGS. |
3403 | */ | 3279 | */ |
3404 | if (cdb[1] == MO_SET_TARGET_PGS) { | 3280 | if (cdb[1] == MO_SET_TARGET_PGS) { |
3405 | cmd->transport_emulate_cdb = | 3281 | cmd->transport_emulate_cdb = |
3406 | (T10_ALUA(su_dev)->alua_type == | 3282 | (su_dev->t10_alua.alua_type == |
3407 | SPC3_ALUA_EMULATED) ? | 3283 | SPC3_ALUA_EMULATED) ? |
3408 | &core_emulate_set_target_port_groups : | 3284 | core_emulate_set_target_port_groups : |
3409 | NULL; | 3285 | NULL; |
3410 | } | 3286 | } |
3411 | 3287 | ||
@@ -3423,7 +3299,7 @@ static int transport_generic_cmd_sequencer( | |||
3423 | * Do implicit HEAD_OF_QUEUE processing for INQUIRY. | 3299 | * Do implicit HEAD_OF_QUEUE processing for INQUIRY. |
3424 | * See spc4r17 section 5.3 | 3300 | * See spc4r17 section 5.3 |
3425 | */ | 3301 | */ |
3426 | if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | 3302 | if (cmd->se_lun->lun_se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
3427 | cmd->sam_task_attr = MSG_HEAD_TAG; | 3303 | cmd->sam_task_attr = MSG_HEAD_TAG; |
3428 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3304 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3429 | break; | 3305 | break; |
@@ -3500,9 +3376,9 @@ static int transport_generic_cmd_sequencer( | |||
3500 | * emulation disabled. | 3376 | * emulation disabled. |
3501 | */ | 3377 | */ |
3502 | cmd->transport_emulate_cdb = | 3378 | cmd->transport_emulate_cdb = |
3503 | (T10_RES(su_dev)->res_type != | 3379 | (su_dev->t10_pr.res_type != |
3504 | SPC_PASSTHROUGH) ? | 3380 | SPC_PASSTHROUGH) ? |
3505 | &core_scsi2_emulate_crh : NULL; | 3381 | core_scsi2_emulate_crh : NULL; |
3506 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | 3382 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; |
3507 | break; | 3383 | break; |
3508 | case RELEASE: | 3384 | case RELEASE: |
@@ -3517,9 +3393,9 @@ static int transport_generic_cmd_sequencer( | |||
3517 | size = cmd->data_length; | 3393 | size = cmd->data_length; |
3518 | 3394 | ||
3519 | cmd->transport_emulate_cdb = | 3395 | cmd->transport_emulate_cdb = |
3520 | (T10_RES(su_dev)->res_type != | 3396 | (su_dev->t10_pr.res_type != |
3521 | SPC_PASSTHROUGH) ? | 3397 | SPC_PASSTHROUGH) ? |
3522 | &core_scsi2_emulate_crh : NULL; | 3398 | core_scsi2_emulate_crh : NULL; |
3523 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | 3399 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; |
3524 | break; | 3400 | break; |
3525 | case SYNCHRONIZE_CACHE: | 3401 | case SYNCHRONIZE_CACHE: |
@@ -3529,10 +3405,10 @@ static int transport_generic_cmd_sequencer( | |||
3529 | */ | 3405 | */ |
3530 | if (cdb[0] == SYNCHRONIZE_CACHE) { | 3406 | if (cdb[0] == SYNCHRONIZE_CACHE) { |
3531 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); | 3407 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); |
3532 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | 3408 | cmd->t_task->t_task_lba = transport_lba_32(cdb); |
3533 | } else { | 3409 | } else { |
3534 | sectors = transport_get_sectors_16(cdb, cmd, §or_ret); | 3410 | sectors = transport_get_sectors_16(cdb, cmd, §or_ret); |
3535 | T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); | 3411 | cmd->t_task->t_task_lba = transport_lba_64(cdb); |
3536 | } | 3412 | } |
3537 | if (sector_ret) | 3413 | if (sector_ret) |
3538 | goto out_unsupported_cdb; | 3414 | goto out_unsupported_cdb; |
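
The SYNCHRONIZE_CACHE case above picks a 32-bit or 64-bit LBA depending on the CDB variant, via the transport_lba_*() helpers used throughout the sequencer. Sketches of those big-endian field extractions (matching the widths used above; the test CDB is arbitrary):

#include <stdint.h>
#include <stdio.h>

static uint32_t lba_21(const uint8_t *cdb)   /* 6-byte CDBs  */
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static uint32_t lba_32(const uint8_t *cdb)   /* 10/12-byte   */
{
	return ((uint32_t)cdb[2] << 24) | (cdb[3] << 16) |
	       (cdb[4] << 8) | cdb[5];
}

static uint64_t lba_64(const uint8_t *cdb)   /* 16-byte CDBs */
{
	return ((uint64_t)lba_32(cdb) << 32) |
	       lba_32(cdb + 4);    /* low 32 bits live at cdb[6..9] */
}

int main(void)
{
	uint8_t cdb[16] = { 0x88, 0, 0, 0, 0, 0, 0, 0, 0, 1 };
	printf("READ_16 lba: %llu lba_21: %u\n",
	       (unsigned long long)lba_64(cdb), lba_21(cdb));
	return 0;
}
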
@@ -3543,7 +3419,7 @@ static int transport_generic_cmd_sequencer( | |||
3543 | /* | 3419 | /* |
3544 | * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb() | 3420 | * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb() |
3545 | */ | 3421 | */ |
3546 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) | 3422 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) |
3547 | break; | 3423 | break; |
3548 | /* | 3424 | /* |
3549 | * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation | 3425 | * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation |
@@ -3559,7 +3435,7 @@ static int transport_generic_cmd_sequencer( | |||
3559 | break; | 3435 | break; |
3560 | case UNMAP: | 3436 | case UNMAP: |
3561 | size = get_unaligned_be16(&cdb[7]); | 3437 | size = get_unaligned_be16(&cdb[7]); |
3562 | passthrough = (TRANSPORT(dev)->transport_type == | 3438 | passthrough = (dev->transport->transport_type == |
3563 | TRANSPORT_PLUGIN_PHBA_PDEV); | 3439 | TRANSPORT_PLUGIN_PHBA_PDEV); |
3564 | /* | 3440 | /* |
3565 | * Determine if the received UNMAP is used for direct passthrough | 3441 | * Determine if the received UNMAP is used for direct passthrough |
@@ -3578,8 +3454,8 @@ static int transport_generic_cmd_sequencer( | |||
3578 | if (sector_ret) | 3454 | if (sector_ret) |
3579 | goto out_unsupported_cdb; | 3455 | goto out_unsupported_cdb; |
3580 | size = transport_get_size(sectors, cdb, cmd); | 3456 | size = transport_get_size(sectors, cdb, cmd); |
3581 | T_TASK(cmd)->t_task_lba = get_unaligned_be16(&cdb[2]); | 3457 | cmd->t_task->t_task_lba = get_unaligned_be16(&cdb[2]); |
3582 | passthrough = (TRANSPORT(dev)->transport_type == | 3458 | passthrough = (dev->transport->transport_type == |
3583 | TRANSPORT_PLUGIN_PHBA_PDEV); | 3459 | TRANSPORT_PLUGIN_PHBA_PDEV); |
3584 | /* | 3460 | /* |
3585 | * Determine if the received WRITE_SAME_16 is used for direct | 3461 | * Determine if the received WRITE_SAME_16 is used for direct |
@@ -3625,20 +3501,20 @@ static int transport_generic_cmd_sequencer( | |||
3625 | break; | 3501 | break; |
3626 | case REPORT_LUNS: | 3502 | case REPORT_LUNS: |
3627 | cmd->transport_emulate_cdb = | 3503 | cmd->transport_emulate_cdb = |
3628 | &transport_core_report_lun_response; | 3504 | transport_core_report_lun_response; |
3629 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | 3505 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; |
3630 | /* | 3506 | /* |
3631 | * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS | 3507 | * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS |
3632 | * See spc4r17 section 5.3 | 3508 | * See spc4r17 section 5.3 |
3633 | */ | 3509 | */ |
3634 | if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | 3510 | if (cmd->se_lun->lun_se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
3635 | cmd->sam_task_attr = MSG_HEAD_TAG; | 3511 | cmd->sam_task_attr = MSG_HEAD_TAG; |
3636 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3512 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3637 | break; | 3513 | break; |
3638 | default: | 3514 | default: |
3639 | printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode" | 3515 | printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode" |
3640 | " 0x%02x, sending CHECK_CONDITION.\n", | 3516 | " 0x%02x, sending CHECK_CONDITION.\n", |
3641 | CMD_TFO(cmd)->get_fabric_name(), cdb[0]); | 3517 | cmd->se_tfo->get_fabric_name(), cdb[0]); |
3642 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | 3518 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; |
3643 | goto out_unsupported_cdb; | 3519 | goto out_unsupported_cdb; |
3644 | } | 3520 | } |
@@ -3646,7 +3522,7 @@ static int transport_generic_cmd_sequencer( | |||
3646 | if (size != cmd->data_length) { | 3522 | if (size != cmd->data_length) { |
3647 | printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:" | 3523 | printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:" |
3648 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" | 3524 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" |
3649 | " 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(), | 3525 | " 0x%02x\n", cmd->se_tfo->get_fabric_name(), |
3650 | cmd->data_length, size, cdb[0]); | 3526 | cmd->data_length, size, cdb[0]); |
3651 | 3527 | ||
3652 | cmd->cmd_spdtl = size; | 3528 | cmd->cmd_spdtl = size; |
@@ -3660,10 +3536,10 @@ static int transport_generic_cmd_sequencer( | |||
3660 | * Reject READ_* or WRITE_* with overflow/underflow for | 3536 | * Reject READ_* or WRITE_* with overflow/underflow for |
3661 | * type SCF_SCSI_DATA_SG_IO_CDB. | 3537 | * type SCF_SCSI_DATA_SG_IO_CDB. |
3662 | */ | 3538 | */ |
3663 | if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512)) { | 3539 | if (!(ret) && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) { |
3664 | printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op" | 3540 | printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op" |
3665 | " CDB on non 512-byte sector setup subsystem" | 3541 | " CDB on non 512-byte sector setup subsystem" |
3666 | " plugin: %s\n", TRANSPORT(dev)->name); | 3542 | " plugin: %s\n", dev->transport->name); |
3667 | /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ | 3543 | /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ |
3668 | goto out_invalid_cdb_field; | 3544 | goto out_invalid_cdb_field; |
3669 | } | 3545 | } |
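
When the size implied by the CDB disagrees with the fabric's expected transfer length, the sequencer records the CDB size in cmd_spdtl and, in code not fully shown in this hunk, classifies the mismatch as overflow or underflow with a residual count. A schematic sketch of that classification (flag values and names are illustrative only):

#include <stdio.h>
#include <stdint.h>

#define DEMO_OVERFLOW_BIT  (1 << 0)
#define DEMO_UNDERFLOW_BIT (1 << 1)

static int demo_check_len(uint32_t cdb_size, uint32_t data_length,
			  uint32_t *residual, int *flags)
{
	if (cdb_size == data_length)
		return 0;
	if (cdb_size > data_length) {
		*flags |= DEMO_OVERFLOW_BIT;
		*residual = cdb_size - data_length;
	} else {
		*flags |= DEMO_UNDERFLOW_BIT;
		*residual = data_length - cdb_size;
	}
	return 1;               /* caller logs the mismatch */
}

int main(void)
{
	uint32_t resid = 0;
	int flags = 0;

	demo_check_len(4096, 512, &resid, &flags);
	printf("flags=0x%x residual=%u\n", flags, resid);
	return 0;
}
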
@@ -3786,7 +3662,7 @@ static void transport_memcpy_se_mem_read_contig( | |||
3786 | */ | 3662 | */ |
3787 | static void transport_complete_task_attr(struct se_cmd *cmd) | 3663 | static void transport_complete_task_attr(struct se_cmd *cmd) |
3788 | { | 3664 | { |
3789 | struct se_device *dev = SE_DEV(cmd); | 3665 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
3790 | struct se_cmd *cmd_p, *cmd_tmp; | 3666 | struct se_cmd *cmd_p, *cmd_tmp; |
3791 | int new_active_tasks = 0; | 3667 | int new_active_tasks = 0; |
3792 | 3668 | ||
@@ -3846,7 +3722,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) | |||
3846 | * to do the processing of the Active tasks. | 3722 | * to do the processing of the Active tasks. |
3847 | */ | 3723 | */ |
3848 | if (new_active_tasks != 0) | 3724 | if (new_active_tasks != 0) |
3849 | wake_up_interruptible(&dev->dev_queue_obj->thread_wq); | 3725 | wake_up_interruptible(&dev->dev_queue_obj.thread_wq); |
3850 | } | 3726 | } |
3851 | 3727 | ||
3852 | static void transport_generic_complete_ok(struct se_cmd *cmd) | 3728 | static void transport_generic_complete_ok(struct se_cmd *cmd) |
@@ -3857,7 +3733,7 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) | |||
3857 | * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task | 3733 | * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task |
3858 | * Attribute. | 3734 | * Attribute. |
3859 | */ | 3735 | */ |
3860 | if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | 3736 | if (cmd->se_lun->lun_se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
3861 | transport_complete_task_attr(cmd); | 3737 | transport_complete_task_attr(cmd); |
3862 | /* | 3738 | /* |
3863 | * Check if we need to retrieve a sense buffer from | 3739 | * Check if we need to retrieve a sense buffer from |
@@ -3889,8 +3765,8 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) | |||
3889 | switch (cmd->data_direction) { | 3765 | switch (cmd->data_direction) { |
3890 | case DMA_FROM_DEVICE: | 3766 | case DMA_FROM_DEVICE: |
3891 | spin_lock(&cmd->se_lun->lun_sep_lock); | 3767 | spin_lock(&cmd->se_lun->lun_sep_lock); |
3892 | if (SE_LUN(cmd)->lun_sep) { | 3768 | if (cmd->se_lun->lun_sep) { |
3893 | SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets += | 3769 | cmd->se_lun->lun_sep->sep_stats.tx_data_octets += |
3894 | cmd->data_length; | 3770 | cmd->data_length; |
3895 | } | 3771 | } |
3896 | spin_unlock(&cmd->se_lun->lun_sep_lock); | 3772 | spin_unlock(&cmd->se_lun->lun_sep_lock); |
@@ -3901,34 +3777,34 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) | |||
3901 | */ | 3777 | */ |
3902 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) | 3778 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) |
3903 | transport_memcpy_write_contig(cmd, | 3779 | transport_memcpy_write_contig(cmd, |
3904 | T_TASK(cmd)->t_task_pt_sgl, | 3780 | cmd->t_task->t_task_pt_sgl, |
3905 | T_TASK(cmd)->t_task_buf); | 3781 | cmd->t_task->t_task_buf); |
3906 | 3782 | ||
3907 | CMD_TFO(cmd)->queue_data_in(cmd); | 3783 | cmd->se_tfo->queue_data_in(cmd); |
3908 | break; | 3784 | break; |
3909 | case DMA_TO_DEVICE: | 3785 | case DMA_TO_DEVICE: |
3910 | spin_lock(&cmd->se_lun->lun_sep_lock); | 3786 | spin_lock(&cmd->se_lun->lun_sep_lock); |
3911 | if (SE_LUN(cmd)->lun_sep) { | 3787 | if (cmd->se_lun->lun_sep) { |
3912 | SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets += | 3788 | cmd->se_lun->lun_sep->sep_stats.rx_data_octets += |
3913 | cmd->data_length; | 3789 | cmd->data_length; |
3914 | } | 3790 | } |
3915 | spin_unlock(&cmd->se_lun->lun_sep_lock); | 3791 | spin_unlock(&cmd->se_lun->lun_sep_lock); |
3916 | /* | 3792 | /* |
3917 | * Check if we need to send READ payload for BIDI-COMMAND | 3793 | * Check if we need to send READ payload for BIDI-COMMAND |
3918 | */ | 3794 | */ |
3919 | if (T_TASK(cmd)->t_mem_bidi_list != NULL) { | 3795 | if (cmd->t_task->t_mem_bidi_list != NULL) { |
3920 | spin_lock(&cmd->se_lun->lun_sep_lock); | 3796 | spin_lock(&cmd->se_lun->lun_sep_lock); |
3921 | if (SE_LUN(cmd)->lun_sep) { | 3797 | if (cmd->se_lun->lun_sep) { |
3922 | SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets += | 3798 | cmd->se_lun->lun_sep->sep_stats.tx_data_octets += |
3923 | cmd->data_length; | 3799 | cmd->data_length; |
3924 | } | 3800 | } |
3925 | spin_unlock(&cmd->se_lun->lun_sep_lock); | 3801 | spin_unlock(&cmd->se_lun->lun_sep_lock); |
3926 | CMD_TFO(cmd)->queue_data_in(cmd); | 3802 | cmd->se_tfo->queue_data_in(cmd); |
3927 | break; | 3803 | break; |
3928 | } | 3804 | } |
3929 | /* Fall through for DMA_TO_DEVICE */ | 3805 | /* Fall through for DMA_TO_DEVICE */ |
3930 | case DMA_NONE: | 3806 | case DMA_NONE: |
3931 | CMD_TFO(cmd)->queue_status(cmd); | 3807 | cmd->se_tfo->queue_status(cmd); |
3932 | break; | 3808 | break; |
3933 | default: | 3809 | default: |
3934 | break; | 3810 | break; |
@@ -3943,9 +3819,9 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) | |||
3943 | struct se_task *task, *task_tmp; | 3819 | struct se_task *task, *task_tmp; |
3944 | unsigned long flags; | 3820 | unsigned long flags; |
3945 | 3821 | ||
3946 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 3822 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
3947 | list_for_each_entry_safe(task, task_tmp, | 3823 | list_for_each_entry_safe(task, task_tmp, |
3948 | &T_TASK(cmd)->t_task_list, t_list) { | 3824 | &cmd->t_task->t_task_list, t_list) { |
3949 | if (atomic_read(&task->task_active)) | 3825 | if (atomic_read(&task->task_active)) |
3950 | continue; | 3826 | continue; |
3951 | 3827 | ||
@@ -3954,15 +3830,15 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) | |||
3954 | 3830 | ||
3955 | list_del(&task->t_list); | 3831 | list_del(&task->t_list); |
3956 | 3832 | ||
3957 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 3833 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
3958 | if (task->se_dev) | 3834 | if (task->se_dev) |
3959 | TRANSPORT(task->se_dev)->free_task(task); | 3835 | task->se_dev->transport->free_task(task); |
3960 | else | 3836 | else |
3961 | printk(KERN_ERR "task[%u] - task->se_dev is NULL\n", | 3837 | printk(KERN_ERR "task[%u] - task->se_dev is NULL\n", |
3962 | task->task_no); | 3838 | task->task_no); |
3963 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 3839 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
3964 | } | 3840 | } |
3965 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 3841 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
3966 | } | 3842 | } |
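
The walk above uses the classic lock-drop pattern: unlink each finished task while holding t_state_lock, release the lock to call the backend's free_task() (which may sleep), then retake it before touching the list again; the _safe iteration is what makes unlinking mid-walk legal. A userspace sketch with a pthread mutex standing in for the spinlock (demo_* names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

struct demo_task {
	int task_no;
	struct demo_task *next;
	void (*free_task)(struct demo_task *);  /* backend hook */
};

static void demo_free(struct demo_task *t)
{
	printf("freed task %d\n", t->task_no);
}

static void demo_free_all(struct demo_task **head)
{
	pthread_mutex_lock(&state_lock);
	while (*head) {
		struct demo_task *t = *head;
		*head = t->next;                 /* list_del() analog */

		pthread_mutex_unlock(&state_lock);
		t->free_task(t);                 /* called unlocked   */
		pthread_mutex_lock(&state_lock);
	}
	pthread_mutex_unlock(&state_lock);
}

int main(void)
{
	struct demo_task b = { 2, NULL, demo_free };
	struct demo_task a = { 1, &b, demo_free };
	struct demo_task *head = &a;

	demo_free_all(&head);
	return 0;
}
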
3967 | 3843 | ||
3968 | static inline void transport_free_pages(struct se_cmd *cmd) | 3844 | static inline void transport_free_pages(struct se_cmd *cmd) |
@@ -3975,9 +3851,9 @@ static inline void transport_free_pages(struct se_cmd *cmd) | |||
3975 | if (cmd->se_dev->transport->do_se_mem_map) | 3851 | if (cmd->se_dev->transport->do_se_mem_map) |
3976 | free_page = 0; | 3852 | free_page = 0; |
3977 | 3853 | ||
3978 | if (T_TASK(cmd)->t_task_buf) { | 3854 | if (cmd->t_task->t_task_buf) { |
3979 | kfree(T_TASK(cmd)->t_task_buf); | 3855 | kfree(cmd->t_task->t_task_buf); |
3980 | T_TASK(cmd)->t_task_buf = NULL; | 3856 | cmd->t_task->t_task_buf = NULL; |
3981 | return; | 3857 | return; |
3982 | } | 3858 | } |
3983 | 3859 | ||
@@ -3987,11 +3863,11 @@ static inline void transport_free_pages(struct se_cmd *cmd) | |||
3987 | if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC) | 3863 | if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC) |
3988 | return; | 3864 | return; |
3989 | 3865 | ||
3990 | if (!(T_TASK(cmd)->t_tasks_se_num)) | 3866 | if (!(cmd->t_task->t_tasks_se_num)) |
3991 | return; | 3867 | return; |
3992 | 3868 | ||
3993 | list_for_each_entry_safe(se_mem, se_mem_tmp, | 3869 | list_for_each_entry_safe(se_mem, se_mem_tmp, |
3994 | T_TASK(cmd)->t_mem_list, se_list) { | 3870 | cmd->t_task->t_mem_list, se_list) { |
3995 | /* | 3871 | /* |
3996 | * We only call __free_page(struct se_mem->se_page) when | 3872 | * We only call __free_page(struct se_mem->se_page) when |
3997 | * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, | 3873 | * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, |
@@ -4003,9 +3879,9 @@ static inline void transport_free_pages(struct se_cmd *cmd) | |||
4003 | kmem_cache_free(se_mem_cache, se_mem); | 3879 | kmem_cache_free(se_mem_cache, se_mem); |
4004 | } | 3880 | } |
4005 | 3881 | ||
4006 | if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) { | 3882 | if (cmd->t_task->t_mem_bidi_list && cmd->t_task->t_tasks_se_bidi_num) { |
4007 | list_for_each_entry_safe(se_mem, se_mem_tmp, | 3883 | list_for_each_entry_safe(se_mem, se_mem_tmp, |
4008 | T_TASK(cmd)->t_mem_bidi_list, se_list) { | 3884 | cmd->t_task->t_mem_bidi_list, se_list) { |
4009 | /* | 3885 | /* |
4010 | * We only release call __free_page(struct se_mem->se_page) when | 3886 | * We only release call __free_page(struct se_mem->se_page) when |
4011 | * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, | 3887 | * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, |
@@ -4018,11 +3894,11 @@ static inline void transport_free_pages(struct se_cmd *cmd) | |||
4018 | } | 3894 | } |
4019 | } | 3895 | } |
4020 | 3896 | ||
4021 | kfree(T_TASK(cmd)->t_mem_bidi_list); | 3897 | kfree(cmd->t_task->t_mem_bidi_list); |
4022 | T_TASK(cmd)->t_mem_bidi_list = NULL; | 3898 | cmd->t_task->t_mem_bidi_list = NULL; |
4023 | kfree(T_TASK(cmd)->t_mem_list); | 3899 | kfree(cmd->t_task->t_mem_list); |
4024 | T_TASK(cmd)->t_mem_list = NULL; | 3900 | cmd->t_task->t_mem_list = NULL; |
4025 | T_TASK(cmd)->t_tasks_se_num = 0; | 3901 | cmd->t_task->t_tasks_se_num = 0; |
4026 | } | 3902 | } |
4027 | 3903 | ||
4028 | static inline void transport_release_tasks(struct se_cmd *cmd) | 3904 | static inline void transport_release_tasks(struct se_cmd *cmd) |
@@ -4034,23 +3910,23 @@ static inline int transport_dec_and_check(struct se_cmd *cmd) | |||
4034 | { | 3910 | { |
4035 | unsigned long flags; | 3911 | unsigned long flags; |
4036 | 3912 | ||
4037 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 3913 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
4038 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { | 3914 | if (atomic_read(&cmd->t_task->t_fe_count)) { |
4039 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) { | 3915 | if (!(atomic_dec_and_test(&cmd->t_task->t_fe_count))) { |
4040 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | 3916 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, |
4041 | flags); | 3917 | flags); |
4042 | return 1; | 3918 | return 1; |
4043 | } | 3919 | } |
4044 | } | 3920 | } |
4045 | 3921 | ||
4046 | if (atomic_read(&T_TASK(cmd)->t_se_count)) { | 3922 | if (atomic_read(&cmd->t_task->t_se_count)) { |
4047 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) { | 3923 | if (!(atomic_dec_and_test(&cmd->t_task->t_se_count))) { |
4048 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | 3924 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, |
4049 | flags); | 3925 | flags); |
4050 | return 1; | 3926 | return 1; |
4051 | } | 3927 | } |
4052 | } | 3928 | } |
4053 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 3929 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
4054 | 3930 | ||
4055 | return 0; | 3931 | return 0; |
4056 | } | 3932 | } |
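
transport_dec_and_check() above decrements two reference counts in turn and reports 1 if either is still held afterwards, so the caller defers teardown. A sketch of the same dec-and-test flow, with plain seq_cst C11 atomics standing in for the decrements the kernel performs under t_state_lock (demo_* names hypothetical):

#include <stdatomic.h>
#include <stdio.h>

static int demo_dec_and_check(atomic_int *fe_count, atomic_int *se_count)
{
	/* atomic_fetch_sub returns the prior value, so "prior - 1"
	 * is the post-decrement count, like atomic_dec_and_test(). */
	if (atomic_load(fe_count) &&
	    atomic_fetch_sub(fe_count, 1) - 1 != 0)
		return 1;       /* frontend references remain */
	if (atomic_load(se_count) &&
	    atomic_fetch_sub(se_count, 1) - 1 != 0)
		return 1;       /* core references remain     */
	return 0;               /* safe to tear down          */
}

int main(void)
{
	atomic_int fe = 2, se = 1;

	printf("%d\n", demo_dec_and_check(&fe, &se)); /* 1: fe still held */
	printf("%d\n", demo_dec_and_check(&fe, &se)); /* 0: both hit zero */
	return 0;
}
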
@@ -4062,20 +3938,20 @@ static void transport_release_fe_cmd(struct se_cmd *cmd) | |||
4062 | if (transport_dec_and_check(cmd)) | 3938 | if (transport_dec_and_check(cmd)) |
4063 | return; | 3939 | return; |
4064 | 3940 | ||
4065 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 3941 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
4066 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { | 3942 | if (!(atomic_read(&cmd->t_task->transport_dev_active))) { |
4067 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 3943 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
4068 | goto free_pages; | 3944 | goto free_pages; |
4069 | } | 3945 | } |
4070 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); | 3946 | atomic_set(&cmd->t_task->transport_dev_active, 0); |
4071 | transport_all_task_dev_remove_state(cmd); | 3947 | transport_all_task_dev_remove_state(cmd); |
4072 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 3948 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
4073 | 3949 | ||
4074 | transport_release_tasks(cmd); | 3950 | transport_release_tasks(cmd); |
4075 | free_pages: | 3951 | free_pages: |
4076 | transport_free_pages(cmd); | 3952 | transport_free_pages(cmd); |
4077 | transport_free_se_cmd(cmd); | 3953 | transport_free_se_cmd(cmd); |
4078 | CMD_TFO(cmd)->release_cmd_direct(cmd); | 3954 | cmd->se_tfo->release_cmd_direct(cmd); |
4079 | } | 3955 | } |
4080 | 3956 | ||
4081 | static int transport_generic_remove( | 3957 | static int transport_generic_remove( |
@@ -4085,27 +3961,27 @@ static int transport_generic_remove( | |||
4085 | { | 3961 | { |
4086 | unsigned long flags; | 3962 | unsigned long flags; |
4087 | 3963 | ||
4088 | if (!(T_TASK(cmd))) | 3964 | if (!(cmd->t_task)) |
4089 | goto release_cmd; | 3965 | goto release_cmd; |
4090 | 3966 | ||
4091 | if (transport_dec_and_check(cmd)) { | 3967 | if (transport_dec_and_check(cmd)) { |
4092 | if (session_reinstatement) { | 3968 | if (session_reinstatement) { |
4093 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 3969 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
4094 | transport_all_task_dev_remove_state(cmd); | 3970 | transport_all_task_dev_remove_state(cmd); |
4095 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | 3971 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, |
4096 | flags); | 3972 | flags); |
4097 | } | 3973 | } |
4098 | return 1; | 3974 | return 1; |
4099 | } | 3975 | } |
4100 | 3976 | ||
4101 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 3977 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
4102 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { | 3978 | if (!(atomic_read(&cmd->t_task->transport_dev_active))) { |
4103 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 3979 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
4104 | goto free_pages; | 3980 | goto free_pages; |
4105 | } | 3981 | } |
4106 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); | 3982 | atomic_set(&cmd->t_task->transport_dev_active, 0); |
4107 | transport_all_task_dev_remove_state(cmd); | 3983 | transport_all_task_dev_remove_state(cmd); |
4108 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 3984 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
4109 | 3985 | ||
4110 | transport_release_tasks(cmd); | 3986 | transport_release_tasks(cmd); |
4111 | free_pages: | 3987 | free_pages: |
@@ -4116,7 +3992,7 @@ release_cmd: | |||
4116 | transport_release_cmd_to_pool(cmd); | 3992 | transport_release_cmd_to_pool(cmd); |
4117 | } else { | 3993 | } else { |
4118 | transport_free_se_cmd(cmd); | 3994 | transport_free_se_cmd(cmd); |
4119 | CMD_TFO(cmd)->release_cmd_direct(cmd); | 3995 | cmd->se_tfo->release_cmd_direct(cmd); |
4120 | } | 3996 | } |
4121 | 3997 | ||
4122 | return 0; | 3998 | return 0; |
@@ -4156,8 +4032,8 @@ int transport_generic_map_mem_to_cmd( | |||
4156 | return -ENOSYS; | 4032 | return -ENOSYS; |
4157 | } | 4033 | } |
4158 | 4034 | ||
4159 | T_TASK(cmd)->t_mem_list = (struct list_head *)mem; | 4035 | cmd->t_task->t_mem_list = (struct list_head *)mem; |
4160 | T_TASK(cmd)->t_tasks_se_num = sg_mem_num; | 4036 | cmd->t_task->t_tasks_se_num = sg_mem_num; |
4161 | cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC; | 4037 | cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC; |
4162 | return 0; | 4038 | return 0; |
4163 | } | 4039 | } |
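The -ENOSYS above is part of the "use errno values instead of returning -1 for everything" conversion: each failure path now reports why it failed. A sketch of the convention, using a hypothetical helper rather than code from this file:

    /* Hypothetical example of the errno convention adopted here */
    static int example_setup(struct se_cmd *cmd, void *mem)
    {
    	if (!mem)
    		return -EINVAL;		/* caller passed a bad argument */

    	cmd->t_task->t_mem_list = transport_init_se_mem_list();
    	if (!cmd->t_task->t_mem_list)
    		return -ENOMEM;		/* allocation failed */

    	return 0;
    }

Callers that used to test "ret < 0" keep working, but can now propagate or log the precise cause.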
@@ -4172,36 +4048,36 @@ int transport_generic_map_mem_to_cmd( | |||
4172 | * processed into a TCM struct se_subsystem_dev, we do the mapping | 4048 | * processed into a TCM struct se_subsystem_dev, we do the mapping |
4173 | * from the passed physical memory to struct se_mem->se_page here. | 4049 | * from the passed physical memory to struct se_mem->se_page here. |
4174 | */ | 4050 | */ |
4175 | T_TASK(cmd)->t_mem_list = transport_init_se_mem_list(); | 4051 | cmd->t_task->t_mem_list = transport_init_se_mem_list(); |
4176 | if (!(T_TASK(cmd)->t_mem_list)) | 4052 | if (!(cmd->t_task->t_mem_list)) |
4177 | return -ENOMEM; | 4053 | return -ENOMEM; |
4178 | 4054 | ||
4179 | ret = transport_map_sg_to_mem(cmd, | 4055 | ret = transport_map_sg_to_mem(cmd, |
4180 | T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out); | 4056 | cmd->t_task->t_mem_list, mem, &se_mem_cnt_out); |
4181 | if (ret < 0) | 4057 | if (ret < 0) |
4182 | return -ENOMEM; | 4058 | return -ENOMEM; |
4183 | 4059 | ||
4184 | T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out; | 4060 | cmd->t_task->t_tasks_se_num = se_mem_cnt_out; |
4185 | /* | 4061 | /* |
4186 | * Setup BIDI READ list of struct se_mem elements | 4062 | * Setup BIDI READ list of struct se_mem elements |
4187 | */ | 4063 | */ |
4188 | if ((mem_bidi_in) && (sg_mem_bidi_num)) { | 4064 | if ((mem_bidi_in) && (sg_mem_bidi_num)) { |
4189 | T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list(); | 4065 | cmd->t_task->t_mem_bidi_list = transport_init_se_mem_list(); |
4190 | if (!(T_TASK(cmd)->t_mem_bidi_list)) { | 4066 | if (!(cmd->t_task->t_mem_bidi_list)) { |
4191 | kfree(T_TASK(cmd)->t_mem_list); | 4067 | kfree(cmd->t_task->t_mem_list); |
4192 | return -ENOMEM; | 4068 | return -ENOMEM; |
4193 | } | 4069 | } |
4194 | se_mem_cnt_out = 0; | 4070 | se_mem_cnt_out = 0; |
4195 | 4071 | ||
4196 | ret = transport_map_sg_to_mem(cmd, | 4072 | ret = transport_map_sg_to_mem(cmd, |
4197 | T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in, | 4073 | cmd->t_task->t_mem_bidi_list, mem_bidi_in, |
4198 | &se_mem_cnt_out); | 4074 | &se_mem_cnt_out); |
4199 | if (ret < 0) { | 4075 | if (ret < 0) { |
4200 | kfree(T_TASK(cmd)->t_mem_list); | 4076 | kfree(cmd->t_task->t_mem_list); |
4201 | return -ENOMEM; | 4077 | return -ENOMEM; |
4202 | } | 4078 | } |
4203 | 4079 | ||
4204 | T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out; | 4080 | cmd->t_task->t_tasks_se_bidi_num = se_mem_cnt_out; |
4205 | } | 4081 | } |
4206 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; | 4082 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; |
4207 | 4083 | ||
@@ -4221,7 +4097,7 @@ int transport_generic_map_mem_to_cmd( | |||
4221 | * struct scatterlist format. | 4097 | * struct scatterlist format. |
4222 | */ | 4098 | */ |
4223 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG; | 4099 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG; |
4224 | T_TASK(cmd)->t_task_pt_sgl = mem; | 4100 | cmd->t_task->t_task_pt_sgl = mem; |
4225 | } | 4101 | } |
4226 | 4102 | ||
4227 | return 0; | 4103 | return 0; |
@@ -4236,21 +4112,21 @@ static inline long long transport_dev_end_lba(struct se_device *dev) | |||
4236 | 4112 | ||
4237 | static int transport_get_sectors(struct se_cmd *cmd) | 4113 | static int transport_get_sectors(struct se_cmd *cmd) |
4238 | { | 4114 | { |
4239 | struct se_device *dev = SE_DEV(cmd); | 4115 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
4240 | 4116 | ||
4241 | T_TASK(cmd)->t_tasks_sectors = | 4117 | cmd->t_task->t_tasks_sectors = |
4242 | (cmd->data_length / DEV_ATTRIB(dev)->block_size); | 4118 | (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); |
4243 | if (!(T_TASK(cmd)->t_tasks_sectors)) | 4119 | if (!(cmd->t_task->t_tasks_sectors)) |
4244 | T_TASK(cmd)->t_tasks_sectors = 1; | 4120 | cmd->t_task->t_tasks_sectors = 1; |
4245 | 4121 | ||
4246 | if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK) | 4122 | if (dev->transport->get_device_type(dev) != TYPE_DISK) |
4247 | return 0; | 4123 | return 0; |
4248 | 4124 | ||
4249 | if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) > | 4125 | if ((cmd->t_task->t_task_lba + cmd->t_task->t_tasks_sectors) > |
4250 | transport_dev_end_lba(dev)) { | 4126 | transport_dev_end_lba(dev)) { |
4251 | printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" | 4127 | printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" |
4252 | " transport_dev_end_lba(): %llu\n", | 4128 | " transport_dev_end_lba(): %llu\n", |
4253 | T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors, | 4129 | cmd->t_task->t_task_lba, cmd->t_task->t_tasks_sectors, |
4254 | transport_dev_end_lba(dev)); | 4130 | transport_dev_end_lba(dev)); |
4255 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 4131 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
4256 | cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; | 4132 | cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; |
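DEV_ATTRIB() and TRANSPORT() were indirection macros of the same kind as T_TASK(). Their assumed bodies, inferred from the substitutions in this hunk:

    /* Assumed former wrappers */
    #define DEV_ATTRIB(dev)	(&(dev)->se_sub_dev->se_dev_attrib)
    #define TRANSPORT(dev)	((dev)->transport)

    /* Old */
    sectors = cmd->data_length / DEV_ATTRIB(dev)->block_size;
    /* New: identical dereference, written out */
    sectors = cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size;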
@@ -4262,26 +4138,26 @@ static int transport_get_sectors(struct se_cmd *cmd) | |||
4262 | 4138 | ||
4263 | static int transport_new_cmd_obj(struct se_cmd *cmd) | 4139 | static int transport_new_cmd_obj(struct se_cmd *cmd) |
4264 | { | 4140 | { |
4265 | struct se_device *dev = SE_DEV(cmd); | 4141 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
4266 | u32 task_cdbs = 0, rc; | 4142 | u32 task_cdbs = 0, rc; |
4267 | 4143 | ||
4268 | if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { | 4144 | if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { |
4269 | task_cdbs++; | 4145 | task_cdbs++; |
4270 | T_TASK(cmd)->t_task_cdbs++; | 4146 | cmd->t_task->t_task_cdbs++; |
4271 | } else { | 4147 | } else { |
4272 | int set_counts = 1; | 4148 | int set_counts = 1; |
4273 | 4149 | ||
4274 | /* | 4150 | /* |
4275 | * Setup any BIDI READ tasks and memory from | 4151 | * Setup any BIDI READ tasks and memory from |
4276 | * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks | 4152 | * cmd->t_task->t_mem_bidi_list so the READ struct se_tasks |
4277 | * are queued first for the non pSCSI passthrough case. | 4153 | * are queued first for the non pSCSI passthrough case. |
4278 | */ | 4154 | */ |
4279 | if ((T_TASK(cmd)->t_mem_bidi_list != NULL) && | 4155 | if ((cmd->t_task->t_mem_bidi_list != NULL) && |
4280 | (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { | 4156 | (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { |
4281 | rc = transport_generic_get_cdb_count(cmd, | 4157 | rc = transport_generic_get_cdb_count(cmd, |
4282 | T_TASK(cmd)->t_task_lba, | 4158 | cmd->t_task->t_task_lba, |
4283 | T_TASK(cmd)->t_tasks_sectors, | 4159 | cmd->t_task->t_tasks_sectors, |
4284 | DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list, | 4160 | DMA_FROM_DEVICE, cmd->t_task->t_mem_bidi_list, |
4285 | set_counts); | 4161 | set_counts); |
4286 | if (!(rc)) { | 4162 | if (!(rc)) { |
4287 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 4163 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
@@ -4292,13 +4168,13 @@ static int transport_new_cmd_obj(struct se_cmd *cmd) | |||
4292 | set_counts = 0; | 4168 | set_counts = 0; |
4293 | } | 4169 | } |
4294 | /* | 4170 | /* |
4295 | * Setup the tasks and memory from T_TASK(cmd)->t_mem_list | 4171 | * Setup the tasks and memory from cmd->t_task->t_mem_list |
4296 | * Note for BIDI transfers this will contain the WRITE payload | 4172 | * Note for BIDI transfers this will contain the WRITE payload |
4297 | */ | 4173 | */ |
4298 | task_cdbs = transport_generic_get_cdb_count(cmd, | 4174 | task_cdbs = transport_generic_get_cdb_count(cmd, |
4299 | T_TASK(cmd)->t_task_lba, | 4175 | cmd->t_task->t_task_lba, |
4300 | T_TASK(cmd)->t_tasks_sectors, | 4176 | cmd->t_task->t_tasks_sectors, |
4301 | cmd->data_direction, T_TASK(cmd)->t_mem_list, | 4177 | cmd->data_direction, cmd->t_task->t_mem_list, |
4302 | set_counts); | 4178 | set_counts); |
4303 | if (!(task_cdbs)) { | 4179 | if (!(task_cdbs)) { |
4304 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 4180 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
@@ -4306,19 +4182,19 @@ static int transport_new_cmd_obj(struct se_cmd *cmd) | |||
4306 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 4182 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
4307 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 4183 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
4308 | } | 4184 | } |
4309 | T_TASK(cmd)->t_task_cdbs += task_cdbs; | 4185 | cmd->t_task->t_task_cdbs += task_cdbs; |
4310 | 4186 | ||
4311 | #if 0 | 4187 | #if 0 |
4312 | printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:" | 4188 | printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:" |
4313 | " %u, t_task_cdbs: %u\n", cmd->data_length, | 4189 | " %u, t_task_cdbs: %u\n", cmd->data_length, |
4314 | T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors, | 4190 | cmd->t_task->t_task_lba, cmd->t_task->t_tasks_sectors, |
4315 | T_TASK(cmd)->t_task_cdbs); | 4191 | cmd->t_task->t_task_cdbs); |
4316 | #endif | 4192 | #endif |
4317 | } | 4193 | } |
4318 | 4194 | ||
4319 | atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs); | 4195 | atomic_set(&cmd->t_task->t_task_cdbs_left, task_cdbs); |
4320 | atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs); | 4196 | atomic_set(&cmd->t_task->t_task_cdbs_ex_left, task_cdbs); |
4321 | atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs); | 4197 | atomic_set(&cmd->t_task->t_task_cdbs_timeout_left, task_cdbs); |
4322 | return 0; | 4198 | return 0; |
4323 | } | 4199 | } |
4324 | 4200 | ||
@@ -4342,8 +4218,8 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) | |||
4342 | unsigned char *buf; | 4218 | unsigned char *buf; |
4343 | struct se_mem *se_mem; | 4219 | struct se_mem *se_mem; |
4344 | 4220 | ||
4345 | T_TASK(cmd)->t_mem_list = transport_init_se_mem_list(); | 4221 | cmd->t_task->t_mem_list = transport_init_se_mem_list(); |
4346 | if (!(T_TASK(cmd)->t_mem_list)) | 4222 | if (!(cmd->t_task->t_mem_list)) |
4347 | return -ENOMEM; | 4223 | return -ENOMEM; |
4348 | 4224 | ||
4349 | /* | 4225 | /* |
@@ -4355,10 +4231,10 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) | |||
4355 | /* | 4231 | /* |
4356 | * Setup BIDI-COMMAND READ list of struct se_mem elements | 4232 | * Setup BIDI-COMMAND READ list of struct se_mem elements |
4357 | */ | 4233 | */ |
4358 | if (T_TASK(cmd)->t_tasks_bidi) { | 4234 | if (cmd->t_task->t_tasks_bidi) { |
4359 | T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list(); | 4235 | cmd->t_task->t_mem_bidi_list = transport_init_se_mem_list(); |
4360 | if (!(T_TASK(cmd)->t_mem_bidi_list)) { | 4236 | if (!(cmd->t_task->t_mem_bidi_list)) { |
4361 | kfree(T_TASK(cmd)->t_mem_list); | 4237 | kfree(cmd->t_task->t_mem_list); |
4362 | return -ENOMEM; | 4238 | return -ENOMEM; |
4363 | } | 4239 | } |
4364 | } | 4240 | } |
@@ -4387,8 +4263,8 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) | |||
4387 | memset(buf, 0, se_mem->se_len); | 4263 | memset(buf, 0, se_mem->se_len); |
4388 | kunmap_atomic(buf, KM_IRQ0); | 4264 | kunmap_atomic(buf, KM_IRQ0); |
4389 | 4265 | ||
4390 | list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list); | 4266 | list_add_tail(&se_mem->se_list, cmd->t_task->t_mem_list); |
4391 | T_TASK(cmd)->t_tasks_se_num++; | 4267 | cmd->t_task->t_tasks_se_num++; |
4392 | 4268 | ||
4393 | DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)" | 4269 | DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)" |
4394 | " Offset(%u)\n", se_mem->se_page, se_mem->se_len, | 4270 | " Offset(%u)\n", se_mem->se_page, se_mem->se_len, |
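The loop above allocates one page per struct se_mem and zeroes it through a short-lived atomic mapping. Reduced to its core; KM_IRQ0 is the kmap slot argument this kernel generation still required:

    struct page *page = alloc_pages(GFP_KERNEL, 0);
    unsigned char *buf;

    if (!page)
    	return -ENOMEM;

    buf = kmap_atomic(page, KM_IRQ0);	/* temporary mapping, may not sleep */
    memset(buf, 0, PAGE_SIZE);
    kunmap_atomic(buf, KM_IRQ0);

The real code zeroes se_mem->se_len bytes rather than a full PAGE_SIZE; the sketch uses PAGE_SIZE only to stay self-contained.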
@@ -4398,25 +4274,25 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) | |||
4398 | } | 4274 | } |
4399 | 4275 | ||
4400 | DEBUG_MEM("Allocated total struct se_mem elements(%u)\n", | 4276 | DEBUG_MEM("Allocated total struct se_mem elements(%u)\n", |
4401 | T_TASK(cmd)->t_tasks_se_num); | 4277 | cmd->t_task->t_tasks_se_num); |
4402 | 4278 | ||
4403 | return 0; | 4279 | return 0; |
4404 | out: | 4280 | out: |
4405 | if (se_mem) | 4281 | if (se_mem) |
4406 | __free_pages(se_mem->se_page, 0); | 4282 | __free_pages(se_mem->se_page, 0); |
4407 | kmem_cache_free(se_mem_cache, se_mem); | 4283 | kmem_cache_free(se_mem_cache, se_mem); |
4408 | return -1; | 4284 | return -ENOMEM; |
4409 | } | 4285 | } |
4410 | 4286 | ||
4411 | u32 transport_calc_sg_num( | 4287 | int transport_init_task_sg( |
4412 | struct se_task *task, | 4288 | struct se_task *task, |
4413 | struct se_mem *in_se_mem, | 4289 | struct se_mem *in_se_mem, |
4414 | u32 task_offset) | 4290 | u32 task_offset) |
4415 | { | 4291 | { |
4416 | struct se_cmd *se_cmd = task->task_se_cmd; | 4292 | struct se_cmd *se_cmd = task->task_se_cmd; |
4417 | struct se_device *se_dev = SE_DEV(se_cmd); | 4293 | struct se_device *se_dev = se_cmd->se_lun->lun_se_dev; |
4418 | struct se_mem *se_mem = in_se_mem; | 4294 | struct se_mem *se_mem = in_se_mem; |
4419 | struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd); | 4295 | struct target_core_fabric_ops *tfo = se_cmd->se_tfo; |
4420 | u32 sg_length, task_size = task->task_size, task_sg_num_padded; | 4296 | u32 sg_length, task_size = task->task_size, task_sg_num_padded; |
4421 | 4297 | ||
4422 | while (task_size != 0) { | 4298 | while (task_size != 0) { |
@@ -4430,7 +4306,7 @@ u32 transport_calc_sg_num( | |||
4430 | sg_length = se_mem->se_len; | 4306 | sg_length = se_mem->se_len; |
4431 | 4307 | ||
4432 | if (!(list_is_last(&se_mem->se_list, | 4308 | if (!(list_is_last(&se_mem->se_list, |
4433 | T_TASK(se_cmd)->t_mem_list))) | 4309 | se_cmd->t_task->t_mem_list))) |
4434 | se_mem = list_entry(se_mem->se_list.next, | 4310 | se_mem = list_entry(se_mem->se_list.next, |
4435 | struct se_mem, se_list); | 4311 | struct se_mem, se_list); |
4436 | } else { | 4312 | } else { |
@@ -4450,7 +4326,7 @@ u32 transport_calc_sg_num( | |||
4450 | sg_length = (se_mem->se_len - task_offset); | 4326 | sg_length = (se_mem->se_len - task_offset); |
4451 | 4327 | ||
4452 | if (!(list_is_last(&se_mem->se_list, | 4328 | if (!(list_is_last(&se_mem->se_list, |
4453 | T_TASK(se_cmd)->t_mem_list))) | 4329 | se_cmd->t_task->t_mem_list))) |
4454 | se_mem = list_entry(se_mem->se_list.next, | 4330 | se_mem = list_entry(se_mem->se_list.next, |
4455 | struct se_mem, se_list); | 4331 | struct se_mem, se_list); |
4456 | } | 4332 | } |
@@ -4484,21 +4360,23 @@ next: | |||
4484 | if (!(task->task_sg)) { | 4360 | if (!(task->task_sg)) { |
4485 | printk(KERN_ERR "Unable to allocate memory for" | 4361 | printk(KERN_ERR "Unable to allocate memory for" |
4486 | " task->task_sg\n"); | 4362 | " task->task_sg\n"); |
4487 | return 0; | 4363 | return -ENOMEM; |
4488 | } | 4364 | } |
4489 | sg_init_table(&task->task_sg[0], task_sg_num_padded); | 4365 | sg_init_table(&task->task_sg[0], task_sg_num_padded); |
4490 | /* | 4366 | /* |
4491 | * Setup task->task_sg_bidi for SCSI READ payload for | 4367 | * Setup task->task_sg_bidi for SCSI READ payload for |
4492 | * TCM/pSCSI passthrough if present for BIDI-COMMAND | 4368 | * TCM/pSCSI passthrough if present for BIDI-COMMAND |
4493 | */ | 4369 | */ |
4494 | if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) && | 4370 | if ((se_cmd->t_task->t_mem_bidi_list != NULL) && |
4495 | (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) { | 4371 | (se_dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) { |
4496 | task->task_sg_bidi = kzalloc(task_sg_num_padded * | 4372 | task->task_sg_bidi = kzalloc(task_sg_num_padded * |
4497 | sizeof(struct scatterlist), GFP_KERNEL); | 4373 | sizeof(struct scatterlist), GFP_KERNEL); |
4498 | if (!(task->task_sg_bidi)) { | 4374 | if (!(task->task_sg_bidi)) { |
4375 | kfree(task->task_sg); | ||
4376 | task->task_sg = NULL; | ||
4499 | printk(KERN_ERR "Unable to allocate memory for" | 4377 | printk(KERN_ERR "Unable to allocate memory for" |
4500 | " task->task_sg_bidi\n"); | 4378 | " task->task_sg_bidi\n"); |
4501 | return 0; | 4379 | return -ENOMEM; |
4502 | } | 4380 | } |
4503 | sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded); | 4381 | sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded); |
4504 | } | 4382 | } |
@@ -4535,13 +4413,13 @@ static inline int transport_set_tasks_sectors_disk( | |||
4535 | if ((lba + sectors) > transport_dev_end_lba(dev)) { | 4413 | if ((lba + sectors) > transport_dev_end_lba(dev)) { |
4536 | task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1); | 4414 | task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1); |
4537 | 4415 | ||
4538 | if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) { | 4416 | if (task->task_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) { |
4539 | task->task_sectors = DEV_ATTRIB(dev)->max_sectors; | 4417 | task->task_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; |
4540 | *max_sectors_set = 1; | 4418 | *max_sectors_set = 1; |
4541 | } | 4419 | } |
4542 | } else { | 4420 | } else { |
4543 | if (sectors > DEV_ATTRIB(dev)->max_sectors) { | 4421 | if (sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) { |
4544 | task->task_sectors = DEV_ATTRIB(dev)->max_sectors; | 4422 | task->task_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; |
4545 | *max_sectors_set = 1; | 4423 | *max_sectors_set = 1; |
4546 | } else | 4424 | } else |
4547 | task->task_sectors = sectors; | 4425 | task->task_sectors = sectors; |
@@ -4557,8 +4435,8 @@ static inline int transport_set_tasks_sectors_non_disk( | |||
4557 | u32 sectors, | 4435 | u32 sectors, |
4558 | int *max_sectors_set) | 4436 | int *max_sectors_set) |
4559 | { | 4437 | { |
4560 | if (sectors > DEV_ATTRIB(dev)->max_sectors) { | 4438 | if (sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) { |
4561 | task->task_sectors = DEV_ATTRIB(dev)->max_sectors; | 4439 | task->task_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; |
4562 | *max_sectors_set = 1; | 4440 | *max_sectors_set = 1; |
4563 | } else | 4441 | } else |
4564 | task->task_sectors = sectors; | 4442 | task->task_sectors = sectors; |
@@ -4573,7 +4451,7 @@ static inline int transport_set_tasks_sectors( | |||
4573 | u32 sectors, | 4451 | u32 sectors, |
4574 | int *max_sectors_set) | 4452 | int *max_sectors_set) |
4575 | { | 4453 | { |
4576 | return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ? | 4454 | return (dev->transport->get_device_type(dev) == TYPE_DISK) ? |
4577 | transport_set_tasks_sectors_disk(task, dev, lba, sectors, | 4455 | transport_set_tasks_sectors_disk(task, dev, lba, sectors, |
4578 | max_sectors_set) : | 4456 | max_sectors_set) : |
4579 | transport_set_tasks_sectors_non_disk(task, dev, lba, sectors, | 4457 | transport_set_tasks_sectors_non_disk(task, dev, lba, sectors, |
@@ -4590,17 +4468,15 @@ static int transport_map_sg_to_mem( | |||
4590 | struct scatterlist *sg; | 4468 | struct scatterlist *sg; |
4591 | u32 sg_count = 1, cmd_size = cmd->data_length; | 4469 | u32 sg_count = 1, cmd_size = cmd->data_length; |
4592 | 4470 | ||
4593 | if (!in_mem) { | 4471 | WARN_ON(!in_mem); |
4594 | printk(KERN_ERR "No source scatterlist\n"); | 4472 | |
4595 | return -1; | ||
4596 | } | ||
4597 | sg = (struct scatterlist *)in_mem; | 4473 | sg = (struct scatterlist *)in_mem; |
4598 | 4474 | ||
4599 | while (cmd_size) { | 4475 | while (cmd_size) { |
4600 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); | 4476 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); |
4601 | if (!(se_mem)) { | 4477 | if (!(se_mem)) { |
4602 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); | 4478 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); |
4603 | return -1; | 4479 | return -ENOMEM; |
4604 | } | 4480 | } |
4605 | INIT_LIST_HEAD(&se_mem->se_list); | 4481 | INIT_LIST_HEAD(&se_mem->se_list); |
4606 | DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u" | 4482 | DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u" |
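Replacing the in-function NULL check with WARN_ON() is one of the "Make two runtime checks into WARN_ONs" changes: a condition that callers must never produce now yields a backtrace instead of a quiet error return. The transformation, in brief:

    /* Before: a silent failure that callers had to interpret */
    if (!in_mem) {
    	printk(KERN_ERR "No source scatterlist\n");
    	return -1;
    }

    /* After: a contract violation is loud, and execution continues */
    WARN_ON(!in_mem);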
@@ -4658,7 +4534,7 @@ int transport_map_mem_to_sg( | |||
4658 | if (!sg) { | 4534 | if (!sg) { |
4659 | printk(KERN_ERR "Unable to locate valid struct" | 4535 | printk(KERN_ERR "Unable to locate valid struct" |
4660 | " scatterlist pointer\n"); | 4536 | " scatterlist pointer\n"); |
4661 | return -1; | 4537 | return -EINVAL; |
4662 | } | 4538 | } |
4663 | 4539 | ||
4664 | while (task_size != 0) { | 4540 | while (task_size != 0) { |
@@ -4675,7 +4551,7 @@ int transport_map_mem_to_sg( | |||
4675 | sg->length = se_mem->se_len; | 4551 | sg->length = se_mem->se_len; |
4676 | 4552 | ||
4677 | if (!(list_is_last(&se_mem->se_list, | 4553 | if (!(list_is_last(&se_mem->se_list, |
4678 | T_TASK(se_cmd)->t_mem_list))) { | 4554 | se_cmd->t_task->t_mem_list))) { |
4679 | se_mem = list_entry(se_mem->se_list.next, | 4555 | se_mem = list_entry(se_mem->se_list.next, |
4680 | struct se_mem, se_list); | 4556 | struct se_mem, se_list); |
4681 | (*se_mem_cnt)++; | 4557 | (*se_mem_cnt)++; |
@@ -4711,7 +4587,7 @@ int transport_map_mem_to_sg( | |||
4711 | sg->length = (se_mem->se_len - *task_offset); | 4587 | sg->length = (se_mem->se_len - *task_offset); |
4712 | 4588 | ||
4713 | if (!(list_is_last(&se_mem->se_list, | 4589 | if (!(list_is_last(&se_mem->se_list, |
4714 | T_TASK(se_cmd)->t_mem_list))) { | 4590 | se_cmd->t_task->t_mem_list))) { |
4715 | se_mem = list_entry(se_mem->se_list.next, | 4591 | se_mem = list_entry(se_mem->se_list.next, |
4716 | struct se_mem, se_list); | 4592 | struct se_mem, se_list); |
4717 | (*se_mem_cnt)++; | 4593 | (*se_mem_cnt)++; |
@@ -4755,7 +4631,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) | |||
4755 | struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL; | 4631 | struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL; |
4756 | struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL; | 4632 | struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL; |
4757 | struct se_task *task; | 4633 | struct se_task *task; |
4758 | struct target_core_fabric_ops *tfo = CMD_TFO(cmd); | 4634 | struct target_core_fabric_ops *tfo = cmd->se_tfo; |
4759 | u32 task_sg_num = 0, sg_count = 0; | 4635 | u32 task_sg_num = 0, sg_count = 0; |
4760 | int i; | 4636 | int i; |
4761 | 4637 | ||
@@ -4769,7 +4645,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) | |||
4769 | * Walk the struct se_task list and setup scatterlist chains | 4645 | * Walk the struct se_task list and setup scatterlist chains |
4770 | * for each contiguously allocated struct se_task->task_sg[]. | 4646 | * for each contiguously allocated struct se_task->task_sg[]. |
4771 | */ | 4647 | */ |
4772 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { | 4648 | list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) { |
4773 | if (!(task->task_sg) || !(task->task_padded_sg)) | 4649 | if (!(task->task_sg) || !(task->task_padded_sg)) |
4774 | continue; | 4650 | continue; |
4775 | 4651 | ||
@@ -4780,10 +4656,10 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) | |||
4780 | * Either add chain or mark end of scatterlist | 4656 | * Either add chain or mark end of scatterlist |
4781 | */ | 4657 | */ |
4782 | if (!(list_is_last(&task->t_list, | 4658 | if (!(list_is_last(&task->t_list, |
4783 | &T_TASK(cmd)->t_task_list))) { | 4659 | &cmd->t_task->t_task_list))) { |
4784 | /* | 4660 | /* |
4785 | * Clear existing SGL termination bit set in | 4661 | * Clear existing SGL termination bit set in |
4786 | * transport_calc_sg_num(), see sg_mark_end() | 4662 | * transport_init_task_sg(), see sg_mark_end() |
4787 | */ | 4663 | */ |
4788 | sg_end_cur = &task->task_sg[task->task_sg_num - 1]; | 4664 | sg_end_cur = &task->task_sg[task->task_sg_num - 1]; |
4789 | sg_end_cur->page_link &= ~0x02; | 4665 | sg_end_cur->page_link &= ~0x02; |
@@ -4806,10 +4682,10 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) | |||
4806 | /* | 4682 | /* |
4807 | * Check for single task.. | 4683 | * Check for single task.. |
4808 | */ | 4684 | */ |
4809 | if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) { | 4685 | if (!(list_is_last(&task->t_list, &cmd->t_task->t_task_list))) { |
4810 | /* | 4686 | /* |
4811 | * Clear existing SGL termination bit set in | 4687 | * Clear existing SGL termination bit set in |
4812 | * transport_calc_sg_num(), see sg_mark_end() | 4688 | * transport_init_task_sg(), see sg_mark_end() |
4813 | */ | 4689 | */ |
4814 | sg_end = &task->task_sg[task->task_sg_num - 1]; | 4690 | sg_end = &task->task_sg[task->task_sg_num - 1]; |
4815 | sg_end->page_link &= ~0x02; | 4691 | sg_end->page_link &= ~0x02; |
@@ -4824,15 +4700,15 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) | |||
4824 | * Setup the starting pointer and total t_tasks_sg_linked_no including | 4700 | * Setup the starting pointer and total t_tasks_sg_linked_no including |
4825 | * padding SGs for linking and to mark the end. | 4701 | * padding SGs for linking and to mark the end. |
4826 | */ | 4702 | */ |
4827 | T_TASK(cmd)->t_tasks_sg_chained = sg_first; | 4703 | cmd->t_task->t_tasks_sg_chained = sg_first; |
4828 | T_TASK(cmd)->t_tasks_sg_chained_no = sg_count; | 4704 | cmd->t_task->t_tasks_sg_chained_no = sg_count; |
4829 | 4705 | ||
4830 | DEBUG_CMD_M("Setup cmd: %p T_TASK(cmd)->t_tasks_sg_chained: %p and" | 4706 | DEBUG_CMD_M("Setup cmd: %p cmd->t_task->t_tasks_sg_chained: %p and" |
4831 | " t_tasks_sg_chained_no: %u\n", cmd, T_TASK(cmd)->t_tasks_sg_chained, | 4707 | " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_task->t_tasks_sg_chained, |
4832 | T_TASK(cmd)->t_tasks_sg_chained_no); | 4708 | cmd->t_task->t_tasks_sg_chained_no); |
4833 | 4709 | ||
4834 | for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg, | 4710 | for_each_sg(cmd->t_task->t_tasks_sg_chained, sg, |
4835 | T_TASK(cmd)->t_tasks_sg_chained_no, i) { | 4711 | cmd->t_task->t_tasks_sg_chained_no, i) { |
4836 | 4712 | ||
4837 | DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n", | 4713 | DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n", |
4838 | i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic); | 4714 | i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic); |
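The page_link manipulation in this function relies on the scatterlist encoding of this era: bit 0 of page_link marks a chain entry, bit 1 marks the end of a table. Clearing bit 1 (&= ~0x02) removes the terminator that sg_mark_end() left on the previous task's table so the list can continue into the next one. A hedged sketch of the idiom; the sg_chain() arguments and the next_task name are illustrative, not copied from this function:

    /* Strip the end-of-table marker sg_mark_end() set earlier... */
    sg_end_cur = &task->task_sg[task->task_sg_num - 1];
    sg_end_cur->page_link &= ~0x02;

    /* ...then splice the next table in through the padding entry */
    sg_chain(sg_head_cur, task->task_sg_num + 1, next_task->task_sg);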
@@ -4860,12 +4736,12 @@ static int transport_do_se_mem_map( | |||
4860 | * se_subsystem_api_t->do_se_mem_map is used when internal allocation | 4736 | * se_subsystem_api_t->do_se_mem_map is used when internal allocation |
4861 | * has been done by the transport plugin. | 4737 | * has been done by the transport plugin. |
4862 | */ | 4738 | */ |
4863 | if (TRANSPORT(dev)->do_se_mem_map) { | 4739 | if (dev->transport->do_se_mem_map) { |
4864 | ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list, | 4740 | ret = dev->transport->do_se_mem_map(task, se_mem_list, |
4865 | in_mem, in_se_mem, out_se_mem, se_mem_cnt, | 4741 | in_mem, in_se_mem, out_se_mem, se_mem_cnt, |
4866 | task_offset_in); | 4742 | task_offset_in); |
4867 | if (ret == 0) | 4743 | if (ret == 0) |
4868 | T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt; | 4744 | task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt; |
4869 | 4745 | ||
4870 | return ret; | 4746 | return ret; |
4871 | } | 4747 | } |
@@ -4875,7 +4751,7 @@ static int transport_do_se_mem_map( | |||
4875 | * This is the normal path for all normal non BIDI and BIDI-COMMAND | 4751 | * This is the normal path for all normal non BIDI and BIDI-COMMAND |
4876 | * WRITE payloads.. If we need to do BIDI READ passthrough for | 4752 | * WRITE payloads.. If we need to do BIDI READ passthrough for |
4877 | * TCM/pSCSI the first call to transport_do_se_mem_map -> | 4753 | * TCM/pSCSI the first call to transport_do_se_mem_map -> |
4878 | * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the | 4754 | * transport_init_task_sg() -> transport_map_mem_to_sg() will do the |
4879 | * allocation for task->task_sg_bidi, and the subsequent call to | 4755 | * allocation for task->task_sg_bidi, and the subsequent call to |
4880 | * transport_do_se_mem_map() from transport_generic_get_cdb_count() | 4756 | * transport_do_se_mem_map() from transport_generic_get_cdb_count() |
4881 | */ | 4757 | */ |
@@ -4884,8 +4760,9 @@ static int transport_do_se_mem_map( | |||
4884 | * Assume default that transport plugin speaks preallocated | 4760 | * Assume default that transport plugin speaks preallocated |
4885 | * scatterlists. | 4761 | * scatterlists. |
4886 | */ | 4762 | */ |
4887 | if (!(transport_calc_sg_num(task, in_se_mem, task_offset))) | 4763 | ret = transport_init_task_sg(task, in_se_mem, task_offset); |
4888 | return -1; | 4764 | if (ret <= 0) |
4765 | return ret; | ||
4889 | /* | 4766 | /* |
4890 | * struct se_task->task_sg now contains the struct scatterlist array. | 4767 | * struct se_task->task_sg now contains the struct scatterlist array. |
4891 | */ | 4768 | */ |
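This call site shows the payoff of the transport_init_task_sg() signature change: instead of flattening every failure to -1, the caller forwards the errno it received. The new contract, in brief; note that 0 is still treated as failure here:

    ret = transport_init_task_sg(task, in_se_mem, task_offset);
    if (ret <= 0)		/* 0 or a negative errno: no usable SG table */
    	return ret;	/* propagate the precise reason upward */
    /* ret > 0: task->task_sg has been allocated and initialized */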
@@ -4914,7 +4791,7 @@ static u32 transport_generic_get_cdb_count( | |||
4914 | struct se_task *task; | 4791 | struct se_task *task; |
4915 | struct se_mem *se_mem = NULL, *se_mem_lout = NULL; | 4792 | struct se_mem *se_mem = NULL, *se_mem_lout = NULL; |
4916 | struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL; | 4793 | struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL; |
4917 | struct se_device *dev = SE_DEV(cmd); | 4794 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
4918 | int max_sectors_set = 0, ret; | 4795 | int max_sectors_set = 0, ret; |
4919 | u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0; | 4796 | u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0; |
4920 | 4797 | ||
@@ -4933,15 +4810,15 @@ static u32 transport_generic_get_cdb_count( | |||
4933 | * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to | 4810 | * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to |
4934 | * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation | 4811 | * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation |
4935 | */ | 4812 | */ |
4936 | if ((T_TASK(cmd)->t_mem_bidi_list != NULL) && | 4813 | if ((cmd->t_task->t_mem_bidi_list != NULL) && |
4937 | !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) && | 4814 | !(list_empty(cmd->t_task->t_mem_bidi_list)) && |
4938 | (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) | 4815 | (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) |
4939 | se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next, | 4816 | se_mem_bidi = list_entry(cmd->t_task->t_mem_bidi_list->next, |
4940 | struct se_mem, se_list); | 4817 | struct se_mem, se_list); |
4941 | 4818 | ||
4942 | while (sectors) { | 4819 | while (sectors) { |
4943 | DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n", | 4820 | DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n", |
4944 | CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors, | 4821 | cmd->se_tfo->get_task_tag(cmd), lba, sectors, |
4945 | transport_dev_end_lba(dev)); | 4822 | transport_dev_end_lba(dev)); |
4946 | 4823 | ||
4947 | task = transport_generic_get_task(cmd, data_direction); | 4824 | task = transport_generic_get_task(cmd, data_direction); |
@@ -4955,19 +4832,19 @@ static u32 transport_generic_get_cdb_count( | |||
4955 | lba += task->task_sectors; | 4832 | lba += task->task_sectors; |
4956 | sectors -= task->task_sectors; | 4833 | sectors -= task->task_sectors; |
4957 | task->task_size = (task->task_sectors * | 4834 | task->task_size = (task->task_sectors * |
4958 | DEV_ATTRIB(dev)->block_size); | 4835 | dev->se_sub_dev->se_dev_attrib.block_size); |
4959 | 4836 | ||
4960 | cdb = TRANSPORT(dev)->get_cdb(task); | 4837 | cdb = dev->transport->get_cdb(task); |
4961 | if ((cdb)) { | 4838 | if ((cdb)) { |
4962 | memcpy(cdb, T_TASK(cmd)->t_task_cdb, | 4839 | memcpy(cdb, cmd->t_task->t_task_cdb, |
4963 | scsi_command_size(T_TASK(cmd)->t_task_cdb)); | 4840 | scsi_command_size(cmd->t_task->t_task_cdb)); |
4964 | cmd->transport_split_cdb(task->task_lba, | 4841 | cmd->transport_split_cdb(task->task_lba, |
4965 | &task->task_sectors, cdb); | 4842 | &task->task_sectors, cdb); |
4966 | } | 4843 | } |
4967 | 4844 | ||
4968 | /* | 4845 | /* |
4969 | * Perform the SE OBJ plugin and/or Transport plugin specific | 4846 | * Perform the SE OBJ plugin and/or Transport plugin specific |
4970 | * mapping for T_TASK(cmd)->t_mem_list. And setup the | 4847 | * mapping for cmd->t_task->t_mem_list. And setup the |
4971 | * task->task_sg and if necessary task->task_sg_bidi | 4848 | * task->task_sg and if necessary task->task_sg_bidi |
4972 | */ | 4849 | */ |
4973 | ret = transport_do_se_mem_map(dev, task, mem_list, | 4850 | ret = transport_do_se_mem_map(dev, task, mem_list, |
@@ -4978,17 +4855,17 @@ static u32 transport_generic_get_cdb_count( | |||
4978 | 4855 | ||
4979 | se_mem = se_mem_lout; | 4856 | se_mem = se_mem_lout; |
4980 | /* | 4857 | /* |
4981 | * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi | 4858 | * Setup the cmd->t_task->t_mem_bidi_list -> task->task_sg_bidi |
4982 | * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI | 4859 | * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI |
4983 | * | 4860 | * |
4984 | * Note that the first call to transport_do_se_mem_map() above will | 4861 | * Note that the first call to transport_do_se_mem_map() above will |
4985 | * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map() | 4862 | * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map() |
4986 | * -> transport_calc_sg_num(), and the second here will do the | 4863 | * -> transport_init_task_sg(), and the second here will do the |
4987 | * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI. | 4864 | * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI. |
4988 | */ | 4865 | */ |
4989 | if (task->task_sg_bidi != NULL) { | 4866 | if (task->task_sg_bidi != NULL) { |
4990 | ret = transport_do_se_mem_map(dev, task, | 4867 | ret = transport_do_se_mem_map(dev, task, |
4991 | T_TASK(cmd)->t_mem_bidi_list, NULL, | 4868 | cmd->t_task->t_mem_bidi_list, NULL, |
4992 | se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt, | 4869 | se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt, |
4993 | &task_offset_in); | 4870 | &task_offset_in); |
4994 | if (ret < 0) | 4871 | if (ret < 0) |
@@ -5011,12 +4888,12 @@ static u32 transport_generic_get_cdb_count( | |||
5011 | } | 4888 | } |
5012 | 4889 | ||
5013 | if (set_counts) { | 4890 | if (set_counts) { |
5014 | atomic_inc(&T_TASK(cmd)->t_fe_count); | 4891 | atomic_inc(&cmd->t_task->t_fe_count); |
5015 | atomic_inc(&T_TASK(cmd)->t_se_count); | 4892 | atomic_inc(&cmd->t_task->t_se_count); |
5016 | } | 4893 | } |
5017 | 4894 | ||
5018 | DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n", | 4895 | DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n", |
5019 | CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE) | 4896 | cmd->se_tfo->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE) |
5020 | ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs); | 4897 | ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs); |
5021 | 4898 | ||
5022 | return task_cdbs; | 4899 | return task_cdbs; |
@@ -5027,7 +4904,7 @@ out: | |||
5027 | static int | 4904 | static int |
5028 | transport_map_control_cmd_to_task(struct se_cmd *cmd) | 4905 | transport_map_control_cmd_to_task(struct se_cmd *cmd) |
5029 | { | 4906 | { |
5030 | struct se_device *dev = SE_DEV(cmd); | 4907 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
5031 | unsigned char *cdb; | 4908 | unsigned char *cdb; |
5032 | struct se_task *task; | 4909 | struct se_task *task; |
5033 | int ret; | 4910 | int ret; |
@@ -5036,7 +4913,7 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd) | |||
5036 | if (!task) | 4913 | if (!task) |
5037 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | 4914 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; |
5038 | 4915 | ||
5039 | cdb = TRANSPORT(dev)->get_cdb(task); | 4916 | cdb = dev->transport->get_cdb(task); |
5040 | if (cdb) | 4917 | if (cdb) |
5041 | memcpy(cdb, cmd->t_task->t_task_cdb, | 4918 | memcpy(cdb, cmd->t_task->t_task_cdb, |
5042 | scsi_command_size(cmd->t_task->t_task_cdb)); | 4919 | scsi_command_size(cmd->t_task->t_task_cdb)); |
@@ -5052,8 +4929,8 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd) | |||
5052 | struct se_mem *se_mem = NULL, *se_mem_lout = NULL; | 4929 | struct se_mem *se_mem = NULL, *se_mem_lout = NULL; |
5053 | u32 se_mem_cnt = 0, task_offset = 0; | 4930 | u32 se_mem_cnt = 0, task_offset = 0; |
5054 | 4931 | ||
5055 | if (!list_empty(T_TASK(cmd)->t_mem_list)) | 4932 | if (!list_empty(cmd->t_task->t_mem_list)) |
5056 | se_mem = list_entry(T_TASK(cmd)->t_mem_list->next, | 4933 | se_mem = list_entry(cmd->t_task->t_mem_list->next, |
5057 | struct se_mem, se_list); | 4934 | struct se_mem, se_list); |
5058 | 4935 | ||
5059 | ret = transport_do_se_mem_map(dev, task, | 4936 | ret = transport_do_se_mem_map(dev, task, |
@@ -5092,14 +4969,14 @@ static int transport_generic_new_cmd(struct se_cmd *cmd) | |||
5092 | { | 4969 | { |
5093 | struct se_portal_group *se_tpg; | 4970 | struct se_portal_group *se_tpg; |
5094 | struct se_task *task; | 4971 | struct se_task *task; |
5095 | struct se_device *dev = SE_DEV(cmd); | 4972 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
5096 | int ret = 0; | 4973 | int ret = 0; |
5097 | 4974 | ||
5098 | /* | 4975 | /* |
5099 | * Determine is the TCM fabric module has already allocated physical | 4976 | * Determine is the TCM fabric module has already allocated physical |
5100 | * memory, and is directly calling transport_generic_map_mem_to_cmd() | 4977 | * memory, and is directly calling transport_generic_map_mem_to_cmd() |
5101 | * to setup beforehand the linked list of physical memory at | 4978 | * to setup beforehand the linked list of physical memory at |
5102 | * T_TASK(cmd)->t_mem_list of struct se_mem->se_page | 4979 | * cmd->t_task->t_mem_list of struct se_mem->se_page |
5103 | */ | 4980 | */ |
5104 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { | 4981 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { |
5105 | ret = transport_allocate_resources(cmd); | 4982 | ret = transport_allocate_resources(cmd); |
@@ -5120,15 +4997,15 @@ static int transport_generic_new_cmd(struct se_cmd *cmd) | |||
5120 | * Linux/NET via kernel sockets and needs to allocate a | 4997 | * Linux/NET via kernel sockets and needs to allocate a |
5121 | * struct iovec array to complete the struct se_cmd | 4998 | * struct iovec array to complete the struct se_cmd |
5122 | */ | 4999 | */ |
5123 | se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg; | 5000 | se_tpg = cmd->se_lun->lun_sep->sep_tpg; |
5124 | if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) { | 5001 | if (se_tpg->se_tpg_tfo->alloc_cmd_iovecs != NULL) { |
5125 | ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd); | 5002 | ret = se_tpg->se_tpg_tfo->alloc_cmd_iovecs(cmd); |
5126 | if (ret < 0) | 5003 | if (ret < 0) |
5127 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | 5004 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; |
5128 | } | 5005 | } |
5129 | 5006 | ||
5130 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { | 5007 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { |
5131 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { | 5008 | list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) { |
5132 | if (atomic_read(&task->task_sent)) | 5009 | if (atomic_read(&task->task_sent)) |
5133 | continue; | 5010 | continue; |
5134 | if (!dev->transport->map_task_SG) | 5011 | if (!dev->transport->map_task_SG) |
@@ -5175,9 +5052,9 @@ void transport_generic_process_write(struct se_cmd *cmd) | |||
5175 | * original EDTL | 5052 | * original EDTL |
5176 | */ | 5053 | */ |
5177 | if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { | 5054 | if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { |
5178 | if (!T_TASK(cmd)->t_tasks_se_num) { | 5055 | if (!cmd->t_task->t_tasks_se_num) { |
5179 | unsigned char *dst, *buf = | 5056 | unsigned char *dst, *buf = |
5180 | (unsigned char *)T_TASK(cmd)->t_task_buf; | 5057 | (unsigned char *)cmd->t_task->t_task_buf; |
5181 | 5058 | ||
5182 | dst = kzalloc(cmd->cmd_spdtl, GFP_KERNEL); | 5059 | dst = kzalloc(cmd->cmd_spdtl, GFP_KERNEL); |
5183 | if (!(dst)) { | 5060 | if (!(dst)) { |
@@ -5189,15 +5066,15 @@ void transport_generic_process_write(struct se_cmd *cmd) | |||
5189 | } | 5066 | } |
5190 | memcpy(dst, buf, cmd->cmd_spdtl); | 5067 | memcpy(dst, buf, cmd->cmd_spdtl); |
5191 | 5068 | ||
5192 | kfree(T_TASK(cmd)->t_task_buf); | 5069 | kfree(cmd->t_task->t_task_buf); |
5193 | T_TASK(cmd)->t_task_buf = dst; | 5070 | cmd->t_task->t_task_buf = dst; |
5194 | } else { | 5071 | } else { |
5195 | struct scatterlist *sg = | 5072 | struct scatterlist *sg = |
5196 | (struct scatterlist *)T_TASK(cmd)->t_task_buf; | 5073 | (struct scatterlist *)cmd->t_task->t_task_buf; |
5197 | struct scatterlist *orig_sg; | 5074 | struct scatterlist *orig_sg; |
5198 | 5075 | ||
5199 | orig_sg = kzalloc(sizeof(struct scatterlist) * | 5076 | orig_sg = kzalloc(sizeof(struct scatterlist) * |
5200 | T_TASK(cmd)->t_tasks_se_num, | 5077 | cmd->t_task->t_tasks_se_num, |
5201 | GFP_KERNEL); | 5078 | GFP_KERNEL); |
5202 | if (!(orig_sg)) { | 5079 | if (!(orig_sg)) { |
5203 | printk(KERN_ERR "Unable to allocate memory" | 5080 | printk(KERN_ERR "Unable to allocate memory" |
@@ -5207,9 +5084,9 @@ void transport_generic_process_write(struct se_cmd *cmd) | |||
5207 | return; | 5084 | return; |
5208 | } | 5085 | } |
5209 | 5086 | ||
5210 | memcpy(orig_sg, T_TASK(cmd)->t_task_buf, | 5087 | memcpy(orig_sg, cmd->t_task->t_task_buf, |
5211 | sizeof(struct scatterlist) * | 5088 | sizeof(struct scatterlist) * |
5212 | T_TASK(cmd)->t_tasks_se_num); | 5089 | cmd->t_task->t_tasks_se_num); |
5213 | 5090 | ||
5214 | cmd->data_length = cmd->cmd_spdtl; | 5091 | cmd->data_length = cmd->cmd_spdtl; |
5215 | /* | 5092 | /* |
@@ -5240,24 +5117,24 @@ static int transport_generic_write_pending(struct se_cmd *cmd) | |||
5240 | unsigned long flags; | 5117 | unsigned long flags; |
5241 | int ret; | 5118 | int ret; |
5242 | 5119 | ||
5243 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 5120 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
5244 | cmd->t_state = TRANSPORT_WRITE_PENDING; | 5121 | cmd->t_state = TRANSPORT_WRITE_PENDING; |
5245 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 5122 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
5246 | /* | 5123 | /* |
5247 | * For the TCM control CDBs using a contiguous buffer, do the memcpy | 5124 | * For the TCM control CDBs using a contiguous buffer, do the memcpy |
5248 | * from the passed Linux/SCSI struct scatterlist located at | 5125 | * from the passed Linux/SCSI struct scatterlist located at |
5249 | * T_TASK(se_cmd)->t_task_pt_buf to the contiguous buffer at | 5126 | * se_cmd->t_task->t_task_pt_buf to the contiguous buffer at |
5250 | * T_TASK(se_cmd)->t_task_buf. | 5127 | * se_cmd->t_task->t_task_buf. |
5251 | */ | 5128 | */ |
5252 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) | 5129 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) |
5253 | transport_memcpy_read_contig(cmd, | 5130 | transport_memcpy_read_contig(cmd, |
5254 | T_TASK(cmd)->t_task_buf, | 5131 | cmd->t_task->t_task_buf, |
5255 | T_TASK(cmd)->t_task_pt_sgl); | 5132 | cmd->t_task->t_task_pt_sgl); |
5256 | /* | 5133 | /* |
5257 | * Clear the se_cmd for WRITE_PENDING status in order to set | 5134 | * Clear the se_cmd for WRITE_PENDING status in order to set |
5258 | * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data | 5135 | * cmd->t_task->t_transport_active=0 so that transport_generic_handle_data |
5259 | * can be called from HW target mode interrupt code. This is safe | 5136 | * can be called from HW target mode interrupt code. This is safe |
5260 | * to be called with transport_off=1 before the CMD_TFO(cmd)->write_pending | 5137 | * to be called with transport_off=1 before the cmd->se_tfo->write_pending |
5261 | * because the se_cmd->se_lun pointer is not being cleared. | 5138 | * because the se_cmd->se_lun pointer is not being cleared. |
5262 | */ | 5139 | */ |
5263 | transport_cmd_check_stop(cmd, 1, 0); | 5140 | transport_cmd_check_stop(cmd, 1, 0); |
@@ -5266,7 +5143,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd) | |||
5266 | * Call the fabric write_pending function here to let the | 5143 | * Call the fabric write_pending function here to let the |
5267 | * frontend know that WRITE buffers are ready. | 5144 | * frontend know that WRITE buffers are ready. |
5268 | */ | 5145 | */ |
5269 | ret = CMD_TFO(cmd)->write_pending(cmd); | 5146 | ret = cmd->se_tfo->write_pending(cmd); |
5270 | if (ret < 0) | 5147 | if (ret < 0) |
5271 | return ret; | 5148 | return ret; |
5272 | 5149 | ||
@@ -5279,11 +5156,11 @@ static int transport_generic_write_pending(struct se_cmd *cmd) | |||
5279 | */ | 5156 | */ |
5280 | void transport_release_cmd_to_pool(struct se_cmd *cmd) | 5157 | void transport_release_cmd_to_pool(struct se_cmd *cmd) |
5281 | { | 5158 | { |
5282 | BUG_ON(!T_TASK(cmd)); | 5159 | BUG_ON(!cmd->t_task); |
5283 | BUG_ON(!CMD_TFO(cmd)); | 5160 | BUG_ON(!cmd->se_tfo); |
5284 | 5161 | ||
5285 | transport_free_se_cmd(cmd); | 5162 | transport_free_se_cmd(cmd); |
5286 | CMD_TFO(cmd)->release_cmd_to_pool(cmd); | 5163 | cmd->se_tfo->release_cmd_to_pool(cmd); |
5287 | } | 5164 | } |
5288 | EXPORT_SYMBOL(transport_release_cmd_to_pool); | 5165 | EXPORT_SYMBOL(transport_release_cmd_to_pool); |
5289 | 5166 | ||
@@ -5297,16 +5174,16 @@ void transport_generic_free_cmd( | |||
5297 | int release_to_pool, | 5174 | int release_to_pool, |
5298 | int session_reinstatement) | 5175 | int session_reinstatement) |
5299 | { | 5176 | { |
5300 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd)) | 5177 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !cmd->t_task) |
5301 | transport_release_cmd_to_pool(cmd); | 5178 | transport_release_cmd_to_pool(cmd); |
5302 | else { | 5179 | else { |
5303 | core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); | 5180 | core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); |
5304 | 5181 | ||
5305 | if (SE_LUN(cmd)) { | 5182 | if (cmd->se_lun) { |
5306 | #if 0 | 5183 | #if 0 |
5307 | printk(KERN_INFO "cmd: %p ITT: 0x%08x contains" | 5184 | printk(KERN_INFO "cmd: %p ITT: 0x%08x contains" |
5308 | " SE_LUN(cmd)\n", cmd, | 5185 | " cmd->se_lun\n", cmd, |
5309 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5186 | cmd->se_tfo->get_task_tag(cmd)); |
5310 | #endif | 5187 | #endif |
5311 | transport_lun_remove_cmd(cmd); | 5188 | transport_lun_remove_cmd(cmd); |
5312 | } | 5189 | } |
@@ -5343,32 +5220,32 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) | |||
5343 | * If the frontend has already requested this struct se_cmd to | 5220 | * If the frontend has already requested this struct se_cmd to |
5344 | * be stopped, we can safely ignore this struct se_cmd. | 5221 | * be stopped, we can safely ignore this struct se_cmd. |
5345 | */ | 5222 | */ |
5346 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 5223 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
5347 | if (atomic_read(&T_TASK(cmd)->t_transport_stop)) { | 5224 | if (atomic_read(&cmd->t_task->t_transport_stop)) { |
5348 | atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); | 5225 | atomic_set(&cmd->t_task->transport_lun_stop, 0); |
5349 | DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop ==" | 5226 | DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop ==" |
5350 | " TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd)); | 5227 | " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd)); |
5351 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 5228 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
5352 | transport_cmd_check_stop(cmd, 1, 0); | 5229 | transport_cmd_check_stop(cmd, 1, 0); |
5353 | return -1; | 5230 | return -EPERM; |
5354 | } | 5231 | } |
5355 | atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1); | 5232 | atomic_set(&cmd->t_task->transport_lun_fe_stop, 1); |
5356 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 5233 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
5357 | 5234 | ||
5358 | wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq); | 5235 | wake_up_interruptible(&cmd->se_lun->lun_se_dev->dev_queue_obj.thread_wq); |
5359 | 5236 | ||
5360 | ret = transport_stop_tasks_for_cmd(cmd); | 5237 | ret = transport_stop_tasks_for_cmd(cmd); |
5361 | 5238 | ||
5362 | DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:" | 5239 | DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:" |
5363 | " %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret); | 5240 | " %d\n", cmd, cmd->t_task->t_task_cdbs, ret); |
5364 | if (!ret) { | 5241 | if (!ret) { |
5365 | DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n", | 5242 | DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n", |
5366 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5243 | cmd->se_tfo->get_task_tag(cmd)); |
5367 | wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp); | 5244 | wait_for_completion(&cmd->t_task->transport_lun_stop_comp); |
5368 | DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n", | 5245 | DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n", |
5369 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5246 | cmd->se_tfo->get_task_tag(cmd)); |
5370 | } | 5247 | } |
5371 | transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); | 5248 | transport_remove_cmd_from_queue(cmd, &cmd->se_lun->lun_se_dev->dev_queue_obj); |
5372 | 5249 | ||
5373 | return 0; | 5250 | return 0; |
5374 | } | 5251 | } |
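The switch from dev_queue_obj-> to .dev_queue_obj in this hunk reflects "Make dev_queue_obj a member of se_device instead of a pointer": the queue object is now embedded in the device, removing a separate allocation and one level of indirection. A sketch of the struct change; the surrounding field layout is assumed:

    /* Before: separately allocated, separately freed */
    struct se_device {
    	struct se_queue_obj *dev_queue_obj;
    };

    /* After: embedded, with the same lifetime as the device */
    struct se_device {
    	struct se_queue_obj dev_queue_obj;
    };

    /* Call sites take its address where a pointer is still needed */
    wake_up_interruptible(&dev->dev_queue_obj.thread_wq);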
@@ -5394,33 +5271,33 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) | |||
5394 | struct se_cmd, se_lun_list); | 5271 | struct se_cmd, se_lun_list); |
5395 | list_del(&cmd->se_lun_list); | 5272 | list_del(&cmd->se_lun_list); |
5396 | 5273 | ||
5397 | if (!(T_TASK(cmd))) { | 5274 | if (!(cmd->t_task)) { |
5398 | printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL" | 5275 | printk(KERN_ERR "ITT: 0x%08x, cmd->t_task = NULL" |
5399 | "[i,t]_state: %u/%u\n", | 5276 | "[i,t]_state: %u/%u\n", |
5400 | CMD_TFO(cmd)->get_task_tag(cmd), | 5277 | cmd->se_tfo->get_task_tag(cmd), |
5401 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state); | 5278 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); |
5402 | BUG(); | 5279 | BUG(); |
5403 | } | 5280 | } |
5404 | atomic_set(&T_TASK(cmd)->transport_lun_active, 0); | 5281 | atomic_set(&cmd->t_task->transport_lun_active, 0); |
5405 | /* | 5282 | /* |
5406 | * This will notify iscsi_target_transport.c: | 5283 | * This will notify iscsi_target_transport.c: |
5407 | * transport_cmd_check_stop() that a LUN shutdown is in | 5284 | * transport_cmd_check_stop() that a LUN shutdown is in |
5408 | * progress for the iscsi_cmd_t. | 5285 | * progress for the iscsi_cmd_t. |
5409 | */ | 5286 | */ |
5410 | spin_lock(&T_TASK(cmd)->t_state_lock); | 5287 | spin_lock(&cmd->t_task->t_state_lock); |
5411 | DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport" | 5288 | DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->t_task->transport" |
5412 | "_lun_stop for ITT: 0x%08x\n", | 5289 | "_lun_stop for ITT: 0x%08x\n", |
5413 | SE_LUN(cmd)->unpacked_lun, | 5290 | cmd->se_lun->unpacked_lun, |
5414 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5291 | cmd->se_tfo->get_task_tag(cmd)); |
5415 | atomic_set(&T_TASK(cmd)->transport_lun_stop, 1); | 5292 | atomic_set(&cmd->t_task->transport_lun_stop, 1); |
5416 | spin_unlock(&T_TASK(cmd)->t_state_lock); | 5293 | spin_unlock(&cmd->t_task->t_state_lock); |
5417 | 5294 | ||
5418 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); | 5295 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); |
5419 | 5296 | ||
5420 | if (!(SE_LUN(cmd))) { | 5297 | if (!(cmd->se_lun)) { |
5421 | printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n", | 5298 | printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n", |
5422 | CMD_TFO(cmd)->get_task_tag(cmd), | 5299 | cmd->se_tfo->get_task_tag(cmd), |
5423 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state); | 5300 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); |
5424 | BUG(); | 5301 | BUG(); |
5425 | } | 5302 | } |
5426 | /* | 5303 | /* |
@@ -5428,27 +5305,27 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) | |||
5428 | * and/or stop its context. | 5305 | * and/or stop its context. |
5429 | */ | 5306 | */ |
5430 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport" | 5307 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport" |
5431 | "_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun, | 5308 | "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun, |
5432 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5309 | cmd->se_tfo->get_task_tag(cmd)); |
5433 | 5310 | ||
5434 | if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) { | 5311 | if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) { |
5435 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | 5312 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
5436 | continue; | 5313 | continue; |
5437 | } | 5314 | } |
5438 | 5315 | ||
5439 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun" | 5316 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun" |
5440 | "_wait_for_tasks(): SUCCESS\n", | 5317 | "_wait_for_tasks(): SUCCESS\n", |
5441 | SE_LUN(cmd)->unpacked_lun, | 5318 | cmd->se_lun->unpacked_lun, |
5442 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5319 | cmd->se_tfo->get_task_tag(cmd)); |
5443 | 5320 | ||
5444 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags); | 5321 | spin_lock_irqsave(&cmd->t_task->t_state_lock, cmd_flags); |
5445 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { | 5322 | if (!(atomic_read(&cmd->t_task->transport_dev_active))) { |
5446 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); | 5323 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, cmd_flags); |
5447 | goto check_cond; | 5324 | goto check_cond; |
5448 | } | 5325 | } |
5449 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); | 5326 | atomic_set(&cmd->t_task->transport_dev_active, 0); |
5450 | transport_all_task_dev_remove_state(cmd); | 5327 | transport_all_task_dev_remove_state(cmd); |
5451 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); | 5328 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, cmd_flags); |
5452 | 5329 | ||
5453 | transport_free_dev_tasks(cmd); | 5330 | transport_free_dev_tasks(cmd); |
5454 | /* | 5331 | /* |
@@ -5465,24 +5342,24 @@ check_cond: | |||
5465 | * be released, notify the waiting thread now that LU has | 5342 | * be released, notify the waiting thread now that LU has |
5466 | * finished accessing it. | 5343 | * finished accessing it. |
5467 | */ | 5344 | */ |
5468 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags); | 5345 | spin_lock_irqsave(&cmd->t_task->t_state_lock, cmd_flags); |
5469 | if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) { | 5346 | if (atomic_read(&cmd->t_task->transport_lun_fe_stop)) { |
5470 | DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for" | 5347 | DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for" |
5471 | " struct se_cmd: %p ITT: 0x%08x\n", | 5348 | " struct se_cmd: %p ITT: 0x%08x\n", |
5472 | lun->unpacked_lun, | 5349 | lun->unpacked_lun, |
5473 | cmd, CMD_TFO(cmd)->get_task_tag(cmd)); | 5350 | cmd, cmd->se_tfo->get_task_tag(cmd)); |
5474 | 5351 | ||
5475 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | 5352 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, |
5476 | cmd_flags); | 5353 | cmd_flags); |
5477 | transport_cmd_check_stop(cmd, 1, 0); | 5354 | transport_cmd_check_stop(cmd, 1, 0); |
5478 | complete(&T_TASK(cmd)->transport_lun_fe_stop_comp); | 5355 | complete(&cmd->t_task->transport_lun_fe_stop_comp); |
5479 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | 5356 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
5480 | continue; | 5357 | continue; |
5481 | } | 5358 | } |
5482 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n", | 5359 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n", |
5483 | lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd)); | 5360 | lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); |
5484 | 5361 | ||
5485 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); | 5362 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, cmd_flags); |
5486 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | 5363 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
5487 | } | 5364 | } |
5488 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); | 5365 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); |
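The loop above has to drop lun_cmd_lock before sleeping in transport_lun_wait_for_tasks() and re-take it before touching the list again, since a spinlock cannot be held across a blocking call. A generic sketch of that shape follows; the my_item/my_head types and wait_on_item() are hypothetical stand-ins for the se_cmd bookkeeping, not code from the patch.

	#include <linux/spinlock.h>
	#include <linux/list.h>

	struct my_item { struct list_head node; };
	struct my_head { spinlock_t lock; struct list_head list; };

	/* Hypothetical stand-in for the blocking per-command wait. */
	static void wait_on_item(struct my_item *it);

	static void drain_items(struct my_head *h)
	{
		struct my_item *it;
		unsigned long flags;

		spin_lock_irqsave(&h->lock, flags);
		while (!list_empty(&h->list)) {
			it = list_first_entry(&h->list, struct my_item, node);
			list_del_init(&it->node);
			spin_unlock_irqrestore(&h->lock, flags); /* drop before sleeping */

			wait_on_item(it);	/* may block; no spinlock held */

			spin_lock_irqsave(&h->lock, flags);	/* re-take, re-check */
		}
		spin_unlock_irqrestore(&h->lock, flags);
	}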
@@ -5506,7 +5383,7 @@ int transport_clear_lun_from_sessions(struct se_lun *lun) | |||
5506 | "tcm_cl_%u", lun->unpacked_lun); | 5383 | "tcm_cl_%u", lun->unpacked_lun); |
5507 | if (IS_ERR(kt)) { | 5384 | if (IS_ERR(kt)) { |
5508 | printk(KERN_ERR "Unable to start clear_lun thread\n"); | 5385 | printk(KERN_ERR "Unable to start clear_lun thread\n"); |
5509 | return -1; | 5386 | return PTR_ERR(kt); |
5510 | } | 5387 | } |
5511 | wait_for_completion(&lun->lun_shutdown_comp); | 5388 | wait_for_completion(&lun->lun_shutdown_comp); |
5512 | 5389 | ||
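The return fix above follows from kthread_run() reporting failure as an ERR_PTR-encoded errno rather than NULL, so the caller can propagate the real error instead of a bare -1. A minimal sketch of the idiom; the thread function and wrapper are placeholders, only the IS_ERR/PTR_ERR handling is the point.

	#include <linux/kthread.h>
	#include <linux/err.h>

	static int my_thread_fn(void *data)
	{
		/* ... run until kthread_should_stop() ... */
		return 0;
	}

	static int start_clear_thread(void *data)
	{
		struct task_struct *kt;

		kt = kthread_run(my_thread_fn, data, "tcm_cl_%u", 0);
		if (IS_ERR(kt))
			return PTR_ERR(kt);	/* e.g. -ENOMEM, not -1 */
		return 0;
	}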
@@ -5528,20 +5405,20 @@ static void transport_generic_wait_for_tasks( | |||
5528 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) | 5405 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) |
5529 | return; | 5406 | return; |
5530 | 5407 | ||
5531 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 5408 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
5532 | /* | 5409 | /* |
5533 | * If we are already stopped due to an external event (i.e. LUN shutdown) | 5410 | * If we are already stopped due to an external event (i.e. LUN shutdown) |
5534 | * sleep until the connection can have the passed struct se_cmd back. | 5411 | * sleep until the connection can have the passed struct se_cmd back. |
5535 | * The T_TASK(cmd)->transport_lun_stopped_sem will be upped by | 5412 | * The cmd->t_task->transport_lun_stopped_sem will be upped by |
5536 | * transport_clear_lun_from_sessions() once the ConfigFS context caller | 5413 | * transport_clear_lun_from_sessions() once the ConfigFS context caller |
5537 | * has completed its operation on the struct se_cmd. | 5414 | * has completed its operation on the struct se_cmd. |
5538 | */ | 5415 | */ |
5539 | if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) { | 5416 | if (atomic_read(&cmd->t_task->transport_lun_stop)) { |
5540 | 5417 | ||
5541 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopping" | 5418 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopping" |
5542 | " wait_for_completion(&T_TASK(cmd)transport_lun_fe" | 5419 | " wait_for_completion(&cmd->t_tasktransport_lun_fe" |
5543 | "_stop_comp); for ITT: 0x%08x\n", | 5420 | "_stop_comp); for ITT: 0x%08x\n", |
5544 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5421 | cmd->se_tfo->get_task_tag(cmd)); |
5545 | /* | 5422 | /* |
5546 | * There is a special case for WRITES where a FE exception + | 5423 | * There is a special case for WRITES where a FE exception + |
5547 | * LUN shutdown means ConfigFS context is still sleeping on | 5424 | * LUN shutdown means ConfigFS context is still sleeping on |
@@ -5549,10 +5426,10 @@ static void transport_generic_wait_for_tasks( | |||
5549 | * We go ahead and up transport_lun_stop_comp just to be sure | 5426 | * We go ahead and up transport_lun_stop_comp just to be sure |
5550 | * here. | 5427 | * here. |
5551 | */ | 5428 | */ |
5552 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 5429 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
5553 | complete(&T_TASK(cmd)->transport_lun_stop_comp); | 5430 | complete(&cmd->t_task->transport_lun_stop_comp); |
5554 | wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp); | 5431 | wait_for_completion(&cmd->t_task->transport_lun_fe_stop_comp); |
5555 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 5432 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
5556 | 5433 | ||
5557 | transport_all_task_dev_remove_state(cmd); | 5434 | transport_all_task_dev_remove_state(cmd); |
5558 | /* | 5435 | /* |
@@ -5561,39 +5438,39 @@ static void transport_generic_wait_for_tasks( | |||
5561 | * normal means below. | 5438 | * normal means below. |
5562 | */ | 5439 | */ |
5563 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopped" | 5440 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopped" |
5564 | " wait_for_completion(&T_TASK(cmd)transport_lun_fe_" | 5441 | " wait_for_completion(&cmd->t_tasktransport_lun_fe_" |
5565 | "stop_comp); for ITT: 0x%08x\n", | 5442 | "stop_comp); for ITT: 0x%08x\n", |
5566 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5443 | cmd->se_tfo->get_task_tag(cmd)); |
5567 | 5444 | ||
5568 | atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); | 5445 | atomic_set(&cmd->t_task->transport_lun_stop, 0); |
5569 | } | 5446 | } |
5570 | if (!atomic_read(&T_TASK(cmd)->t_transport_active) || | 5447 | if (!atomic_read(&cmd->t_task->t_transport_active) || |
5571 | atomic_read(&T_TASK(cmd)->t_transport_aborted)) | 5448 | atomic_read(&cmd->t_task->t_transport_aborted)) |
5572 | goto remove; | 5449 | goto remove; |
5573 | 5450 | ||
5574 | atomic_set(&T_TASK(cmd)->t_transport_stop, 1); | 5451 | atomic_set(&cmd->t_task->t_transport_stop, 1); |
5575 | 5452 | ||
5576 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x" | 5453 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x" |
5577 | " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" | 5454 | " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" |
5578 | " = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd), | 5455 | " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd), |
5579 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state, | 5456 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, |
5580 | cmd->deferred_t_state); | 5457 | cmd->deferred_t_state); |
5581 | 5458 | ||
5582 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 5459 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
5583 | 5460 | ||
5584 | wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq); | 5461 | wake_up_interruptible(&cmd->se_lun->lun_se_dev->dev_queue_obj.thread_wq); |
5585 | 5462 | ||
5586 | wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp); | 5463 | wait_for_completion(&cmd->t_task->t_transport_stop_comp); |
5587 | 5464 | ||
5588 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 5465 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
5589 | atomic_set(&T_TASK(cmd)->t_transport_active, 0); | 5466 | atomic_set(&cmd->t_task->t_transport_active, 0); |
5590 | atomic_set(&T_TASK(cmd)->t_transport_stop, 0); | 5467 | atomic_set(&cmd->t_task->t_transport_stop, 0); |
5591 | 5468 | ||
5592 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion(" | 5469 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion(" |
5593 | "&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n", | 5470 | "&cmd->t_task->t_transport_stop_comp) for ITT: 0x%08x\n", |
5594 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5471 | cmd->se_tfo->get_task_tag(cmd)); |
5595 | remove: | 5472 | remove: |
5596 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 5473 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
5597 | if (!remove_cmd) | 5474 | if (!remove_cmd) |
5598 | return; | 5475 | return; |
5599 | 5476 | ||
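Most of the churn in this function is the changelog's "Remove #defines that just perform indirection": accessor macros such as T_TASK() and CMD_TFO() are expanded to plain member dereferences at each call site. A sketch of the before/after; the macro body here is reconstructed from the call sites, not quoted from the old header.

	/* Old style: the macro hides a one-level dereference. */
	#define T_TASK(cmd)	((cmd)->t_task)

	static void mark_stopped_old(struct se_cmd *cmd)
	{
		atomic_set(&T_TASK(cmd)->t_transport_stop, 0);
	}

	/* New style: the same access, spelled out where it happens. */
	static void mark_stopped_new(struct se_cmd *cmd)
	{
		atomic_set(&cmd->t_task->t_transport_stop, 0);
	}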
@@ -5632,13 +5509,13 @@ int transport_send_check_condition_and_sense( | |||
5632 | int offset; | 5509 | int offset; |
5633 | u8 asc = 0, ascq = 0; | 5510 | u8 asc = 0, ascq = 0; |
5634 | 5511 | ||
5635 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 5512 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
5636 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { | 5513 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { |
5637 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 5514 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
5638 | return 0; | 5515 | return 0; |
5639 | } | 5516 | } |
5640 | cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; | 5517 | cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; |
5641 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 5518 | spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); |
5642 | 5519 | ||
5643 | if (!reason && from_transport) | 5520 | if (!reason && from_transport) |
5644 | goto after_reason; | 5521 | goto after_reason; |
@@ -5651,7 +5528,7 @@ int transport_send_check_condition_and_sense( | |||
5651 | * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE | 5528 | * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE |
5652 | * from include/scsi/scsi_cmnd.h | 5529 | * from include/scsi/scsi_cmnd.h |
5653 | */ | 5530 | */ |
5654 | offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd, | 5531 | offset = cmd->se_tfo->set_fabric_sense_len(cmd, |
5655 | TRANSPORT_SENSE_BUFFER); | 5532 | TRANSPORT_SENSE_BUFFER); |
5656 | /* | 5533 | /* |
5657 | * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses | 5534 | * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses |
@@ -5788,7 +5665,7 @@ int transport_send_check_condition_and_sense( | |||
5788 | cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; | 5665 | cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; |
5789 | 5666 | ||
5790 | after_reason: | 5667 | after_reason: |
5791 | CMD_TFO(cmd)->queue_status(cmd); | 5668 | cmd->se_tfo->queue_status(cmd); |
5792 | return 0; | 5669 | return 0; |
5793 | } | 5670 | } |
5794 | EXPORT_SYMBOL(transport_send_check_condition_and_sense); | 5671 | EXPORT_SYMBOL(transport_send_check_condition_and_sense); |
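For reference, the sense buffer this function fills follows the SPC-3 fixed-format layout; the offsets below come from the spec, while the helper itself is only an illustrative condensation of the code above, not a function from the patch.

	/* Fixed-format sense data, SPC-3 4.5.3; buf holds at least 18 bytes. */
	static void fill_fixed_sense(unsigned char *buf, unsigned char key,
				     unsigned char asc, unsigned char ascq)
	{
		buf[0] = 0x70;		/* response code: current error, fixed format */
		buf[2] = key;		/* SENSE KEY */
		buf[7] = 0x0a;		/* ADDITIONAL SENSE LENGTH (10 more bytes) */
		buf[12] = asc;		/* ADDITIONAL SENSE CODE */
		buf[13] = ascq;		/* ADDITIONAL SENSE CODE QUALIFIER */
	}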
@@ -5797,18 +5674,18 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status) | |||
5797 | { | 5674 | { |
5798 | int ret = 0; | 5675 | int ret = 0; |
5799 | 5676 | ||
5800 | if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) { | 5677 | if (atomic_read(&cmd->t_task->t_transport_aborted) != 0) { |
5801 | if (!(send_status) || | 5678 | if (!(send_status) || |
5802 | (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) | 5679 | (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) |
5803 | return 1; | 5680 | return 1; |
5804 | #if 0 | 5681 | #if 0 |
5805 | printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED" | 5682 | printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED" |
5806 | " status for CDB: 0x%02x ITT: 0x%08x\n", | 5683 | " status for CDB: 0x%02x ITT: 0x%08x\n", |
5807 | T_TASK(cmd)->t_task_cdb[0], | 5684 | cmd->t_task->t_task_cdb[0], |
5808 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5685 | cmd->se_tfo->get_task_tag(cmd)); |
5809 | #endif | 5686 | #endif |
5810 | cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; | 5687 | cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; |
5811 | CMD_TFO(cmd)->queue_status(cmd); | 5688 | cmd->se_tfo->queue_status(cmd); |
5812 | ret = 1; | 5689 | ret = 1; |
5813 | } | 5690 | } |
5814 | return ret; | 5691 | return ret; |
@@ -5824,8 +5701,8 @@ void transport_send_task_abort(struct se_cmd *cmd) | |||
5824 | * queued back to fabric module by transport_check_aborted_status(). | 5701 | * queued back to fabric module by transport_check_aborted_status(). |
5825 | */ | 5702 | */ |
5826 | if (cmd->data_direction == DMA_TO_DEVICE) { | 5703 | if (cmd->data_direction == DMA_TO_DEVICE) { |
5827 | if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) { | 5704 | if (cmd->se_tfo->write_pending_status(cmd) != 0) { |
5828 | atomic_inc(&T_TASK(cmd)->t_transport_aborted); | 5705 | atomic_inc(&cmd->t_task->t_transport_aborted); |
5829 | smp_mb__after_atomic_inc(); | 5706 | smp_mb__after_atomic_inc(); |
5830 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | 5707 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; |
5831 | transport_new_cmd_failure(cmd); | 5708 | transport_new_cmd_failure(cmd); |
@@ -5835,10 +5712,10 @@ void transport_send_task_abort(struct se_cmd *cmd) | |||
5835 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | 5712 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; |
5836 | #if 0 | 5713 | #if 0 |
5837 | printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," | 5714 | printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," |
5838 | " ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0], | 5715 | " ITT: 0x%08x\n", cmd->t_task->t_task_cdb[0], |
5839 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5716 | cmd->se_tfo->get_task_tag(cmd)); |
5840 | #endif | 5717 | #endif |
5841 | CMD_TFO(cmd)->queue_status(cmd); | 5718 | cmd->se_tfo->queue_status(cmd); |
5842 | } | 5719 | } |
5843 | 5720 | ||
5844 | /* transport_generic_do_tmr(): | 5721 | /* transport_generic_do_tmr(): |
@@ -5848,7 +5725,7 @@ void transport_send_task_abort(struct se_cmd *cmd) | |||
5848 | int transport_generic_do_tmr(struct se_cmd *cmd) | 5725 | int transport_generic_do_tmr(struct se_cmd *cmd) |
5849 | { | 5726 | { |
5850 | struct se_cmd *ref_cmd; | 5727 | struct se_cmd *ref_cmd; |
5851 | struct se_device *dev = SE_DEV(cmd); | 5728 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
5852 | struct se_tmr_req *tmr = cmd->se_tmr_req; | 5729 | struct se_tmr_req *tmr = cmd->se_tmr_req; |
5853 | int ret; | 5730 | int ret; |
5854 | 5731 | ||
@@ -5881,7 +5758,7 @@ int transport_generic_do_tmr(struct se_cmd *cmd) | |||
5881 | } | 5758 | } |
5882 | 5759 | ||
5883 | cmd->t_state = TRANSPORT_ISTATE_PROCESSING; | 5760 | cmd->t_state = TRANSPORT_ISTATE_PROCESSING; |
5884 | CMD_TFO(cmd)->queue_tm_rsp(cmd); | 5761 | cmd->se_tfo->queue_tm_rsp(cmd); |
5885 | 5762 | ||
5886 | transport_cmd_check_stop(cmd, 2, 0); | 5763 | transport_cmd_check_stop(cmd, 2, 0); |
5887 | return 0; | 5764 | return 0; |
@@ -5920,44 +5797,44 @@ static void transport_processing_shutdown(struct se_device *dev) | |||
5920 | */ | 5797 | */ |
5921 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 5798 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
5922 | while ((task = transport_get_task_from_state_list(dev))) { | 5799 | while ((task = transport_get_task_from_state_list(dev))) { |
5923 | if (!(TASK_CMD(task))) { | 5800 | if (!task->task_se_cmd) { |
5924 | printk(KERN_ERR "TASK_CMD(task) is NULL!\n"); | 5801 | printk(KERN_ERR "task->task_se_cmd is NULL!\n"); |
5925 | continue; | 5802 | continue; |
5926 | } | 5803 | } |
5927 | cmd = TASK_CMD(task); | 5804 | cmd = task->task_se_cmd; |
5928 | 5805 | ||
5929 | if (!T_TASK(cmd)) { | 5806 | if (!cmd->t_task) { |
5930 | printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:" | 5807 | printk(KERN_ERR "cmd->t_task is NULL for task: %p cmd:" |
5931 | " %p ITT: 0x%08x\n", task, cmd, | 5808 | " %p ITT: 0x%08x\n", task, cmd, |
5932 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5809 | cmd->se_tfo->get_task_tag(cmd)); |
5933 | continue; | 5810 | continue; |
5934 | } | 5811 | } |
5935 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | 5812 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
5936 | 5813 | ||
5937 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 5814 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
5938 | 5815 | ||
5939 | DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x," | 5816 | DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x," |
5940 | " i_state/def_i_state: %d/%d, t_state/def_t_state:" | 5817 | " i_state/def_i_state: %d/%d, t_state/def_t_state:" |
5941 | " %d/%d cdb: 0x%02x\n", cmd, task, | 5818 | " %d/%d cdb: 0x%02x\n", cmd, task, |
5942 | CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn, | 5819 | cmd->se_tfo->get_task_tag(cmd), cmd->cmd_sn, |
5943 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state, | 5820 | cmd->se_tfo->get_cmd_state(cmd), cmd->deferred_i_state, |
5944 | cmd->t_state, cmd->deferred_t_state, | 5821 | cmd->t_state, cmd->deferred_t_state, |
5945 | T_TASK(cmd)->t_task_cdb[0]); | 5822 | cmd->t_task->t_task_cdb[0]); |
5946 | DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:" | 5823 | DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:" |
5947 | " %d t_task_cdbs_sent: %d -- t_transport_active: %d" | 5824 | " %d t_task_cdbs_sent: %d -- t_transport_active: %d" |
5948 | " t_transport_stop: %d t_transport_sent: %d\n", | 5825 | " t_transport_stop: %d t_transport_sent: %d\n", |
5949 | CMD_TFO(cmd)->get_task_tag(cmd), | 5826 | cmd->se_tfo->get_task_tag(cmd), |
5950 | T_TASK(cmd)->t_task_cdbs, | 5827 | cmd->t_task->t_task_cdbs, |
5951 | atomic_read(&T_TASK(cmd)->t_task_cdbs_left), | 5828 | atomic_read(&cmd->t_task->t_task_cdbs_left), |
5952 | atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), | 5829 | atomic_read(&cmd->t_task->t_task_cdbs_sent), |
5953 | atomic_read(&T_TASK(cmd)->t_transport_active), | 5830 | atomic_read(&cmd->t_task->t_transport_active), |
5954 | atomic_read(&T_TASK(cmd)->t_transport_stop), | 5831 | atomic_read(&cmd->t_task->t_transport_stop), |
5955 | atomic_read(&T_TASK(cmd)->t_transport_sent)); | 5832 | atomic_read(&cmd->t_task->t_transport_sent)); |
5956 | 5833 | ||
5957 | if (atomic_read(&task->task_active)) { | 5834 | if (atomic_read(&task->task_active)) { |
5958 | atomic_set(&task->task_stop, 1); | 5835 | atomic_set(&task->task_stop, 1); |
5959 | spin_unlock_irqrestore( | 5836 | spin_unlock_irqrestore( |
5960 | &T_TASK(cmd)->t_state_lock, flags); | 5837 | &cmd->t_task->t_state_lock, flags); |
5961 | 5838 | ||
5962 | DEBUG_DO("Waiting for task: %p to shutdown for dev:" | 5839 | DEBUG_DO("Waiting for task: %p to shutdown for dev:" |
5963 | " %p\n", task, dev); | 5840 | " %p\n", task, dev); |
@@ -5965,8 +5842,8 @@ static void transport_processing_shutdown(struct se_device *dev) | |||
5965 | DEBUG_DO("Completed task: %p shutdown for dev: %p\n", | 5842 | DEBUG_DO("Completed task: %p shutdown for dev: %p\n", |
5966 | task, dev); | 5843 | task, dev); |
5967 | 5844 | ||
5968 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 5845 | spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); |
5969 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); | 5846 | atomic_dec(&cmd->t_task->t_task_cdbs_left); |
5970 | 5847 | ||
5971 | atomic_set(&task->task_active, 0); | 5848 | atomic_set(&task->task_active, 0); |
5972 | atomic_set(&task->task_stop, 0); | 5849 | atomic_set(&task->task_stop, 0); |
@@ -5976,39 +5853,39 @@ static void transport_processing_shutdown(struct se_device *dev) | |||
5976 | } | 5853 | } |
5977 | __transport_stop_task_timer(task, &flags); | 5854 | __transport_stop_task_timer(task, &flags); |
5978 | 5855 | ||
5979 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) { | 5856 | if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_ex_left))) { |
5980 | spin_unlock_irqrestore( | 5857 | spin_unlock_irqrestore( |
5981 | &T_TASK(cmd)->t_state_lock, flags); | 5858 | &cmd->t_task->t_state_lock, flags); |
5982 | 5859 | ||
5983 | DEBUG_DO("Skipping task: %p, dev: %p for" | 5860 | DEBUG_DO("Skipping task: %p, dev: %p for" |
5984 | " t_task_cdbs_ex_left: %d\n", task, dev, | 5861 | " t_task_cdbs_ex_left: %d\n", task, dev, |
5985 | atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left)); | 5862 | atomic_read(&cmd->t_task->t_task_cdbs_ex_left)); |
5986 | 5863 | ||
5987 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 5864 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
5988 | continue; | 5865 | continue; |
5989 | } | 5866 | } |
5990 | 5867 | ||
5991 | if (atomic_read(&T_TASK(cmd)->t_transport_active)) { | 5868 | if (atomic_read(&cmd->t_task->t_transport_active)) { |
5992 | DEBUG_DO("got t_transport_active = 1 for task: %p, dev:" | 5869 | DEBUG_DO("got t_transport_active = 1 for task: %p, dev:" |
5993 | " %p\n", task, dev); | 5870 | " %p\n", task, dev); |
5994 | 5871 | ||
5995 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { | 5872 | if (atomic_read(&cmd->t_task->t_fe_count)) { |
5996 | spin_unlock_irqrestore( | 5873 | spin_unlock_irqrestore( |
5997 | &T_TASK(cmd)->t_state_lock, flags); | 5874 | &cmd->t_task->t_state_lock, flags); |
5998 | transport_send_check_condition_and_sense( | 5875 | transport_send_check_condition_and_sense( |
5999 | cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, | 5876 | cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, |
6000 | 0); | 5877 | 0); |
6001 | transport_remove_cmd_from_queue(cmd, | 5878 | transport_remove_cmd_from_queue(cmd, |
6002 | SE_DEV(cmd)->dev_queue_obj); | 5879 | &cmd->se_lun->lun_se_dev->dev_queue_obj); |
6003 | 5880 | ||
6004 | transport_lun_remove_cmd(cmd); | 5881 | transport_lun_remove_cmd(cmd); |
6005 | transport_cmd_check_stop(cmd, 1, 0); | 5882 | transport_cmd_check_stop(cmd, 1, 0); |
6006 | } else { | 5883 | } else { |
6007 | spin_unlock_irqrestore( | 5884 | spin_unlock_irqrestore( |
6008 | &T_TASK(cmd)->t_state_lock, flags); | 5885 | &cmd->t_task->t_state_lock, flags); |
6009 | 5886 | ||
6010 | transport_remove_cmd_from_queue(cmd, | 5887 | transport_remove_cmd_from_queue(cmd, |
6011 | SE_DEV(cmd)->dev_queue_obj); | 5888 | &cmd->se_lun->lun_se_dev->dev_queue_obj); |
6012 | 5889 | ||
6013 | transport_lun_remove_cmd(cmd); | 5890 | transport_lun_remove_cmd(cmd); |
6014 | 5891 | ||
@@ -6022,22 +5899,22 @@ static void transport_processing_shutdown(struct se_device *dev) | |||
6022 | DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n", | 5899 | DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n", |
6023 | task, dev); | 5900 | task, dev); |
6024 | 5901 | ||
6025 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { | 5902 | if (atomic_read(&cmd->t_task->t_fe_count)) { |
6026 | spin_unlock_irqrestore( | 5903 | spin_unlock_irqrestore( |
6027 | &T_TASK(cmd)->t_state_lock, flags); | 5904 | &cmd->t_task->t_state_lock, flags); |
6028 | transport_send_check_condition_and_sense(cmd, | 5905 | transport_send_check_condition_and_sense(cmd, |
6029 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | 5906 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); |
6030 | transport_remove_cmd_from_queue(cmd, | 5907 | transport_remove_cmd_from_queue(cmd, |
6031 | SE_DEV(cmd)->dev_queue_obj); | 5908 | &cmd->se_lun->lun_se_dev->dev_queue_obj); |
6032 | 5909 | ||
6033 | transport_lun_remove_cmd(cmd); | 5910 | transport_lun_remove_cmd(cmd); |
6034 | transport_cmd_check_stop(cmd, 1, 0); | 5911 | transport_cmd_check_stop(cmd, 1, 0); |
6035 | } else { | 5912 | } else { |
6036 | spin_unlock_irqrestore( | 5913 | spin_unlock_irqrestore( |
6037 | &T_TASK(cmd)->t_state_lock, flags); | 5914 | &cmd->t_task->t_state_lock, flags); |
6038 | 5915 | ||
6039 | transport_remove_cmd_from_queue(cmd, | 5916 | transport_remove_cmd_from_queue(cmd, |
6040 | SE_DEV(cmd)->dev_queue_obj); | 5917 | &cmd->se_lun->lun_se_dev->dev_queue_obj); |
6041 | transport_lun_remove_cmd(cmd); | 5918 | transport_lun_remove_cmd(cmd); |
6042 | 5919 | ||
6043 | if (transport_cmd_check_stop(cmd, 1, 0)) | 5920 | if (transport_cmd_check_stop(cmd, 1, 0)) |
@@ -6050,18 +5927,15 @@ static void transport_processing_shutdown(struct se_device *dev) | |||
6050 | /* | 5927 | /* |
6051 | * Empty the struct se_device's struct se_cmd list. | 5928 | * Empty the struct se_device's struct se_cmd list. |
6052 | */ | 5929 | */ |
6053 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); | 5930 | while ((qr = transport_get_qr_from_queue(&dev->dev_queue_obj))) { |
6054 | while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) { | 5931 | cmd = qr->cmd; |
6055 | spin_unlock_irqrestore( | ||
6056 | &dev->dev_queue_obj->cmd_queue_lock, flags); | ||
6057 | cmd = (struct se_cmd *)qr->cmd; | ||
6058 | state = qr->state; | 5932 | state = qr->state; |
6059 | kfree(qr); | 5933 | kfree(qr); |
6060 | 5934 | ||
6061 | DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n", | 5935 | DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n", |
6062 | cmd, state); | 5936 | cmd, state); |
6063 | 5937 | ||
6064 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { | 5938 | if (atomic_read(&cmd->t_task->t_fe_count)) { |
6065 | transport_send_check_condition_and_sense(cmd, | 5939 | transport_send_check_condition_and_sense(cmd, |
6066 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | 5940 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); |
6067 | 5941 | ||
@@ -6072,9 +5946,7 @@ static void transport_processing_shutdown(struct se_device *dev) | |||
6072 | if (transport_cmd_check_stop(cmd, 1, 0)) | 5946 | if (transport_cmd_check_stop(cmd, 1, 0)) |
6073 | transport_generic_remove(cmd, 0, 0); | 5947 | transport_generic_remove(cmd, 0, 0); |
6074 | } | 5948 | } |
6075 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); | ||
6076 | } | 5949 | } |
6077 | spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags); | ||
6078 | } | 5950 | } |
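The drain loop above shrinks because the open-coded lock/unlock dance around __transport_get_qr_from_queue() moves inside transport_get_qr_from_queue(), which now takes cmd_queue_lock itself. A sketch of what such a helper looks like; the list member names are assumed from the surrounding patch rather than quoted.

	static struct se_queue_req *
	transport_get_qr_from_queue(struct se_queue_obj *qobj)
	{
		struct se_queue_req *qr = NULL;
		unsigned long flags;

		spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
		if (!list_empty(&qobj->qobj_list)) {
			qr = list_first_entry(&qobj->qobj_list,
					      struct se_queue_req, qr_list);
			list_del(&qr->qr_list);
			atomic_dec(&qobj->queue_cnt);
		}
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

		return qr;
	}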
6079 | 5951 | ||
6080 | /* transport_processing_thread(): | 5952 | /* transport_processing_thread(): |
@@ -6091,8 +5963,8 @@ static int transport_processing_thread(void *param) | |||
6091 | set_user_nice(current, -20); | 5963 | set_user_nice(current, -20); |
6092 | 5964 | ||
6093 | while (!kthread_should_stop()) { | 5965 | while (!kthread_should_stop()) { |
6094 | ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq, | 5966 | ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq, |
6095 | atomic_read(&dev->dev_queue_obj->queue_cnt) || | 5967 | atomic_read(&dev->dev_queue_obj.queue_cnt) || |
6096 | kthread_should_stop()); | 5968 | kthread_should_stop()); |
6097 | if (ret < 0) | 5969 | if (ret < 0) |
6098 | goto out; | 5970 | goto out; |
@@ -6108,22 +5980,22 @@ static int transport_processing_thread(void *param) | |||
6108 | get_cmd: | 5980 | get_cmd: |
6109 | __transport_execute_tasks(dev); | 5981 | __transport_execute_tasks(dev); |
6110 | 5982 | ||
6111 | qr = transport_get_qr_from_queue(dev->dev_queue_obj); | 5983 | qr = transport_get_qr_from_queue(&dev->dev_queue_obj); |
6112 | if (!(qr)) | 5984 | if (!(qr)) |
6113 | continue; | 5985 | continue; |
6114 | 5986 | ||
6115 | cmd = (struct se_cmd *)qr->cmd; | 5987 | cmd = qr->cmd; |
6116 | t_state = qr->state; | 5988 | t_state = qr->state; |
6117 | kfree(qr); | 5989 | kfree(qr); |
6118 | 5990 | ||
6119 | switch (t_state) { | 5991 | switch (t_state) { |
6120 | case TRANSPORT_NEW_CMD_MAP: | 5992 | case TRANSPORT_NEW_CMD_MAP: |
6121 | if (!(CMD_TFO(cmd)->new_cmd_map)) { | 5993 | if (!(cmd->se_tfo->new_cmd_map)) { |
6122 | printk(KERN_ERR "CMD_TFO(cmd)->new_cmd_map is" | 5994 | printk(KERN_ERR "cmd->se_tfo->new_cmd_map is" |
6123 | " NULL for TRANSPORT_NEW_CMD_MAP\n"); | 5995 | " NULL for TRANSPORT_NEW_CMD_MAP\n"); |
6124 | BUG(); | 5996 | BUG(); |
6125 | } | 5997 | } |
6126 | ret = CMD_TFO(cmd)->new_cmd_map(cmd); | 5998 | ret = cmd->se_tfo->new_cmd_map(cmd); |
6127 | if (ret < 0) { | 5999 | if (ret < 0) { |
6128 | cmd->transport_error_status = ret; | 6000 | cmd->transport_error_status = ret; |
6129 | transport_generic_request_failure(cmd, NULL, | 6001 | transport_generic_request_failure(cmd, NULL, |
@@ -6168,9 +6040,9 @@ get_cmd: | |||
6168 | printk(KERN_ERR "Unknown t_state: %d deferred_t_state:" | 6040 | printk(KERN_ERR "Unknown t_state: %d deferred_t_state:" |
6169 | " %d for ITT: 0x%08x i_state: %d on SE LUN:" | 6041 | " %d for ITT: 0x%08x i_state: %d on SE LUN:" |
6170 | " %u\n", t_state, cmd->deferred_t_state, | 6042 | " %u\n", t_state, cmd->deferred_t_state, |
6171 | CMD_TFO(cmd)->get_task_tag(cmd), | 6043 | cmd->se_tfo->get_task_tag(cmd), |
6172 | CMD_TFO(cmd)->get_cmd_state(cmd), | 6044 | cmd->se_tfo->get_cmd_state(cmd), |
6173 | SE_LUN(cmd)->unpacked_lun); | 6045 | cmd->se_lun->unpacked_lun); |
6174 | BUG(); | 6046 | BUG(); |
6175 | } | 6047 | } |
6176 | 6048 | ||
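The '.'-for-'->' swaps in this thread come from the changelog item "Make dev_queue_obj a member of se_device instead of a pointer", which removes a separate allocation. A minimal stand-in, with the field set trimmed to what the hunks above touch; these are not the full kernel definitions.

	#include <linux/wait.h>
	#include <linux/spinlock.h>
	#include <linux/list.h>

	struct se_queue_obj {
		atomic_t		queue_cnt;
		spinlock_t		cmd_queue_lock;
		struct list_head	qobj_list;
		wait_queue_head_t	thread_wq;
	};

	struct se_device {
		/* ... */
		struct se_queue_obj	dev_queue_obj;	/* was: struct se_queue_obj * */
	};

	static void wake_dev_thread(struct se_device *dev)
	{
		/* member access uses '.', and '&' recovers a pointer to pass on */
		wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
	}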
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c index df355176a377..16f41d188e26 100644 --- a/drivers/target/target_core_ua.c +++ b/drivers/target/target_core_ua.c | |||
@@ -80,10 +80,10 @@ int core_scsi3_ua_check( | |||
80 | case REQUEST_SENSE: | 80 | case REQUEST_SENSE: |
81 | return 0; | 81 | return 0; |
82 | default: | 82 | default: |
83 | return -1; | 83 | return -EINVAL; |
84 | } | 84 | } |
85 | 85 | ||
86 | return -1; | 86 | return -EINVAL; |
87 | } | 87 | } |
88 | 88 | ||
89 | int core_scsi3_ua_allocate( | 89 | int core_scsi3_ua_allocate( |
@@ -98,12 +98,12 @@ int core_scsi3_ua_allocate( | |||
98 | * PASSTHROUGH OPS | 98 | * PASSTHROUGH OPS |
99 | */ | 99 | */ |
100 | if (!(nacl)) | 100 | if (!(nacl)) |
101 | return -1; | 101 | return -EINVAL; |
102 | 102 | ||
103 | ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC); | 103 | ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC); |
104 | if (!(ua)) { | 104 | if (!(ua)) { |
105 | printk(KERN_ERR "Unable to allocate struct se_ua\n"); | 105 | printk(KERN_ERR "Unable to allocate struct se_ua\n"); |
106 | return -1; | 106 | return -ENOMEM; |
107 | } | 107 | } |
108 | INIT_LIST_HEAD(&ua->ua_dev_list); | 108 | INIT_LIST_HEAD(&ua->ua_dev_list); |
109 | INIT_LIST_HEAD(&ua->ua_nacl_list); | 109 | INIT_LIST_HEAD(&ua->ua_nacl_list); |
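The -1 to -EINVAL/-ENOMEM conversions above implement the changelog's "use errno values instead of returning -1 for everything", letting callers tell a bad argument from an allocation failure. A condensed sketch of the pattern, reusing the se_ua cache from the hunk; the wrapper function name is illustrative.

	static int ua_alloc_checked(struct se_node_acl *nacl, struct se_ua **out)
	{
		if (!nacl)
			return -EINVAL;		/* usage error: no ACL given */

		*out = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
		if (!*out)
			return -ENOMEM;		/* distinct, actionable failure */

		return 0;
	}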
@@ -179,7 +179,7 @@ int core_scsi3_ua_allocate( | |||
179 | 179 | ||
180 | printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:" | 180 | printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:" |
181 | " 0x%02x, ASCQ: 0x%02x\n", | 181 | " 0x%02x, ASCQ: 0x%02x\n", |
182 | TPG_TFO(nacl->se_tpg)->get_fabric_name(), unpacked_lun, | 182 | nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, |
183 | asc, ascq); | 183 | asc, ascq); |
184 | 184 | ||
185 | atomic_inc(&deve->ua_count); | 185 | atomic_inc(&deve->ua_count); |
@@ -208,7 +208,7 @@ void core_scsi3_ua_for_check_condition( | |||
208 | u8 *asc, | 208 | u8 *asc, |
209 | u8 *ascq) | 209 | u8 *ascq) |
210 | { | 210 | { |
211 | struct se_device *dev = SE_DEV(cmd); | 211 | struct se_device *dev = cmd->se_lun->lun_se_dev; |
212 | struct se_dev_entry *deve; | 212 | struct se_dev_entry *deve; |
213 | struct se_session *sess = cmd->se_sess; | 213 | struct se_session *sess = cmd->se_sess; |
214 | struct se_node_acl *nacl; | 214 | struct se_node_acl *nacl; |
@@ -240,7 +240,7 @@ void core_scsi3_ua_for_check_condition( | |||
240 | * highest priority UNIT_ATTENTION and ASC/ASCQ without | 240 | * highest priority UNIT_ATTENTION and ASC/ASCQ without |
241 | * clearing it. | 241 | * clearing it. |
242 | */ | 242 | */ |
243 | if (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) { | 243 | if (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) { |
244 | *asc = ua->ua_asc; | 244 | *asc = ua->ua_asc; |
245 | *ascq = ua->ua_ascq; | 245 | *ascq = ua->ua_ascq; |
246 | break; | 246 | break; |
@@ -267,10 +267,10 @@ void core_scsi3_ua_for_check_condition( | |||
267 | printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with" | 267 | printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with" |
268 | " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x" | 268 | " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x" |
269 | " reported ASC: 0x%02x, ASCQ: 0x%02x\n", | 269 | " reported ASC: 0x%02x, ASCQ: 0x%02x\n", |
270 | TPG_TFO(nacl->se_tpg)->get_fabric_name(), | 270 | nacl->se_tpg->se_tpg_tfo->get_fabric_name(), |
271 | (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) ? "Reporting" : | 271 | (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" : |
272 | "Releasing", DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl, | 272 | "Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl, |
273 | cmd->orig_fe_lun, T_TASK(cmd)->t_task_cdb[0], *asc, *ascq); | 273 | cmd->orig_fe_lun, cmd->t_task->t_task_cdb[0], *asc, *ascq); |
274 | } | 274 | } |
275 | 275 | ||
276 | int core_scsi3_ua_clear_for_request_sense( | 276 | int core_scsi3_ua_clear_for_request_sense( |
@@ -285,17 +285,17 @@ int core_scsi3_ua_clear_for_request_sense( | |||
285 | int head = 1; | 285 | int head = 1; |
286 | 286 | ||
287 | if (!(sess)) | 287 | if (!(sess)) |
288 | return -1; | 288 | return -EINVAL; |
289 | 289 | ||
290 | nacl = sess->se_node_acl; | 290 | nacl = sess->se_node_acl; |
291 | if (!(nacl)) | 291 | if (!(nacl)) |
292 | return -1; | 292 | return -EINVAL; |
293 | 293 | ||
294 | spin_lock_irq(&nacl->device_list_lock); | 294 | spin_lock_irq(&nacl->device_list_lock); |
295 | deve = &nacl->device_list[cmd->orig_fe_lun]; | 295 | deve = &nacl->device_list[cmd->orig_fe_lun]; |
296 | if (!(atomic_read(&deve->ua_count))) { | 296 | if (!(atomic_read(&deve->ua_count))) { |
297 | spin_unlock_irq(&nacl->device_list_lock); | 297 | spin_unlock_irq(&nacl->device_list_lock); |
298 | return -1; | 298 | return -EPERM; |
299 | } | 299 | } |
300 | /* | 300 | /* |
301 | * The highest priority Unit Attentions are placed at the head of the | 301 | * The highest priority Unit Attentions are placed at the head of the |
@@ -325,8 +325,8 @@ int core_scsi3_ua_clear_for_request_sense( | |||
325 | 325 | ||
326 | printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped" | 326 | printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped" |
327 | " LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x," | 327 | " LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x," |
328 | " ASCQ: 0x%02x\n", TPG_TFO(nacl->se_tpg)->get_fabric_name(), | 328 | " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(), |
329 | cmd->orig_fe_lun, *asc, *ascq); | 329 | cmd->orig_fe_lun, *asc, *ascq); |
330 | 330 | ||
331 | return (head) ? -1 : 0; | 331 | return (head) ? -EPERM : 0; |
332 | } | 332 | } |
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 92a449aededa..19b2b9948314 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c | |||
@@ -72,7 +72,7 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) | |||
72 | caller, cmd, cmd->cdb); | 72 | caller, cmd, cmd->cdb); |
73 | printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun); | 73 | printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun); |
74 | 74 | ||
75 | task = T_TASK(se_cmd); | 75 | task = se_cmd->t_task; |
76 | printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n", | 76 | printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n", |
77 | caller, cmd, task, task->t_tasks_se_num, | 77 | caller, cmd, task, task->t_tasks_se_num, |
78 | task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags); | 78 | task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags); |
@@ -262,9 +262,9 @@ int ft_write_pending(struct se_cmd *se_cmd) | |||
262 | * TCM/LIO target | 262 | * TCM/LIO target |
263 | */ | 263 | */ |
264 | transport_do_task_sg_chain(se_cmd); | 264 | transport_do_task_sg_chain(se_cmd); |
265 | cmd->sg = T_TASK(se_cmd)->t_tasks_sg_chained; | 265 | cmd->sg = se_cmd->t_task->t_tasks_sg_chained; |
266 | cmd->sg_cnt = | 266 | cmd->sg_cnt = |
267 | T_TASK(se_cmd)->t_tasks_sg_chained_no; | 267 | se_cmd->t_task->t_tasks_sg_chained_no; |
268 | } | 268 | } |
269 | if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid, | 269 | if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid, |
270 | cmd->sg, cmd->sg_cnt)) | 270 | cmd->sg, cmd->sg_cnt)) |
@@ -670,7 +670,6 @@ static void ft_send_cmd(struct ft_cmd *cmd) | |||
670 | 670 | ||
671 | err: | 671 | err: |
672 | ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID); | 672 | ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID); |
673 | return; | ||
674 | } | 673 | } |
675 | 674 | ||
676 | /* | 675 | /* |
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 84e868c255dd..8c5067c65720 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c | |||
@@ -582,10 +582,10 @@ int ft_register_configfs(void) | |||
582 | * Register the top level struct config_item_type with TCM core | 582 | * Register the top level struct config_item_type with TCM core |
583 | */ | 583 | */ |
584 | fabric = target_fabric_configfs_init(THIS_MODULE, "fc"); | 584 | fabric = target_fabric_configfs_init(THIS_MODULE, "fc"); |
585 | if (!fabric) { | 585 | if (IS_ERR(fabric)) { |
586 | printk(KERN_INFO "%s: target_fabric_configfs_init() failed!\n", | 586 | printk(KERN_INFO "%s: target_fabric_configfs_init() failed!\n", |
587 | __func__); | 587 | __func__); |
588 | return -1; | 588 | return PTR_ERR(fabric); |
589 | } | 589 | } |
590 | fabric->tf_ops = ft_fabric_ops; | 590 | fabric->tf_ops = ft_fabric_ops; |
591 | 591 | ||
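The check above changes because target_fabric_configfs_init() reports failure via ERR_PTR(); a NULL test would treat the encoded errno as a valid pointer. A condensed sketch of the corrected call path, with the surrounding registration steps elided.

	static int register_fc_fabric(void)
	{
		struct target_fabric_configfs *fabric;

		fabric = target_fabric_configfs_init(THIS_MODULE, "fc");
		if (IS_ERR(fabric))
			return PTR_ERR(fabric);	/* propagate the encoded errno */

		/* ... fill fabric->tf_ops and finish registration ... */
		return 0;
	}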
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index 8c4a24077d9d..47efcfb9f4b8 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c | |||
@@ -90,7 +90,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd) | |||
90 | lport = ep->lp; | 90 | lport = ep->lp; |
91 | cmd->seq = lport->tt.seq_start_next(cmd->seq); | 91 | cmd->seq = lport->tt.seq_start_next(cmd->seq); |
92 | 92 | ||
93 | task = T_TASK(se_cmd); | 93 | task = se_cmd->t_task; |
94 | BUG_ON(!task); | 94 | BUG_ON(!task); |
95 | remaining = se_cmd->data_length; | 95 | remaining = se_cmd->data_length; |
96 | 96 | ||
@@ -236,7 +236,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) | |||
236 | u32 f_ctl; | 236 | u32 f_ctl; |
237 | void *buf; | 237 | void *buf; |
238 | 238 | ||
239 | task = T_TASK(se_cmd); | 239 | task = se_cmd->t_task; |
240 | BUG_ON(!task); | 240 | BUG_ON(!task); |
241 | 241 | ||
242 | fh = fc_frame_header_get(fp); | 242 | fh = fc_frame_header_get(fp); |
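ft_write_pending() earlier in this diff hands se_cmd->t_task->t_tasks_sg_chained to the lport's DDP setup; consumers walk such a chained scatterlist with the standard iterator, which follows chain links transparently. A small sketch of that consumption; the helper name is mine, not from the tree.

	#include <linux/scatterlist.h>
	#include <linux/types.h>

	static u32 sg_chain_total(struct scatterlist *sgl, int nents)
	{
		struct scatterlist *sg;
		u32 total = 0;
		int i;

		for_each_sg(sgl, sg, nents, i)
			total += sg->length;	/* sg_next() steps across chain links */

		return total;
	}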