author	Nicholas Bellinger <nab@linux-iscsi.org>	2015-09-16 23:23:53 -0400
committer	Nicholas Bellinger <nab@linux-iscsi.org>	2015-09-25 02:17:27 -0400
commit	8fa3a867486f85df66eba8c4df85804d3309c6ad (patch)
tree	9008062c9e3567b5cf5d3b589bf7e57259da7692
parent	f3bb467ff64b2598d023b3a07592748fa7768b6a (diff)
target: Make TCM_WRITE_PROTECT failure honor D_SENSE bit
This patch changes transport_lookup_cmd_lun() to still take the
se_lun->lun_ref and the se_cmd->se_dev rcu_dereference when failing
with TCM_WRITE_PROTECTED -> CHECK_CONDITION status.

Do this to ensure the active Control mode page D_SENSE bit is
honored when the sense data for the failure is generated.
Reported-by: Sagi Grimberg <sagig@mellanox.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
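
For context on the D_SENSE bit referenced above: in the SPC Control mode page,
D_SENSE selects whether a CHECK CONDITION carries descriptor-format (response
code 0x72) or fixed-format (0x70) sense data, which is why the failure path
needs a valid se_cmd->se_dev to consult the device's setting. The standalone
sketch below shows that selection for the DATA PROTECT / WRITE PROTECTED
condition raised here; it illustrates the SPC-4 layouts only and is not the
target-core implementation, and the helper name is made up.

#include <stdint.h>
#include <string.h>

/*
 * Illustrative only: build sense data for DATA PROTECT / WRITE PROTECTED
 * (sense key 0x07, ASC 0x27, ASCQ 0x00), honoring a D_SENSE-style flag.
 * Layouts follow the SPC-4 fixed and descriptor sense formats; this is
 * not the kernel target-core code.
 */
void fill_write_protected_sense(uint8_t *buf, size_t len, int d_sense)
{
	if (len < 18)
		return;
	memset(buf, 0, len);

	if (d_sense) {
		buf[0] = 0x72;		/* current, descriptor format */
		buf[1] = 0x07;		/* sense key: DATA PROTECT */
		buf[2] = 0x27;		/* ASC: WRITE PROTECTED */
		buf[3] = 0x00;		/* ASCQ */
		buf[7] = 0x00;		/* no additional sense descriptors */
	} else {
		buf[0] = 0x70;		/* current, fixed format */
		buf[2] = 0x07;		/* sense key: DATA PROTECT */
		buf[7] = 0x0a;		/* additional sense length */
		buf[12] = 0x27;		/* ASC */
		buf[13] = 0x00;		/* ASCQ */
	}
}

A caller would pick d_sense from the device's Control mode page setting before
returning the buffer alongside CHECK CONDITION status.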
-rw-r--r--	drivers/target/target_core_device.c	40
1 file changed, 23 insertions(+), 17 deletions(-)
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index abf20763b0b6..88ea4e4f124b 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -62,22 +62,13 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
 	struct se_session *se_sess = se_cmd->se_sess;
 	struct se_node_acl *nacl = se_sess->se_node_acl;
 	struct se_dev_entry *deve;
+	sense_reason_t ret = TCM_NO_SENSE;
 
 	rcu_read_lock();
 	deve = target_nacl_find_deve(nacl, unpacked_lun);
 	if (deve) {
 		atomic_long_inc(&deve->total_cmds);
 
-		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
-		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
-			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
-				" Access for 0x%08llx\n",
-				se_cmd->se_tfo->get_fabric_name(),
-				unpacked_lun);
-			rcu_read_unlock();
-			return TCM_WRITE_PROTECTED;
-		}
-
 		if (se_cmd->data_direction == DMA_TO_DEVICE)
 			atomic_long_add(se_cmd->data_length,
 					&deve->write_bytes);
@@ -93,6 +84,17 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
 
 		percpu_ref_get(&se_lun->lun_ref);
 		se_cmd->lun_ref_active = true;
+
+		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
+		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
+			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
+				" Access for 0x%08llx\n",
+				se_cmd->se_tfo->get_fabric_name(),
+				unpacked_lun);
+			rcu_read_unlock();
+			ret = TCM_WRITE_PROTECTED;
+			goto ref_dev;
+		}
 	}
 	rcu_read_unlock();
 
@@ -109,12 +111,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
 				unpacked_lun);
 			return TCM_NON_EXISTENT_LUN;
 		}
-		/*
-		 * Force WRITE PROTECT for virtual LUN 0
-		 */
-		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
-		    (se_cmd->data_direction != DMA_NONE))
-			return TCM_WRITE_PROTECTED;
 
 		se_lun = se_sess->se_tpg->tpg_virt_lun0;
 		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
@@ -123,6 +119,15 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
 
 		percpu_ref_get(&se_lun->lun_ref);
 		se_cmd->lun_ref_active = true;
+
+		/*
+		 * Force WRITE PROTECT for virtual LUN 0
+		 */
+		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
+		    (se_cmd->data_direction != DMA_NONE)) {
+			ret = TCM_WRITE_PROTECTED;
+			goto ref_dev;
+		}
 	}
 	/*
 	 * RCU reference protected by percpu se_lun->lun_ref taken above that
@@ -130,6 +135,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
 	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
 	 * target_core_fabric_configfs.c:target_fabric_port_release
 	 */
+ref_dev:
 	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
 	atomic_long_inc(&se_cmd->se_dev->num_cmds);
 
@@ -140,7 +146,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
 		atomic_long_add(se_cmd->data_length,
 				&se_cmd->se_dev->read_bytes);
 
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(transport_lookup_cmd_lun);
 
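
Taken together, the hunks replace the early TCM_WRITE_PROTECTED returns with a
record-and-fall-through pattern: the status is stored in ret and control jumps
to the new ref_dev label, so se_cmd->se_dev is pinned even on failure. The toy
program below mirrors that shape only; every identifier in it is invented for
illustration and none of it is kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the control-flow change in this patch: rather than bailing
 * out before the device reference is taken, record the failure status and
 * fall through to the common exit that attaches the device, so the error
 * completion can still read per-device settings (here, d_sense).
 */
struct toy_dev { bool d_sense; int refcount; };
struct toy_lun { struct toy_dev *dev; bool write_protected; };
struct toy_cmd { struct toy_lun *lun; struct toy_dev *dev; bool is_write; };

enum { TOY_OK = 0, TOY_WRITE_PROTECTED = 1 };

int toy_lookup(struct toy_cmd *cmd, struct toy_lun *lun)
{
	int ret = TOY_OK;

	cmd->lun = lun;
	lun->dev->refcount++;			/* reference taken unconditionally */

	if (cmd->is_write && lun->write_protected)
		ret = TOY_WRITE_PROTECTED;	/* record, do not return early */

	cmd->dev = lun->dev;			/* common exit: device always attached */
	return ret;
}

int main(void)
{
	struct toy_dev dev = { .d_sense = true, .refcount = 0 };
	struct toy_lun lun = { .dev = &dev, .write_protected = true };
	struct toy_cmd cmd = { .is_write = true };

	int ret = toy_lookup(&cmd, &lun);
	/* cmd.dev is valid even though ret reports a failure, so sense
	 * generation could honor dev.d_sense at this point. */
	printf("status=%d, d_sense=%d, refcount=%d\n",
	       ret, cmd.dev->d_sense, cmd.dev->refcount);
	return 0;
}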