Diffstat (limited to 'drivers/scsi/qla4xxx/ql4_iocb.c')
 drivers/scsi/qla4xxx/ql4_iocb.c | 101 +++++++++++++++-------------------------
 1 file changed, 38 insertions(+), 63 deletions(-)
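
This patch converts ql4_iocb.c from the old scsi_cmnd buffer fields (use_sg, request_buffer, request_bufflen) and the raw pci_map_sg()/pci_map_single() calls to the generic SCSI data buffer accessors: scsi_dma_map(), scsi_dma_unmap(), scsi_bufflen() and scsi_for_each_sg(). The accessors present every command's data as a scatter-gather list, so the separate single-buffer code paths disappear; that is where most of the 63 deleted lines go. Short notes, with standalone sketches, follow the relevant hunks below.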
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index a216a1781afb..5006ecb3ef5e 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -6,6 +6,10 @@
  */
 
 #include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
+
 
 #include <scsi/scsi_tcq.h>
 
@@ -141,11 +145,13 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
 	uint16_t avail_dsds;
 	struct data_seg_a64 *cur_dsd;
 	struct scsi_cmnd *cmd;
+	struct scatterlist *sg;
+	int i;
 
 	cmd = srb->cmd;
 	ha = srb->ha;
 
-	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
+	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
 		/* No data being transferred */
 		cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
 		return;
@@ -154,40 +160,27 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
 	avail_dsds = COMMAND_SEG;
 	cur_dsd = (struct data_seg_a64 *) & (cmd_entry->dataseg[0]);
 
-	/* Load data segments */
-	if (cmd->use_sg) {
-		struct scatterlist *cur_seg;
-		struct scatterlist *end_seg;
-
-		cur_seg = (struct scatterlist *)cmd->request_buffer;
-		end_seg = cur_seg + tot_dsds;
-		while (cur_seg < end_seg) {
-			dma_addr_t sle_dma;
-
-			/* Allocate additional continuation packets? */
-			if (avail_dsds == 0) {
-				struct continuation_t1_entry *cont_entry;
-
-				cont_entry = qla4xxx_alloc_cont_entry(ha);
-				cur_dsd =
-					(struct data_seg_a64 *)
-					&cont_entry->dataseg[0];
-				avail_dsds = CONTINUE_SEG;
-			}
-
-			sle_dma = sg_dma_address(cur_seg);
-			cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
-			cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
-			cur_dsd->count = cpu_to_le32(sg_dma_len(cur_seg));
-			avail_dsds--;
-
-			cur_dsd++;
-			cur_seg++;
-		}
-	} else {
-		cur_dsd->base.addrLow = cpu_to_le32(LSDW(srb->dma_handle));
-		cur_dsd->base.addrHigh = cpu_to_le32(MSDW(srb->dma_handle));
-		cur_dsd->count = cpu_to_le32(cmd->request_bufflen);
+	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+		dma_addr_t sle_dma;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			struct continuation_t1_entry *cont_entry;
+
+			cont_entry = qla4xxx_alloc_cont_entry(ha);
+			cur_dsd =
+				(struct data_seg_a64 *)
+				&cont_entry->dataseg[0];
+			avail_dsds = CONTINUE_SEG;
+		}
+
+		sle_dma = sg_dma_address(sg);
+		cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
+		cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
+		cur_dsd->count = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
+
+		cur_dsd++;
 	}
 }
 
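
Note: the open-coded while loop and its single-buffer else branch collapse into one scsi_for_each_sg() walk; the continuation-entry logic is unchanged. DSDs fill the COMMAND_SEG slots in the command IOCB first, then qla4xxx_alloc_cont_entry() chains continuation IOCBs holding CONTINUE_SEG slots each. A minimal userspace model of that slot accounting (the slot counts below are illustrative assumptions, not the ql4_fw.h values):

	/* Model of the DSD-filling loop above: N scatter-gather segments
	 * are written into COMMAND_SEG slots in the command IOCB, then
	 * CONTINUE_SEG slots per chained continuation IOCB. */
	#include <stdio.h>

	#define COMMAND_SEG  4   /* assumed slots in the command entry */
	#define CONTINUE_SEG 7   /* assumed slots per continuation entry */

	int main(void)
	{
		int tot_dsds = 13;           /* e.g. a 13-segment sg list */
		int avail_dsds = COMMAND_SEG;
		int cont_entries = 0;
		int i;

		for (i = 0; i < tot_dsds; i++) {
			if (avail_dsds == 0) {   /* entry full: chain one */
				cont_entries++;
				avail_dsds = CONTINUE_SEG;
			}
			/* here the driver writes addrLow/addrHigh/count */
			avail_dsds--;
		}
		/* prints: 13 segments -> 1 command entry + 2 continuation entries */
		printf("%d segments -> 1 command entry + %d continuation entries\n",
		       tot_dsds, cont_entries);
		return 0;
	}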
@@ -204,8 +197,8 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
 	struct scsi_cmnd *cmd = srb->cmd;
 	struct ddb_entry *ddb_entry;
 	struct command_t3_entry *cmd_entry;
-	struct scatterlist *sg = NULL;
 
+	int nseg;
 	uint16_t tot_dsds;
 	uint16_t req_cnt;
 
@@ -233,24 +226,11 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
 	index = (uint32_t)cmd->request->tag;
 
 	/* Calculate the number of request entries needed. */
-	if (cmd->use_sg) {
-		sg = (struct scatterlist *)cmd->request_buffer;
-		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
-				      cmd->sc_data_direction);
-		if (tot_dsds == 0)
-			goto queuing_error;
-	} else if (cmd->request_bufflen) {
-		dma_addr_t req_dma;
-
-		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
-					 cmd->request_bufflen,
-					 cmd->sc_data_direction);
-		if (dma_mapping_error(req_dma))
-			goto queuing_error;
-
-		srb->dma_handle = req_dma;
-		tot_dsds = 1;
-	}
+	nseg = scsi_dma_map(cmd);
+	if (nseg < 0)
+		goto queuing_error;
+	tot_dsds = nseg;
+
 	req_cnt = qla4xxx_calc_request_entries(tot_dsds);
 
 	if (ha->req_q_count < (req_cnt + 2)) {
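
Note: scsi_dma_map() folds both old mapping branches into one call. It returns the number of mapped scatter-gather entries, 0 for a command that moves no data, or a negative value if DMA mapping fails. A standalone sketch of that return-value contract, with stub_scsi_dma_map() as a hypothetical stand-in for the real kernel routine:

	#include <stdio.h>

	/* Hypothetical stand-in for scsi_dma_map(): >0 mapped entries,
	 * 0 for a no-data command, <0 on DMA mapping failure. */
	static int stub_scsi_dma_map(int sg_count, int map_fails)
	{
		if (map_fails)
			return -1;
		return sg_count;	/* 0 when the command moves no data */
	}

	int main(void)
	{
		int nseg = stub_scsi_dma_map(8, 0);

		if (nseg < 0) {
			puts("queuing_error");	/* nothing was mapped */
			return 1;
		}
		printf("tot_dsds = %d\n", nseg);	/* prints: tot_dsds = 8 */
		return 0;
	}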
@@ -279,7 +259,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
 
 	int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
 	cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn);
-	cmd_entry->ttlByteCnt = cpu_to_le32(cmd->request_bufflen);
+	cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd));
 	memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
 	cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
 	cmd_entry->hdr.entryCount = req_cnt;
@@ -289,13 +269,13 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
 	 * transferred, as the data direction bit is sometimed filled
 	 * in when there is no data to be transferred */
 	cmd_entry->control_flags = CF_NO_DATA;
-	if (cmd->request_bufflen) {
+	if (scsi_bufflen(cmd)) {
 		if (cmd->sc_data_direction == DMA_TO_DEVICE)
 			cmd_entry->control_flags = CF_WRITE;
 		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
 			cmd_entry->control_flags = CF_READ;
 
-		ha->bytes_xfered += cmd->request_bufflen;
+		ha->bytes_xfered += scsi_bufflen(cmd);
 		if (ha->bytes_xfered & ~0xFFFFF){
 			ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
 			ha->bytes_xfered &= 0xFFFFF;
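
Note: only the accessor changes in this hunk; the accounting itself is untouched. It keeps a sub-mebibyte remainder in the low 20 bits of bytes_xfered (0xFFFFF = 2^20 - 1) and folds whole mebibytes into total_mbytes_xferred, so the running byte counter never grows without bound. A standalone check of that carry logic:

	#include <stdio.h>

	int main(void)
	{
		unsigned long bytes_xfered = 0, total_mbytes = 0;
		unsigned long xfers[] = { 700 * 1024, 700 * 1024, 3 * 1024 * 1024 };
		unsigned i;

		for (i = 0; i < 3; i++) {
			bytes_xfered += xfers[i];
			if (bytes_xfered & ~0xFFFFF) {      /* >= 1 MiB pending */
				total_mbytes += bytes_xfered >> 20;
				bytes_xfered &= 0xFFFFF;    /* keep remainder */
			}
		}
		/* 700 KiB + 700 KiB + 3 MiB = 4,579,328 bytes
		 * = 4 MiB + 385,024 bytes; prints: MiB: 4, remainder: 385024 */
		printf("MiB: %lu, remainder: %lu\n", total_mbytes, bytes_xfered);
		return 0;
	}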
@@ -359,14 +339,9 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
 	return QLA_SUCCESS;
 
 queuing_error:
+	if (tot_dsds)
+		scsi_dma_unmap(cmd);
 
-	if (cmd->use_sg && tot_dsds) {
-		sg = (struct scatterlist *) cmd->request_buffer;
-		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
-			     cmd->sc_data_direction);
-	} else if (tot_dsds)
-		pci_unmap_single(ha->pdev, srb->dma_handle,
-				 cmd->request_bufflen, cmd->sc_data_direction);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	return QLA_ERROR;
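
Note: on the error path a single scsi_dma_unmap(cmd), guarded by tot_dsds, replaces both pci_unmap_sg() and pci_unmap_single(). With the accessors every mapped command is described by a scatter-gather list, so there is no longer a srb->dma_handle single-mapping case to undo.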