author		Andrew Vasquez <andrew.vasquez@qlogic.com>	2005-04-17 16:10:41 -0400
committer	James Bottomley <jejb@titanic>	2005-04-18 14:48:12 -0400
commit		83021920e733aa706926046b6ab61912c0d63943 (patch)
tree		e33d19493190076f147912e40207f6956c290eb9 /drivers/scsi/qla2xxx
parent		bdf796210e01145fd77bbb3ae644d04f982b6c3b (diff)
[PATCH] qla2xxx: cleanup DMA mappings...
Don't use cmd->request->nr_hw_segments as it may not be initialized (SG_IO in
particular bypasses anything that initializes this and just uses scsi_do_req
to insert a scsi_request directly on the head of the queue) and a bogus value
here can trip up the checks to make sure that the number of segments will fit
in the queue ring buffer, resulting in commands that are never completed.

Fix up several issues with PCI DMA mapping and failure to check return values
on the mappings. Make the check for space in the ring buffer happen after the
DMA mapping is done, since any check done before the mapping has taken place
is bogus.

Doug Ledford <dledford@redhat.com>

Signed-off-by: Andrew Vasquez <andrew.vasquez@qlogic.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
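The ordering the patch establishes is the interesting part: map first, derive
the segment count from what the mapping actually produced, only then check
ring space, and unwind the mapping on any failure path. Below is a minimal
stand-alone C sketch of that ordering. It is not driver code: map_buffer(),
unmap_buffer(), and RING_SLOTS_FREE are hypothetical stand-ins for
pci_map_sg()/pci_map_single(), their unmap counterparts, and the request-ring
accounting; the "+ 2" mirrors the driver's (req_cnt + 2) headroom check.

#include <stdio.h>
#include <stdbool.h>

#define RING_SLOTS_FREE	4	/* pretend the request ring has 4 free slots */

static int map_buffer(int nbytes)
{
	/* Stand-in for pci_map_sg()/pci_map_single(): returns the number
	 * of mapped segments, or 0 on mapping failure. */
	return nbytes > 0 ? (nbytes + 4095) / 4096 : 0;
}

static void unmap_buffer(void)
{
	/* Stand-in for pci_unmap_sg()/pci_unmap_single(). */
	puts("unmapped");
}

static bool start_cmd(int nbytes)
{
	int tot_dsds = 0;	/* 0 means "nothing mapped yet" */

	tot_dsds = map_buffer(nbytes);
	if (tot_dsds == 0)
		goto queuing_error;	/* mapping failed: nothing to unwind */

	/* The segment count is only trustworthy after the mapping, so the
	 * ring-space check must come after it, as in the patch. */
	if (tot_dsds + 2 > RING_SLOTS_FREE)
		goto queuing_error;

	printf("queued %d segment(s)\n", tot_dsds);
	return true;

queuing_error:
	if (tot_dsds)		/* unwind whatever we mapped above */
		unmap_buffer();
	return false;
}

int main(void)
{
	start_cmd(4096);	/* fits: 1 segment */
	start_cmd(64 * 4096);	/* mapped, ring check fails, then unwound */
	return 0;
}

The same shape appears in the real patch: tot_dsds is zeroed up front so the
queuing_error path can tell whether a mapping needs to be undone.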
Diffstat (limited to 'drivers/scsi/qla2xxx')
-rw-r--r--	drivers/scsi/qla2xxx/qla_iocb.c	72
1 file changed, 33 insertions(+), 39 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 59d62cbb994e..af964bb3d870 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -216,18 +216,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
 			cur_seg++;
 		}
 	} else {
-		dma_addr_t	req_dma;
-		struct page	*page;
-		unsigned long	offset;
-
-		page = virt_to_page(cmd->request_buffer);
-		offset = ((unsigned long)cmd->request_buffer & ~PAGE_MASK);
-		req_dma = pci_map_page(ha->pdev, page, offset,
-		    cmd->request_bufflen, cmd->sc_data_direction);
-
-		sp->dma_handle = req_dma;
-
-		*cur_dsd++ = cpu_to_le32(req_dma);
+		*cur_dsd++ = cpu_to_le32(sp->dma_handle);
 		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
 	}
 }
@@ -299,19 +288,8 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 			cur_seg++;
 		}
 	} else {
-		dma_addr_t	req_dma;
-		struct page	*page;
-		unsigned long	offset;
-
-		page = virt_to_page(cmd->request_buffer);
-		offset = ((unsigned long)cmd->request_buffer & ~PAGE_MASK);
-		req_dma = pci_map_page(ha->pdev, page, offset,
-		    cmd->request_bufflen, cmd->sc_data_direction);
-
-		sp->dma_handle = req_dma;
-
-		*cur_dsd++ = cpu_to_le32(LSD(req_dma));
-		*cur_dsd++ = cpu_to_le32(MSD(req_dma));
+		*cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
+		*cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
 		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
 	}
 }
@@ -345,6 +323,8 @@ qla2x00_start_scsi(srb_t *sp)
 	ha = sp->ha;
 	reg = ha->iobase;
 	cmd = sp->cmd;
+	/* So we know we haven't pci_map'ed anything yet */
+	tot_dsds = 0;
 
 	/* Send marker if required */
 	if (ha->marker_needed != 0) {
@@ -369,8 +349,27 @@ qla2x00_start_scsi(srb_t *sp)
 	if (index == MAX_OUTSTANDING_COMMANDS)
 		goto queuing_error;
 
+	/* Map the sg table so we have an accurate count of sg entries needed */
+	if (cmd->use_sg) {
+		sg = (struct scatterlist *) cmd->request_buffer;
+		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
+		    cmd->sc_data_direction);
+		if (tot_dsds == 0)
+			goto queuing_error;
+	} else if (cmd->request_bufflen) {
+		dma_addr_t	req_dma;
+
+		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
+		    cmd->request_bufflen, cmd->sc_data_direction);
+		if (dma_mapping_error(req_dma))
+			goto queuing_error;
+
+		sp->dma_handle = req_dma;
+		tot_dsds = 1;
+	}
+
 	/* Calculate the number of request entries needed. */
-	req_cnt = (ha->calc_request_entries)(cmd->request->nr_hw_segments);
+	req_cnt = (ha->calc_request_entries)(tot_dsds);
 	if (ha->req_q_cnt < (req_cnt + 2)) {
 		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
 		if (ha->req_ring_index < cnt)
@@ -382,19 +381,6 @@ qla2x00_start_scsi(srb_t *sp)
 	if (ha->req_q_cnt < (req_cnt + 2))
 		goto queuing_error;
 
-	/* Finally, we have enough space, now perform mappings. */
-	tot_dsds = 0;
-	if (cmd->use_sg) {
-		sg = (struct scatterlist *) cmd->request_buffer;
-		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
-		    cmd->sc_data_direction);
-		if (tot_dsds == 0)
-			goto queuing_error;
-	} else if (cmd->request_bufflen) {
-		tot_dsds++;
-	}
-	req_cnt = (ha->calc_request_entries)(tot_dsds);
-
 	/* Build command packet */
 	ha->current_outstanding_cmd = handle;
 	ha->outstanding_cmds[handle] = sp;
@@ -461,6 +447,14 @@ qla2x00_start_scsi(srb_t *sp)
 	return (QLA_SUCCESS);
 
 queuing_error:
+	if (cmd->use_sg && tot_dsds) {
+		sg = (struct scatterlist *) cmd->request_buffer;
+		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
+		    cmd->sc_data_direction);
+	} else if (tot_dsds) {
+		pci_unmap_single(ha->pdev, sp->dma_handle,
+		    cmd->request_bufflen, cmd->sc_data_direction);
+	}
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	return (QLA_FUNCTION_FAILED);