path: root/drivers/scsi/ips.c
author    FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>    2007-06-13 10:27:09 -0400
committer James Bottomley <jejb@mulgrave.(none)>             2007-06-16 16:39:42 -0400
commit    2f4cf91cc0a1f32f75e1fa0a4d70a9bc7340a302 (patch)
tree      e181fdf8ddcc3146c4b70734d82de518b9a23623 /drivers/scsi/ips.c
parent    bb350d1decd9c48ffaa7f7e263df3056df9f4f21 (diff)
[SCSI] ips: convert to use the data buffer accessors
- remove the unnecessary map_single path.

- convert to use the new accessors for the sg lists and the parameters.

Jens Axboe <jens.axboe@oracle.com> did the for_each_sg cleanup.

TODO: use scsi_for_each_sg() in the breakup handling.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: "Salyzyn, Mark" <mark_salyzyn@adaptec.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'drivers/scsi/ips.c')
-rw-r--r--    drivers/scsi/ips.c    256
1 file changed, 99 insertions(+), 157 deletions(-)
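
[Editor's note: before the diff, a minimal sketch of the accessor pattern this
conversion targets. Nothing below is ips.c code; my_map_data() and my_fill_sg()
are hypothetical stand-ins for a driver's queueing and descriptor-fill
routines. The point is the shape: one scsi_dma_map() call replaces the old
use_sg/request_buffer branching, and scsi_for_each_sg() walks the mapped list.]

	#include <scsi/scsi_cmnd.h>

	/* Hypothetical helper standing in for a driver's HW SG-entry fill. */
	static void my_fill_sg(dma_addr_t addr, unsigned int len, int idx)
	{
		/* a real driver would write addr/len into hardware entry idx */
	}

	static int my_map_data(struct scsi_cmnd *SC)
	{
		struct scatterlist *sg;
		int i, nseg;

		/* One call replaces the pci_map_sg()/pci_map_single() split; */
		/* it returns 0 for commands that carry no data.              */
		nseg = scsi_dma_map(SC);
		if (nseg < 0)
			return nseg;	/* DMA mapping failed */

		scsi_for_each_sg(SC, sg, nseg, i)
			my_fill_sg(sg_dma_address(sg), sg_dma_len(sg), i);

		return 0;
	}
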
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 84f4f5d06f9d..f9fce70f396a 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -1104,7 +1104,7 @@ static int ips_queue(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
 		/* A Reset IOCTL is only sent by the boot CD in extreme cases. */
 		/* There can never be any system activity ( network or disk ), but check */
 		/* anyway just as a good practice. */
-		pt = (ips_passthru_t *) SC->request_buffer;
+		pt = (ips_passthru_t *) scsi_sglist(SC);
 		if ((pt->CoppCP.cmd.reset.op_code == IPS_CMD_RESET_CHANNEL) &&
 		    (pt->CoppCP.cmd.reset.adapter_flag == 1)) {
 			if (ha->scb_activelist.count != 0) {
@@ -1507,30 +1507,22 @@ static int ips_is_passthru(struct scsi_cmnd *SC)
 	if ((SC->cmnd[0] == IPS_IOCTL_COMMAND) &&
 	    (SC->device->channel == 0) &&
 	    (SC->device->id == IPS_ADAPTER_ID) &&
-	    (SC->device->lun == 0) && SC->request_buffer) {
-		if ((!SC->use_sg) && SC->request_bufflen &&
-		    (((char *) SC->request_buffer)[0] == 'C') &&
-		    (((char *) SC->request_buffer)[1] == 'O') &&
-		    (((char *) SC->request_buffer)[2] == 'P') &&
-		    (((char *) SC->request_buffer)[3] == 'P'))
-			return 1;
-		else if (SC->use_sg) {
-			struct scatterlist *sg = SC->request_buffer;
-			char  *buffer;
-
-			/* kmap_atomic() ensures addressability of the user buffer.*/
-			/* local_irq_save() protects the KM_IRQ0 address slot.     */
-			local_irq_save(flags);
-			buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
-			if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
-			    buffer[2] == 'P' && buffer[3] == 'P') {
-				kunmap_atomic(buffer - sg->offset, KM_IRQ0);
-				local_irq_restore(flags);
-				return 1;
-			}
-			kunmap_atomic(buffer - sg->offset, KM_IRQ0);
-			local_irq_restore(flags);
-		}
+	    (SC->device->lun == 0) && scsi_sglist(SC)) {
+		struct scatterlist *sg = scsi_sglist(SC);
+		char  *buffer;
+
+		/* kmap_atomic() ensures addressability of the user buffer.*/
+		/* local_irq_save() protects the KM_IRQ0 address slot.     */
+		local_irq_save(flags);
+		buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+		if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
+		    buffer[2] == 'P' && buffer[3] == 'P') {
+			kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+			local_irq_restore(flags);
+			return 1;
+		}
+		kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+		local_irq_restore(flags);
 	}
 	return 0;
 }
@@ -1581,18 +1573,14 @@ ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr)
 {
 	ips_passthru_t *pt;
 	int length = 0;
-	int ret;
+	int i, ret;
+	struct scatterlist *sg = scsi_sglist(SC);
 
 	METHOD_TRACE("ips_make_passthru", 1);
 
-	if (!SC->use_sg) {
-		length = SC->request_bufflen;
-	} else {
-		struct scatterlist *sg = SC->request_buffer;
-		int i;
-		for (i = 0; i < SC->use_sg; i++)
-			length += sg[i].length;
-	}
+	scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i)
+		length += sg[i].length;
+
 	if (length < sizeof (ips_passthru_t)) {
 		/* wrong size */
 		DEBUG_VAR(1, "(%s%d) Passthru structure wrong size",
@@ -2016,7 +2004,7 @@ ips_cleanup_passthru(ips_ha_t * ha, ips_scb_t * scb)
 
 	METHOD_TRACE("ips_cleanup_passthru", 1);
 
-	if ((!scb) || (!scb->scsi_cmd) || (!scb->scsi_cmd->request_buffer)) {
+	if ((!scb) || (!scb->scsi_cmd) || (!scsi_sglist(scb->scsi_cmd))) {
 		DEBUG_VAR(1, "(%s%d) couldn't cleanup after passthru",
 			  ips_name, ha->host_num);
 
@@ -2766,41 +2754,26 @@ ips_next(ips_ha_t * ha, int intr)
 		/* copy in the CDB */
 		memcpy(scb->cdb, SC->cmnd, SC->cmd_len);
 
-		/* Now handle the data buffer */
-		if (SC->use_sg) {
+		scb->sg_count = scsi_dma_map(SC);
+		BUG_ON(scb->sg_count < 0);
+		if (scb->sg_count) {
 			struct scatterlist *sg;
 			int i;
 
-			sg = SC->request_buffer;
-			scb->sg_count = pci_map_sg(ha->pcidev, sg, SC->use_sg,
-						   SC->sc_data_direction);
 			scb->flags |= IPS_SCB_MAP_SG;
-			for (i = 0; i < scb->sg_count; i++) {
+
+			scsi_for_each_sg(SC, sg, scb->sg_count, i) {
 				if (ips_fill_scb_sg_single
-				    (ha, sg_dma_address(&sg[i]), scb, i,
-				     sg_dma_len(&sg[i])) < 0)
+				    (ha, sg_dma_address(sg), scb, i,
+				     sg_dma_len(sg)) < 0)
 					break;
 			}
 			scb->dcdb.transfer_length = scb->data_len;
 		} else {
-			if (SC->request_bufflen) {
-				scb->data_busaddr =
-				    pci_map_single(ha->pcidev,
-						   SC->request_buffer,
-						   SC->request_bufflen,
-						   SC->sc_data_direction);
-				scb->flags |= IPS_SCB_MAP_SINGLE;
-				ips_fill_scb_sg_single(ha, scb->data_busaddr,
-						       scb, 0,
-						       SC->request_bufflen);
-				scb->dcdb.transfer_length = scb->data_len;
-			} else {
-				scb->data_busaddr = 0L;
-				scb->sg_len = 0;
-				scb->data_len = 0;
-				scb->dcdb.transfer_length = 0;
-			}
-
+			scb->data_busaddr = 0L;
+			scb->sg_len = 0;
+			scb->data_len = 0;
+			scb->dcdb.transfer_length = 0;
 		}
 
 		scb->dcdb.cmd_attribute =
@@ -3277,52 +3250,32 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
 	 * the rest of the data and continue.
 	 */
 	if ((scb->breakup) || (scb->sg_break)) {
+		struct scatterlist *sg;
+		int sg_dma_index, ips_sg_index = 0;
+
 		/* we had a data breakup */
 		scb->data_len = 0;
 
-		if (scb->sg_count) {
-			/* S/G request */
-			struct scatterlist *sg;
-			int ips_sg_index = 0;
-			int sg_dma_index;
-
-			sg = scb->scsi_cmd->request_buffer;
-
-			/* Spin forward to last dma chunk */
-			sg_dma_index = scb->breakup;
-
-			/* Take care of possible partial on last chunk */
-			ips_fill_scb_sg_single(ha,
-					       sg_dma_address(&sg
-							      [sg_dma_index]),
-					       scb, ips_sg_index++,
-					       sg_dma_len(&sg
-							  [sg_dma_index]));
-
-			for (; sg_dma_index < scb->sg_count;
-			     sg_dma_index++) {
-				if (ips_fill_scb_sg_single
-				    (ha,
-				     sg_dma_address(&sg[sg_dma_index]),
-				     scb, ips_sg_index++,
-				     sg_dma_len(&sg[sg_dma_index])) < 0)
-					break;
-
-			}
-
-		} else {
-			/* Non S/G Request */
-			(void) ips_fill_scb_sg_single(ha,
-						      scb->
-						      data_busaddr +
-						      (scb->sg_break *
-						       ha->max_xfer),
-						      scb, 0,
-						      scb->scsi_cmd->
-						      request_bufflen -
-						      (scb->sg_break *
-						       ha->max_xfer));
-		}
+		sg = scsi_sglist(scb->scsi_cmd);
+
+		/* Spin forward to last dma chunk */
+		sg_dma_index = scb->breakup;
+
+		/* Take care of possible partial on last chunk */
+		ips_fill_scb_sg_single(ha,
+				       sg_dma_address(&sg[sg_dma_index]),
+				       scb, ips_sg_index++,
+				       sg_dma_len(&sg[sg_dma_index]));
+
+		for (; sg_dma_index < scsi_sg_count(scb->scsi_cmd);
+		     sg_dma_index++) {
+			if (ips_fill_scb_sg_single
+			    (ha,
+			     sg_dma_address(&sg[sg_dma_index]),
+			     scb, ips_sg_index++,
+			     sg_dma_len(&sg[sg_dma_index])) < 0)
+				break;
+		}
 
 		scb->dcdb.transfer_length = scb->data_len;
 		scb->dcdb.cmd_attribute |=
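
[Editor's note: the TODO in the commit message concerns the hunk above, whose
breakup walk still indexes the scatterlist by hand. A hedged sketch of what
the scsi_for_each_sg() form could look like, skipping the chunks a prior pass
already transferred; this is an untested illustration, not part of this patch:

	struct scatterlist *sg;
	int i, ips_sg_index = 0;

	scb->data_len = 0;
	/* revisit every mapped chunk, ignoring those before the breakup */
	scsi_for_each_sg(scb->scsi_cmd, sg, scsi_sg_count(scb->scsi_cmd), i) {
		if (i < scb->breakup)
			continue;	/* already transferred */
		if (ips_fill_scb_sg_single(ha, sg_dma_address(sg), scb,
					   ips_sg_index++, sg_dma_len(sg)) < 0)
			break;
	}
	scb->dcdb.transfer_length = scb->data_len;

Like the patched code, this starts at the (possibly partial) chunk at index
scb->breakup and fills forward from there.]
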
@@ -3553,32 +3506,27 @@ ips_send_wait(ips_ha_t * ha, ips_scb_t * scb, int timeout, int intr)
 static void
 ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
 {
-	if (scmd->use_sg) {
-		int i;
-		unsigned int min_cnt, xfer_cnt;
-		char *cdata = (char *) data;
-		unsigned char *buffer;
-		unsigned long flags;
-		struct scatterlist *sg = scmd->request_buffer;
-		for (i = 0, xfer_cnt = 0;
-		     (i < scmd->use_sg) && (xfer_cnt < count); i++) {
-			min_cnt = min(count - xfer_cnt, sg[i].length);
-
-			/* kmap_atomic() ensures addressability of the data buffer.*/
-			/* local_irq_save() protects the KM_IRQ0 address slot.     */
-			local_irq_save(flags);
-			buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
-			memcpy(buffer, &cdata[xfer_cnt], min_cnt);
-			kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
-			local_irq_restore(flags);
-
-			xfer_cnt += min_cnt;
-		}
-
-	} else {
-		unsigned int min_cnt = min(count, scmd->request_bufflen);
-		memcpy(scmd->request_buffer, data, min_cnt);
-	}
+	int i;
+	unsigned int min_cnt, xfer_cnt;
+	char *cdata = (char *) data;
+	unsigned char *buffer;
+	unsigned long flags;
+	struct scatterlist *sg = scsi_sglist(scmd);
+
+	for (i = 0, xfer_cnt = 0;
+	     (i < scsi_sg_count(scmd)) && (xfer_cnt < count); i++) {
+		min_cnt = min(count - xfer_cnt, sg[i].length);
+
+		/* kmap_atomic() ensures addressability of the data buffer.*/
+		/* local_irq_save() protects the KM_IRQ0 address slot.     */
+		local_irq_save(flags);
+		buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+		memcpy(buffer, &cdata[xfer_cnt], min_cnt);
+		kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
+		local_irq_restore(flags);
+
+		xfer_cnt += min_cnt;
+	}
 }
 
 /****************************************************************************/
@@ -3591,32 +3539,27 @@ ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
 static void
 ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count)
 {
-	if (scmd->use_sg) {
-		int i;
-		unsigned int min_cnt, xfer_cnt;
-		char *cdata = (char *) data;
-		unsigned char *buffer;
-		unsigned long flags;
-		struct scatterlist *sg = scmd->request_buffer;
-		for (i = 0, xfer_cnt = 0;
-		     (i < scmd->use_sg) && (xfer_cnt < count); i++) {
-			min_cnt = min(count - xfer_cnt, sg[i].length);
-
-			/* kmap_atomic() ensures addressability of the data buffer.*/
-			/* local_irq_save() protects the KM_IRQ0 address slot.     */
-			local_irq_save(flags);
-			buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
-			memcpy(&cdata[xfer_cnt], buffer, min_cnt);
-			kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
-			local_irq_restore(flags);
-
-			xfer_cnt += min_cnt;
-		}
-
-	} else {
-		unsigned int min_cnt = min(count, scmd->request_bufflen);
-		memcpy(data, scmd->request_buffer, min_cnt);
-	}
+	int i;
+	unsigned int min_cnt, xfer_cnt;
+	char *cdata = (char *) data;
+	unsigned char *buffer;
+	unsigned long flags;
+	struct scatterlist *sg = scsi_sglist(scmd);
+
+	for (i = 0, xfer_cnt = 0;
+	     (i < scsi_sg_count(scmd)) && (xfer_cnt < count); i++) {
+		min_cnt = min(count - xfer_cnt, sg[i].length);
+
+		/* kmap_atomic() ensures addressability of the data buffer.*/
+		/* local_irq_save() protects the KM_IRQ0 address slot.     */
+		local_irq_save(flags);
+		buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+		memcpy(&cdata[xfer_cnt], buffer, min_cnt);
+		kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
+		local_irq_restore(flags);
+
+		xfer_cnt += min_cnt;
+	}
 }
 
 /****************************************************************************/
@@ -4250,7 +4193,7 @@ ips_rdcap(ips_ha_t * ha, ips_scb_t * scb)
 
 	METHOD_TRACE("ips_rdcap", 1);
 
-	if (scb->scsi_cmd->request_bufflen < 8)
+	if (scsi_bufflen(scb->scsi_cmd) < 8)
 		return (0);
 
 	cap.lba =
@@ -4635,8 +4578,7 @@ ips_freescb(ips_ha_t * ha, ips_scb_t * scb)
 
 	METHOD_TRACE("ips_freescb", 1);
 	if (scb->flags & IPS_SCB_MAP_SG)
-		pci_unmap_sg(ha->pcidev, scb->scsi_cmd->request_buffer,
-			     scb->scsi_cmd->use_sg, IPS_DMA_DIR(scb));
+		scsi_dma_unmap(scb->scsi_cmd);
 	else if (scb->flags & IPS_SCB_MAP_SINGLE)
 		pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len,
 				 IPS_DMA_DIR(scb));
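
[Editor's note: the hunk above shows the completion side of the accessor
pattern, where scsi_dma_unmap() undoes the scsi_dma_map() made at queue time
and replaces the driver's pci_unmap_sg() bookkeeping. A minimal sketch under
the same assumptions as the earlier example (my_done() is a hypothetical
completion handler, not ips.c code):

	/* completion path: undo the mapping made by scsi_dma_map() */
	static void my_done(struct scsi_cmnd *SC)
	{
		scsi_dma_unmap(SC);	/* no-op for commands with no data */
		SC->scsi_done(SC);	/* hand the command back to the midlayer */
	}
]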