author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-15 19:51:54 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-15 19:51:54 -0400
commit	bc06cffdec85d487c77109dffcd2f285bdc502d3 (patch)
tree	adc6e6398243da87e66c56102840597a329183a0 /drivers/scsi/ips.c
parent	d3502d7f25b22cfc9762bf1781faa9db1bb3be2e (diff)
parent	9413d7b8aa777dd1fc7db9563ce5e80d769fe7b5 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (166 commits)
  [SCSI] ibmvscsi: convert to use the data buffer accessors
  [SCSI] dc395x: convert to use the data buffer accessors
  [SCSI] ncr53c8xx: convert to use the data buffer accessors
  [SCSI] sym53c8xx: convert to use the data buffer accessors
  [SCSI] ppa: coding police and printk levels
  [SCSI] aic7xxx_old: remove redundant GFP_ATOMIC from kmalloc
  [SCSI] i2o: remove redundant GFP_ATOMIC from kmalloc from device.c
  [SCSI] remove the dead CYBERSTORMIII_SCSI option
  [SCSI] don't build scsi_dma_{map,unmap} for !HAS_DMA
  [SCSI] Clean up scsi_add_lun a bit
  [SCSI] 53c700: Remove printk, which triggers because of low scsi clock on SNI RMs
  [SCSI] sni_53c710: Cleanup
  [SCSI] qla4xxx: Fix underrun/overrun conditions
  [SCSI] megaraid_mbox: use mutex instead of semaphore
  [SCSI] aacraid: add 51245, 51645 and 52245 adapters to documentation.
  [SCSI] qla2xxx: update version to 8.02.00-k1.
  [SCSI] qla2xxx: add support for NPIV
  [SCSI] stex: use resid for xfer len information
  [SCSI] Add Brownie 1200U3P to blacklist
  [SCSI] scsi.c: convert to use the data buffer accessors
  ...
Diffstat (limited to 'drivers/scsi/ips.c')
-rw-r--r--	drivers/scsi/ips.c	401
1 file changed, 117 insertions(+), 284 deletions(-)
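
Most of the ips.c changes below apply a single pattern: the command's data buffer is no longer reached through the old struct scsi_cmnd fields (use_sg, request_buffer, request_bufflen) but through the data buffer accessors (scsi_sglist(), scsi_sg_count(), scsi_bufflen(), scsi_dma_map()/scsi_dma_unmap()), which treat every command as a scatterlist and fold away the separate single-buffer path. A minimal sketch of the new-style mapping, for orientation only; the function and its setup_hw_sg_entry() helper are hypothetical, not lines from this diff:

#include <scsi/scsi_cmnd.h>

/* Hypothetical helper: program one hardware S/G descriptor. */
static void setup_hw_sg_entry(int idx, dma_addr_t addr, unsigned int len);

static int example_map_data(struct scsi_cmnd *SC)
{
	struct scatterlist *sg;
	int i, nseg;

	/* scsi_dma_map() subsumes the old pci_map_sg()/pci_map_single()
	 * split: it maps the command's scatterlist and returns the segment
	 * count, 0 when the command carries no data, negative on failure. */
	nseg = scsi_dma_map(SC);
	if (nseg < 0)
		return -EIO;

	scsi_for_each_sg(SC, sg, nseg, i)
		setup_hw_sg_entry(i, sg_dma_address(sg), sg_dma_len(sg));

	/* The total transfer length now comes from scsi_bufflen(SC)
	 * rather than the removed request_bufflen field. */
	return 0;
}

The ips_next() hunk below is the real-world version of this sketch, with BUG_ON(scb->sg_count < 0) in place of the error return.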
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 40f148e0833f..9f8ed6b81576 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -211,19 +211,6 @@ module_param(ips, charp, 0);
 #warning "This driver has only been tested on the x86/ia64/x86_64 platforms"
 #endif
 
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0)
-#include <linux/blk.h>
-#include "sd.h"
-#define IPS_LOCK_SAVE(lock,flags) spin_lock_irqsave(&io_request_lock,flags)
-#define IPS_UNLOCK_RESTORE(lock,flags) spin_unlock_irqrestore(&io_request_lock,flags)
-#ifndef __devexit_p
-#define __devexit_p(x) x
-#endif
-#else
-#define IPS_LOCK_SAVE(lock,flags) do{spin_lock(lock);(void)flags;}while(0)
-#define IPS_UNLOCK_RESTORE(lock,flags) do{spin_unlock(lock);(void)flags;}while(0)
-#endif
-
 #define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
                           DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
                           PCI_DMA_BIDIRECTIONAL : \
@@ -381,24 +368,13 @@ static struct scsi_host_template ips_driver_template = {
 	.eh_abort_handler = ips_eh_abort,
 	.eh_host_reset_handler = ips_eh_reset,
 	.proc_name = "ips",
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 	.proc_info = ips_proc_info,
 	.slave_configure = ips_slave_configure,
-#else
-	.proc_info = ips_proc24_info,
-	.select_queue_depths = ips_select_queue_depth,
-#endif
 	.bios_param = ips_biosparam,
 	.this_id = -1,
 	.sg_tablesize = IPS_MAX_SG,
 	.cmd_per_lun = 3,
 	.use_clustering = ENABLE_CLUSTERING,
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-	.use_new_eh_code = 1,
-#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20) && LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-	.highmem_io = 1,
-#endif
 };
 
 
@@ -731,7 +707,7 @@ ips_release(struct Scsi_Host *sh)
 	/* free IRQ */
 	free_irq(ha->irq, ha);
 
-	IPS_REMOVE_HOST(sh);
+	scsi_remove_host(sh);
 	scsi_host_put(sh);
 
 	ips_released_controllers++;
@@ -813,7 +789,6 @@ int ips_eh_abort(struct scsi_cmnd *SC)
 	ips_ha_t *ha;
 	ips_copp_wait_item_t *item;
 	int ret;
-	unsigned long cpu_flags;
 	struct Scsi_Host *host;
 
 	METHOD_TRACE("ips_eh_abort", 1);
@@ -830,7 +805,7 @@ int ips_eh_abort(struct scsi_cmnd *SC)
 	if (!ha->active)
 		return (FAILED);
 
-	IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+	spin_lock(host->host_lock);
 
 	/* See if the command is on the copp queue */
 	item = ha->copp_waitlist.head;
@@ -851,7 +826,7 @@ int ips_eh_abort(struct scsi_cmnd *SC)
 		ret = (FAILED);
 	}
 
-	IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
+	spin_unlock(host->host_lock);
 	return ret;
 }
 
@@ -1129,7 +1104,7 @@ static int ips_queue(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
 		/* A Reset IOCTL is only sent by the boot CD in extreme cases. */
 		/* There can never be any system activity ( network or disk ), but check */
 		/* anyway just as a good practice. */
-		pt = (ips_passthru_t *) SC->request_buffer;
+		pt = (ips_passthru_t *) scsi_sglist(SC);
 		if ((pt->CoppCP.cmd.reset.op_code == IPS_CMD_RESET_CHANNEL) &&
 		    (pt->CoppCP.cmd.reset.adapter_flag == 1)) {
 			if (ha->scb_activelist.count != 0) {
@@ -1176,18 +1151,10 @@ static int ips_queue(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
 /* Set bios geometry for the controller                                     */
 /*                                                                          */
 /****************************************************************************/
-static int
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-ips_biosparam(Disk * disk, kdev_t dev, int geom[])
-{
-	ips_ha_t *ha = (ips_ha_t *) disk->device->host->hostdata;
-	unsigned long capacity = disk->capacity;
-#else
-ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
-	      sector_t capacity, int geom[])
+static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+			 sector_t capacity, int geom[])
 {
 	ips_ha_t *ha = (ips_ha_t *) sdev->host->hostdata;
-#endif
 	int heads;
 	int sectors;
 	int cylinders;
@@ -1225,70 +1192,6 @@ ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
 	return (0);
 }
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-
-/* ips_proc24_info is a wrapper around ips_proc_info *
- * for compatibility with the 2.4 scsi parameters    */
-static int
-ips_proc24_info(char *buffer, char **start, off_t offset, int length,
-		int hostno, int func)
-{
-	int i;
-
-	for (i = 0; i < ips_next_controller; i++) {
-		if (ips_sh[i] && ips_sh[i]->host_no == hostno) {
-			return ips_proc_info(ips_sh[i], buffer, start,
-					     offset, length, func);
-		}
-	}
-	return -EINVAL;
-}
-
-/****************************************************************************/
-/*                                                                          */
-/* Routine Name: ips_select_queue_depth                                     */
-/*                                                                          */
-/* Routine Description:                                                     */
-/*                                                                          */
-/*   Select queue depths for the devices on the contoller                   */
-/*                                                                          */
-/****************************************************************************/
-static void
-ips_select_queue_depth(struct Scsi_Host *host, struct scsi_device * scsi_devs)
-{
-	struct scsi_device *device;
-	ips_ha_t *ha;
-	int count = 0;
-	int min;
-
-	ha = IPS_HA(host);
-	min = ha->max_cmds / 4;
-
-	for (device = scsi_devs; device; device = device->next) {
-		if (device->host == host) {
-			if ((device->channel == 0) && (device->type == 0))
-				count++;
-		}
-	}
-
-	for (device = scsi_devs; device; device = device->next) {
-		if (device->host == host) {
-			if ((device->channel == 0) && (device->type == 0)) {
-				device->queue_depth =
-				    (ha->max_cmds - 1) / count;
-				if (device->queue_depth < min)
-					device->queue_depth = min;
-			} else {
-				device->queue_depth = 2;
-			}
-
-			if (device->queue_depth < 2)
-				device->queue_depth = 2;
-		}
-	}
-}
-
-#else
 /****************************************************************************/
 /*                                                                          */
 /* Routine Name: ips_slave_configure                                        */
@@ -1316,7 +1219,6 @@ ips_slave_configure(struct scsi_device * SDptr)
 	SDptr->skip_ms_page_3f = 1;
 	return 0;
 }
-#endif
 
 /****************************************************************************/
 /*                                                                          */
@@ -1331,7 +1233,6 @@ static irqreturn_t
 do_ipsintr(int irq, void *dev_id)
 {
 	ips_ha_t *ha;
-	unsigned long cpu_flags;
 	struct Scsi_Host *host;
 	int irqstatus;
 
@@ -1347,16 +1248,16 @@ do_ipsintr(int irq, void *dev_id)
 		return IRQ_HANDLED;
 	}
 
-	IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+	spin_lock(host->host_lock);
 
 	if (!ha->active) {
-		IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
+		spin_unlock(host->host_lock);
 		return IRQ_HANDLED;
 	}
 
 	irqstatus = (*ha->func.intr) (ha);
 
-	IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
+	spin_unlock(host->host_lock);
 
 	/* start the next command */
 	ips_next(ha, IPS_INTR_ON);
@@ -1606,30 +1507,22 @@ static int ips_is_passthru(struct scsi_cmnd *SC)
 	if ((SC->cmnd[0] == IPS_IOCTL_COMMAND) &&
 	    (SC->device->channel == 0) &&
 	    (SC->device->id == IPS_ADAPTER_ID) &&
-	    (SC->device->lun == 0) && SC->request_buffer) {
-		if ((!SC->use_sg) && SC->request_bufflen &&
-		    (((char *) SC->request_buffer)[0] == 'C') &&
-		    (((char *) SC->request_buffer)[1] == 'O') &&
-		    (((char *) SC->request_buffer)[2] == 'P') &&
-		    (((char *) SC->request_buffer)[3] == 'P'))
-			return 1;
-		else if (SC->use_sg) {
-			struct scatterlist *sg = SC->request_buffer;
-			char *buffer;
-
-			/* kmap_atomic() ensures addressability of the user buffer.*/
-			/* local_irq_save() protects the KM_IRQ0 address slot.     */
-			local_irq_save(flags);
-			buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
-			if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
-			    buffer[2] == 'P' && buffer[3] == 'P') {
-				kunmap_atomic(buffer - sg->offset, KM_IRQ0);
-				local_irq_restore(flags);
-				return 1;
-			}
-			kunmap_atomic(buffer - sg->offset, KM_IRQ0);
-			local_irq_restore(flags);
-		}
+	    (SC->device->lun == 0) && scsi_sglist(SC)) {
+		struct scatterlist *sg = scsi_sglist(SC);
+		char *buffer;
+
+		/* kmap_atomic() ensures addressability of the user buffer.*/
+		/* local_irq_save() protects the KM_IRQ0 address slot.     */
+		local_irq_save(flags);
+		buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+		if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
+		    buffer[2] == 'P' && buffer[3] == 'P') {
+			kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+			local_irq_restore(flags);
+			return 1;
+		}
+		kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+		local_irq_restore(flags);
 	}
 	return 0;
 }
@@ -1680,18 +1573,14 @@ ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr)
 {
 	ips_passthru_t *pt;
 	int length = 0;
-	int ret;
+	int i, ret;
+	struct scatterlist *sg = scsi_sglist(SC);
 
 	METHOD_TRACE("ips_make_passthru", 1);
 
-	if (!SC->use_sg) {
-		length = SC->request_bufflen;
-	} else {
-		struct scatterlist *sg = SC->request_buffer;
-		int i;
-		for (i = 0; i < SC->use_sg; i++)
-			length += sg[i].length;
-	}
+	scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i)
+		length += sg[i].length;
+
 	if (length < sizeof (ips_passthru_t)) {
 		/* wrong size */
 		DEBUG_VAR(1, "(%s%d) Passthru structure wrong size",
@@ -2115,7 +2004,7 @@ ips_cleanup_passthru(ips_ha_t * ha, ips_scb_t * scb)
 
 	METHOD_TRACE("ips_cleanup_passthru", 1);
 
-	if ((!scb) || (!scb->scsi_cmd) || (!scb->scsi_cmd->request_buffer)) {
+	if ((!scb) || (!scb->scsi_cmd) || (!scsi_sglist(scb->scsi_cmd))) {
 		DEBUG_VAR(1, "(%s%d) couldn't cleanup after passthru",
 			  ips_name, ha->host_num);
 
@@ -2730,7 +2619,6 @@ ips_next(ips_ha_t * ha, int intr)
 	struct scsi_cmnd *q;
 	ips_copp_wait_item_t *item;
 	int ret;
-	unsigned long cpu_flags = 0;
 	struct Scsi_Host *host;
 	METHOD_TRACE("ips_next", 1);
 
@@ -2742,7 +2630,7 @@ ips_next(ips_ha_t * ha, int intr)
 	 * this command won't time out
 	 */
 	if (intr == IPS_INTR_ON)
-		IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+		spin_lock(host->host_lock);
 
 	if ((ha->subsys->param[3] & 0x300000)
 	    && (ha->scb_activelist.count == 0)) {
@@ -2769,14 +2657,14 @@ ips_next(ips_ha_t * ha, int intr)
 		item = ips_removeq_copp_head(&ha->copp_waitlist);
 		ha->num_ioctl++;
 		if (intr == IPS_INTR_ON)
-			IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
+			spin_unlock(host->host_lock);
 		scb->scsi_cmd = item->scsi_cmd;
 		kfree(item);
 
 		ret = ips_make_passthru(ha, scb->scsi_cmd, scb, intr);
 
 		if (intr == IPS_INTR_ON)
-			IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+			spin_lock(host->host_lock);
 		switch (ret) {
 		case IPS_FAILURE:
 			if (scb->scsi_cmd) {
@@ -2846,7 +2734,7 @@ ips_next(ips_ha_t * ha, int intr)
 		SC = ips_removeq_wait(&ha->scb_waitlist, q);
 
 		if (intr == IPS_INTR_ON)
-			IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);	/* Unlock HA after command is taken off queue */
+			spin_unlock(host->host_lock);	/* Unlock HA after command is taken off queue */
 
 		SC->result = DID_OK;
 		SC->host_scribble = NULL;
@@ -2866,41 +2754,26 @@ ips_next(ips_ha_t * ha, int intr)
 		/* copy in the CDB */
 		memcpy(scb->cdb, SC->cmnd, SC->cmd_len);
 
-		/* Now handle the data buffer */
-		if (SC->use_sg) {
+		scb->sg_count = scsi_dma_map(SC);
+		BUG_ON(scb->sg_count < 0);
+		if (scb->sg_count) {
 			struct scatterlist *sg;
 			int i;
 
-			sg = SC->request_buffer;
-			scb->sg_count = pci_map_sg(ha->pcidev, sg, SC->use_sg,
-						   SC->sc_data_direction);
 			scb->flags |= IPS_SCB_MAP_SG;
-			for (i = 0; i < scb->sg_count; i++) {
+
+			scsi_for_each_sg(SC, sg, scb->sg_count, i) {
 				if (ips_fill_scb_sg_single
-				    (ha, sg_dma_address(&sg[i]), scb, i,
-				     sg_dma_len(&sg[i])) < 0)
+				    (ha, sg_dma_address(sg), scb, i,
+				     sg_dma_len(sg)) < 0)
 					break;
 			}
 			scb->dcdb.transfer_length = scb->data_len;
 		} else {
-			if (SC->request_bufflen) {
-				scb->data_busaddr =
-				    pci_map_single(ha->pcidev,
-						   SC->request_buffer,
-						   SC->request_bufflen,
-						   SC->sc_data_direction);
-				scb->flags |= IPS_SCB_MAP_SINGLE;
-				ips_fill_scb_sg_single(ha, scb->data_busaddr,
-						       scb, 0,
-						       SC->request_bufflen);
-				scb->dcdb.transfer_length = scb->data_len;
-			} else {
-				scb->data_busaddr = 0L;
-				scb->sg_len = 0;
-				scb->data_len = 0;
-				scb->dcdb.transfer_length = 0;
-			}
-
+			scb->data_busaddr = 0L;
+			scb->sg_len = 0;
+			scb->data_len = 0;
+			scb->dcdb.transfer_length = 0;
 		}
 
 		scb->dcdb.cmd_attribute =
@@ -2919,7 +2792,7 @@ ips_next(ips_ha_t * ha, int intr)
 			scb->dcdb.transfer_length = 0;
 		}
 		if (intr == IPS_INTR_ON)
-			IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+			spin_lock(host->host_lock);
 
 		ret = ips_send_cmd(ha, scb);
 
@@ -2958,7 +2831,7 @@ ips_next(ips_ha_t * ha, int intr)
 	}			/* end while */
 
 	if (intr == IPS_INTR_ON)
-		IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
+		spin_unlock(host->host_lock);
 }
 
 /****************************************************************************/
@@ -3377,52 +3250,32 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
 	 * the rest of the data and continue.
 	 */
 	if ((scb->breakup) || (scb->sg_break)) {
+		struct scatterlist *sg;
+		int sg_dma_index, ips_sg_index = 0;
+
 		/* we had a data breakup */
 		scb->data_len = 0;
 
-		if (scb->sg_count) {
-			/* S/G request */
-			struct scatterlist *sg;
-			int ips_sg_index = 0;
-			int sg_dma_index;
-
-			sg = scb->scsi_cmd->request_buffer;
-
-			/* Spin forward to last dma chunk */
-			sg_dma_index = scb->breakup;
-
-			/* Take care of possible partial on last chunk */
-			ips_fill_scb_sg_single(ha,
-					       sg_dma_address(&sg
							      [sg_dma_index]),
-					       scb, ips_sg_index++,
-					       sg_dma_len(&sg
							  [sg_dma_index]));
-
-			for (; sg_dma_index < scb->sg_count;
-			     sg_dma_index++) {
-				if (ips_fill_scb_sg_single
-				    (ha,
-				     sg_dma_address(&sg[sg_dma_index]),
-				     scb, ips_sg_index++,
-				     sg_dma_len(&sg[sg_dma_index])) < 0)
-					break;
+		sg = scsi_sglist(scb->scsi_cmd);
 
-			}
+		/* Spin forward to last dma chunk */
+		sg_dma_index = scb->breakup;
 
-		} else {
-			/* Non S/G Request */
-			(void) ips_fill_scb_sg_single(ha,
-						      scb->
-						      data_busaddr +
-						      (scb->sg_break *
-						       ha->max_xfer),
-						      scb, 0,
-						      scb->scsi_cmd->
-						      request_bufflen -
-						      (scb->sg_break *
-						       ha->max_xfer));
-		}
+		/* Take care of possible partial on last chunk */
+		ips_fill_scb_sg_single(ha,
+				       sg_dma_address(&sg[sg_dma_index]),
+				       scb, ips_sg_index++,
+				       sg_dma_len(&sg[sg_dma_index]));
+
+		for (; sg_dma_index < scsi_sg_count(scb->scsi_cmd);
+		     sg_dma_index++) {
+			if (ips_fill_scb_sg_single
+			    (ha,
+			     sg_dma_address(&sg[sg_dma_index]),
+			     scb, ips_sg_index++,
+			     sg_dma_len(&sg[sg_dma_index])) < 0)
+				break;
+		}
 
 		scb->dcdb.transfer_length = scb->data_len;
 		scb->dcdb.cmd_attribute |=
@@ -3653,32 +3506,27 @@ ips_send_wait(ips_ha_t * ha, ips_scb_t * scb, int timeout, int intr)
 static void
 ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
 {
-	if (scmd->use_sg) {
-		int i;
-		unsigned int min_cnt, xfer_cnt;
-		char *cdata = (char *) data;
-		unsigned char *buffer;
-		unsigned long flags;
-		struct scatterlist *sg = scmd->request_buffer;
-		for (i = 0, xfer_cnt = 0;
-		     (i < scmd->use_sg) && (xfer_cnt < count); i++) {
-			min_cnt = min(count - xfer_cnt, sg[i].length);
-
-			/* kmap_atomic() ensures addressability of the data buffer.*/
-			/* local_irq_save() protects the KM_IRQ0 address slot.     */
-			local_irq_save(flags);
-			buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
-			memcpy(buffer, &cdata[xfer_cnt], min_cnt);
-			kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
-			local_irq_restore(flags);
-
-			xfer_cnt += min_cnt;
-		}
-
-	} else {
-		unsigned int min_cnt = min(count, scmd->request_bufflen);
-		memcpy(scmd->request_buffer, data, min_cnt);
-	}
+	int i;
+	unsigned int min_cnt, xfer_cnt;
+	char *cdata = (char *) data;
+	unsigned char *buffer;
+	unsigned long flags;
+	struct scatterlist *sg = scsi_sglist(scmd);
+
+	for (i = 0, xfer_cnt = 0;
+	     (i < scsi_sg_count(scmd)) && (xfer_cnt < count); i++) {
+		min_cnt = min(count - xfer_cnt, sg[i].length);
+
+		/* kmap_atomic() ensures addressability of the data buffer.*/
+		/* local_irq_save() protects the KM_IRQ0 address slot.     */
+		local_irq_save(flags);
+		buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+		memcpy(buffer, &cdata[xfer_cnt], min_cnt);
+		kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
+		local_irq_restore(flags);
+
+		xfer_cnt += min_cnt;
+	}
 }
 
 /****************************************************************************/
@@ -3691,32 +3539,27 @@ ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
 static void
 ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count)
 {
-	if (scmd->use_sg) {
-		int i;
-		unsigned int min_cnt, xfer_cnt;
-		char *cdata = (char *) data;
-		unsigned char *buffer;
-		unsigned long flags;
-		struct scatterlist *sg = scmd->request_buffer;
-		for (i = 0, xfer_cnt = 0;
-		     (i < scmd->use_sg) && (xfer_cnt < count); i++) {
-			min_cnt = min(count - xfer_cnt, sg[i].length);
-
-			/* kmap_atomic() ensures addressability of the data buffer.*/
-			/* local_irq_save() protects the KM_IRQ0 address slot.     */
-			local_irq_save(flags);
-			buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
-			memcpy(&cdata[xfer_cnt], buffer, min_cnt);
-			kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
-			local_irq_restore(flags);
-
-			xfer_cnt += min_cnt;
-		}
-
-	} else {
-		unsigned int min_cnt = min(count, scmd->request_bufflen);
-		memcpy(data, scmd->request_buffer, min_cnt);
-	}
+	int i;
+	unsigned int min_cnt, xfer_cnt;
+	char *cdata = (char *) data;
+	unsigned char *buffer;
+	unsigned long flags;
+	struct scatterlist *sg = scsi_sglist(scmd);
+
+	for (i = 0, xfer_cnt = 0;
+	     (i < scsi_sg_count(scmd)) && (xfer_cnt < count); i++) {
+		min_cnt = min(count - xfer_cnt, sg[i].length);
+
+		/* kmap_atomic() ensures addressability of the data buffer.*/
+		/* local_irq_save() protects the KM_IRQ0 address slot.     */
+		local_irq_save(flags);
+		buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+		memcpy(&cdata[xfer_cnt], buffer, min_cnt);
+		kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
+		local_irq_restore(flags);
+
+		xfer_cnt += min_cnt;
+	}
 }
 
 /****************************************************************************/
@@ -4350,7 +4193,7 @@ ips_rdcap(ips_ha_t * ha, ips_scb_t * scb)
 
 	METHOD_TRACE("ips_rdcap", 1);
 
-	if (scb->scsi_cmd->request_bufflen < 8)
+	if (scsi_bufflen(scb->scsi_cmd) < 8)
 		return (0);
 
 	cap.lba =
@@ -4735,8 +4578,7 @@ ips_freescb(ips_ha_t * ha, ips_scb_t * scb)
 
 	METHOD_TRACE("ips_freescb", 1);
 	if (scb->flags & IPS_SCB_MAP_SG)
-		pci_unmap_sg(ha->pcidev, scb->scsi_cmd->request_buffer,
-			     scb->scsi_cmd->use_sg, IPS_DMA_DIR(scb));
+		scsi_dma_unmap(scb->scsi_cmd);
 	else if (scb->flags & IPS_SCB_MAP_SINGLE)
 		pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len,
 				 IPS_DMA_DIR(scb));
@@ -7004,7 +6846,6 @@ ips_register_scsi(int index)
 		kfree(oldha);
 	ips_sh[index] = sh;
 	ips_ha[index] = ha;
-	IPS_SCSI_SET_DEVICE(sh, ha);
 
 	/* Store away needed values for later use */
 	sh->io_port = ha->io_addr;
@@ -7016,17 +6857,16 @@ ips_register_scsi(int index)
 	sh->cmd_per_lun = sh->hostt->cmd_per_lun;
 	sh->unchecked_isa_dma = sh->hostt->unchecked_isa_dma;
 	sh->use_clustering = sh->hostt->use_clustering;
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,7)
 	sh->max_sectors = 128;
-#endif
 
 	sh->max_id = ha->ntargets;
 	sh->max_lun = ha->nlun;
 	sh->max_channel = ha->nbus - 1;
 	sh->can_queue = ha->max_cmds - 1;
 
-	IPS_ADD_HOST(sh, NULL);
+	scsi_add_host(sh, NULL);
+	scsi_scan_host(sh);
+
 	return 0;
 }
 
@@ -7069,7 +6909,7 @@ ips_module_init(void)
 		return -ENODEV;
 	ips_driver_template.module = THIS_MODULE;
 	ips_order_controllers();
-	if (IPS_REGISTER_HOSTS(&ips_driver_template)) {
+	if (!ips_detect(&ips_driver_template)) {
 		pci_unregister_driver(&ips_pci_driver);
 		return -ENODEV;
 	}
@@ -7087,7 +6927,6 @@ ips_module_init(void)
 static void __exit
 ips_module_exit(void)
 {
-	IPS_UNREGISTER_HOSTS(&ips_driver_template);
 	pci_unregister_driver(&ips_pci_driver);
 	unregister_reboot_notifier(&ips_notifier);
 }
@@ -7436,15 +7275,9 @@ ips_init_phase2(int index)
 	return SUCCESS;
 }
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,9)
 MODULE_LICENSE("GPL");
-#endif
-
 MODULE_DESCRIPTION("IBM ServeRAID Adapter Driver " IPS_VER_STRING);
-
-#ifdef MODULE_VERSION
 MODULE_VERSION(IPS_VER_STRING);
-#endif
 
 
 /*
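
The unmap half of the conversion is visible in the ips_freescb() hunk above: a successful scsi_dma_map() is paired with a single scsi_dma_unmap() at completion. A sketch of the corresponding completion path, under the same assumptions as the mapping example earlier (example_complete() and bytes_done are hypothetical, not code from this diff):

static void example_complete(struct scsi_cmnd *SC, unsigned int bytes_done)
{
	/* scsi_dma_unmap() replaces the pci_unmap_sg()/pci_unmap_single()
	 * branches; the midlayer tracks the mapping, so the driver keeps
	 * no direction or length bookkeeping of its own. */
	scsi_dma_unmap(SC);

	/* Residual reporting moves to an accessor as well; several commits
	 * in the series above (e.g. the stex resid change) make this
	 * switch. */
	scsi_set_resid(SC, scsi_bufflen(SC) - bytes_done);

	/* ... then complete the command through its done() callback as
	 * usual. */
}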