Diffstat (limited to 'drivers/message/i2o')
-rw-r--r--  drivers/message/i2o/Kconfig        18
-rw-r--r--  drivers/message/i2o/i2o_block.h     6
-rw-r--r--  drivers/message/i2o/i2o_config.c    4
-rw-r--r--  drivers/message/i2o/i2o_scsi.c    263
-rw-r--r--  drivers/message/i2o/pci.c          22
5 files changed, 208 insertions, 105 deletions
diff --git a/drivers/message/i2o/Kconfig b/drivers/message/i2o/Kconfig
index ce278e060aca..94b6d676c5cb 100644
--- a/drivers/message/i2o/Kconfig
+++ b/drivers/message/i2o/Kconfig
@@ -24,6 +24,24 @@ config I2O
 
 	  If unsure, say N.
 
+config I2O_EXT_ADAPTEC
+	bool "Enable Adaptec extensions"
+	depends on I2O
+	default y
+	---help---
+	  Say Y for support of raidutils for Adaptec I2O controllers. You also
+	  have to say Y to "I2O Configuration support", "I2O SCSI OSM" below
+	  and to "SCSI generic support" under "SCSI device configuration".
+
+config I2O_EXT_ADAPTEC_DMA64
+	bool "Enable 64-bit DMA"
+	depends on I2O_EXT_ADAPTEC && ( 64BIT || HIGHMEM64G )
+	default y
+	---help---
+	  Say Y for support of 64-bit DMA transfer mode on Adaptec I2O
+	  controllers.
+	  Note: You need at least firmware version 3709.
+
 config I2O_CONFIG
 	tristate "I2O Configuration support"
 	depends on PCI && I2O
diff --git a/drivers/message/i2o/i2o_block.h b/drivers/message/i2o/i2o_block.h
index 9e1a95fb0833..e45cc40ce384 100644
--- a/drivers/message/i2o/i2o_block.h
+++ b/drivers/message/i2o/i2o_block.h
@@ -56,6 +56,12 @@
 #define I2O_BLOCK_RETRY_TIME		HZ/4
 #define I2O_BLOCK_MAX_OPEN_REQUESTS	50
 
+/* request queue sizes */
+#define I2O_BLOCK_REQ_MEMPOOL_SIZE	32
+
+#define KERNEL_SECTOR_SHIFT 9
+#define KERNEL_SECTOR_SIZE (1 << KERNEL_SECTOR_SHIFT)
+
 /* I2O Block OSM mempool struct */
 struct i2o_block_mempool {
 	kmem_cache_t *slab;
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 849d90aad779..7636833b4623 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -515,6 +515,7 @@ static int i2o_cfg_evt_get(unsigned long arg, struct file *fp)
 	return 0;
 }
 
+#ifdef CONFIG_I2O_EXT_ADAPTEC
 #ifdef CONFIG_COMPAT
 static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, unsigned long arg)
 {
@@ -964,6 +965,7 @@ static int i2o_cfg_passthru(unsigned long arg)
 	kfree(reply);
 	return rcode;
 }
+#endif
 
 /*
  * IOCTL Handler
@@ -1018,9 +1020,11 @@ static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd,
 		ret = i2o_cfg_evt_get(arg, fp);
 		break;
 
+#ifdef CONFIG_I2O_EXT_ADAPTEC
 	case I2OPASSTHRU:
 		ret = i2o_cfg_passthru(arg);
 		break;
+#endif
 
 	default:
 		osm_debug("unknown ioctl called!\n");
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index c3b0c29ac02d..fef53b509a61 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -55,6 +55,7 @@
 #include <linux/pci.h>
 #include <linux/blkdev.h>
 #include <linux/i2o.h>
+#include <linux/scatterlist.h>
 
 #include <asm/dma.h>
 #include <asm/system.h>
@@ -65,19 +66,23 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_request.h>
+#include <scsi/sg.h>
+#include <scsi/sg_request.h>
 
 #define OSM_NAME	"scsi-osm"
-#define OSM_VERSION	"$Rev$"
+#define OSM_VERSION	"1.282"
 #define OSM_DESCRIPTION	"I2O SCSI Peripheral OSM"
 
 static struct i2o_driver i2o_scsi_driver;
 
-static int i2o_scsi_max_id = 16;
-static int i2o_scsi_max_lun = 8;
+static unsigned int i2o_scsi_max_id = 16;
+static unsigned int i2o_scsi_max_lun = 255;
 
 struct i2o_scsi_host {
 	struct Scsi_Host *scsi_host;	/* pointer to the SCSI host */
 	struct i2o_controller *iop;	/* pointer to the I2O controller */
+	unsigned int lun;	/* lun's used for block devices */
 	struct i2o_device *channel[0];	/* channel->i2o_dev mapping table */
 };
 
@@ -100,12 +105,17 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c)
 	u8 type;
 	int i;
 	size_t size;
-	i2o_status_block *sb;
+	u16 body_size = 6;
+
+#ifdef CONFIG_I2O_EXT_ADAPTEC
+	if (c->adaptec)
+		body_size = 8;
+#endif
 
 	list_for_each_entry(i2o_dev, &c->devices, list)
 		if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) {
 			if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1)
 			    && (type == 0x01))	/* SCSI bus */
 				max_channel++;
 		}
 
@@ -127,20 +137,18 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c)
 	scsi_host->max_id = i2o_scsi_max_id;
 	scsi_host->max_lun = i2o_scsi_max_lun;
 	scsi_host->this_id = c->unit;
-
-	sb = c->status_block.virt;
-
-	scsi_host->sg_tablesize = (sb->inbound_frame_size -
-				   sizeof(struct i2o_message) / 4 - 6) / 2;
+	scsi_host->sg_tablesize = i2o_sg_tablesize(c, body_size);
 
 	i2o_shost = (struct i2o_scsi_host *)scsi_host->hostdata;
 	i2o_shost->scsi_host = scsi_host;
 	i2o_shost->iop = c;
+	i2o_shost->lun = 1;
 
 	i = 0;
 	list_for_each_entry(i2o_dev, &c->devices, list)
 		if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) {
-			if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) || (type == 1))	/* only SCSI bus */
+			if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1)
+			    && (type == 0x01))	/* only SCSI bus */
 				i2o_shost->channel[i++] = i2o_dev;
 
 		if (i >= max_channel)
@@ -212,8 +220,8 @@ static int i2o_scsi_probe(struct device *dev)
 	struct Scsi_Host *scsi_host;
 	struct i2o_device *parent;
 	struct scsi_device *scsi_dev;
-	u32 id;
-	u64 lun;
+	u32 id = -1;
+	u64 lun = -1;
 	int channel = -1;
 	int i;
 
@@ -223,8 +231,56 @@ static int i2o_scsi_probe(struct device *dev)
 
 	scsi_host = i2o_shost->scsi_host;
 
-	if (i2o_parm_field_get(i2o_dev, 0, 3, &id, 4) < 0)
+	switch (i2o_dev->lct_data.class_id) {
+	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
+	case I2O_CLASS_EXECUTIVE:
+#ifdef CONFIG_I2O_EXT_ADAPTEC
+		if (c->adaptec) {
+			u8 type;
+			struct i2o_device *d = i2o_shost->channel[0];
+
+			if (i2o_parm_field_get(d, 0x0000, 0, &type, 1)
+			    && (type == 0x01))	/* SCSI bus */
+				if (i2o_parm_field_get(d, 0x0200, 4, &id, 4)) {
+					channel = 0;
+					if (i2o_dev->lct_data.class_id ==
+					    I2O_CLASS_RANDOM_BLOCK_STORAGE)
+						lun = i2o_shost->lun++;
+					else
+						lun = 0;
+				}
+		}
+#endif
+		break;
+
+	case I2O_CLASS_SCSI_PERIPHERAL:
+		if (i2o_parm_field_get(i2o_dev, 0x0000, 3, &id, 4) < 0)
+			return -EFAULT;
+
+		if (i2o_parm_field_get(i2o_dev, 0x0000, 4, &lun, 8) < 0)
+			return -EFAULT;
+
+		parent = i2o_iop_find_device(c, i2o_dev->lct_data.parent_tid);
+		if (!parent) {
+			osm_warn("can not find parent of device %03x\n",
+				 i2o_dev->lct_data.tid);
+			return -EFAULT;
+		}
+
+		for (i = 0; i <= i2o_shost->scsi_host->max_channel; i++)
+			if (i2o_shost->channel[i] == parent)
+				channel = i;
+		break;
+
+	default:
+		return -EFAULT;
+	}
+
+	if (channel == -1) {
+		osm_warn("can not find channel of device %03x\n",
+			 i2o_dev->lct_data.tid);
 		return -EFAULT;
+	}
 
 	if (id >= scsi_host->max_id) {
 		osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)", id,
@@ -232,31 +288,12 @@ static int i2o_scsi_probe(struct device *dev)
 		return -EFAULT;
 	}
 
-	if (i2o_parm_field_get(i2o_dev, 0, 4, &lun, 8) < 0)
-		return -EFAULT;
 	if (lun >= scsi_host->max_lun) {
 		osm_warn("SCSI device id (%d) >= max_lun of I2O host (%d)",
 			 (unsigned int)lun, scsi_host->max_lun);
 		return -EFAULT;
 	}
 
-	parent = i2o_iop_find_device(c, i2o_dev->lct_data.parent_tid);
-	if (!parent) {
-		osm_warn("can not find parent of device %03x\n",
-			 i2o_dev->lct_data.tid);
-		return -EFAULT;
-	}
-
-	for (i = 0; i <= i2o_shost->scsi_host->max_channel; i++)
-		if (i2o_shost->channel[i] == parent)
-			channel = i;
-
-	if (channel == -1) {
-		osm_warn("can not find channel of device %03x\n",
-			 i2o_dev->lct_data.tid);
-		return -EFAULT;
-	}
-
 	scsi_dev =
 	    __scsi_add_device(i2o_shost->scsi_host, channel, id, lun, i2o_dev);
 
@@ -266,7 +303,8 @@ static int i2o_scsi_probe(struct device *dev)
 		return PTR_ERR(scsi_dev);
 	}
 
-	sysfs_create_link(&i2o_dev->device.kobj, &scsi_dev->sdev_gendev.kobj, "scsi");
+	sysfs_create_link(&i2o_dev->device.kobj, &scsi_dev->sdev_gendev.kobj,
+			  "scsi");
 
 	osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %d\n",
 		 i2o_dev->lct_data.tid, channel, id, (unsigned int)lun);
@@ -542,9 +580,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
 			 void (*done) (struct scsi_cmnd *))
 {
 	struct i2o_controller *c;
-	struct Scsi_Host *host;
 	struct i2o_device *i2o_dev;
-	struct device *dev;
 	int tid;
 	struct i2o_message __iomem *msg;
 	u32 m;
@@ -554,20 +590,16 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
 	 * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME
 	 */
 	u32 scsi_flags = 0x20a00000;
-	u32 sg_flags;
+	u32 sgl_offset;
 	u32 __iomem *mptr;
-	u32 __iomem *lenptr;
-	u32 len;
-	int i;
+	u32 cmd = I2O_CMD_SCSI_EXEC << 24;
+	int rc = 0;
 
 	/*
 	 * Do the incoming paperwork
 	 */
-
 	i2o_dev = SCpnt->device->hostdata;
-	host = SCpnt->device->host;
 	c = i2o_dev->iop;
-	dev = &c->pdev->dev;
 
 	SCpnt->scsi_done = done;
 
@@ -575,7 +607,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
 		osm_warn("no I2O device in request\n");
 		SCpnt->result = DID_NO_CONNECT << 16;
 		done(SCpnt);
-		return 0;
+		goto exit;
 	}
 
 	tid = i2o_dev->lct_data.tid;
@@ -584,46 +616,85 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
 	osm_debug("Real scsi messages.\n");
 
 	/*
-	 * Obtain an I2O message. If there are none free then
-	 * throw it back to the scsi layer
-	 */
-
-	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
-	if (m == I2O_QUEUE_EMPTY)
-		return SCSI_MLQUEUE_HOST_BUSY;
-
-	mptr = &msg->body[0];
-
-	/*
 	 * Put together a scsi execscb message
 	 */
-
 	switch (SCpnt->sc_data_direction) {
 	case PCI_DMA_NONE:
 		/* DATA NO XFER */
-		sg_flags = 0x00000000;
+		sgl_offset = SGL_OFFSET_0;
 		break;
 
 	case PCI_DMA_TODEVICE:
 		/* DATA OUT (iop-->dev) */
 		scsi_flags |= 0x80000000;
-		sg_flags = 0x14000000;
+		sgl_offset = SGL_OFFSET_10;
 		break;
 
 	case PCI_DMA_FROMDEVICE:
 		/* DATA IN (iop<--dev) */
 		scsi_flags |= 0x40000000;
-		sg_flags = 0x10000000;
+		sgl_offset = SGL_OFFSET_10;
 		break;
 
 	default:
 		/* Unknown - kill the command */
 		SCpnt->result = DID_NO_CONNECT << 16;
 		done(SCpnt);
-		return 0;
+		goto exit;
 	}
 
-	writel(I2O_CMD_SCSI_EXEC << 24 | HOST_TID << 12 | tid, &msg->u.head[1]);
+	/*
+	 * Obtain an I2O message. If there are none free then
+	 * throw it back to the scsi layer
+	 */
+
+	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY) {
+		rc = SCSI_MLQUEUE_HOST_BUSY;
+		goto exit;
+	}
+
+	mptr = &msg->body[0];
+
+#ifdef CONFIG_I2O_EXT_ADAPTEC
+	if (c->adaptec) {
+		u32 adpt_flags = 0;
+
+		if (SCpnt->sc_request && SCpnt->sc_request->upper_private_data) {
+			i2o_sg_io_hdr_t __user *usr_ptr =
+			    ((Sg_request *) (SCpnt->sc_request->
+					     upper_private_data))->header.
+			    usr_ptr;
+
+			if (usr_ptr)
+				get_user(adpt_flags, &usr_ptr->flags);
+		}
+
+		switch (i2o_dev->lct_data.class_id) {
+		case I2O_CLASS_EXECUTIVE:
+		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
+			/* interpret flag has to be set for executive */
+			adpt_flags ^= I2O_DPT_SG_FLAG_INTERPRET;
+			break;
+
+		default:
+			break;
+		}
+
+		/*
+		 * for Adaptec controllers we use the PRIVATE command, because
+		 * the normal SCSI EXEC doesn't support all SCSI commands on
+		 * all controllers (for example READ CAPACITY).
+		 */
+		if (sgl_offset == SGL_OFFSET_10)
+			sgl_offset = SGL_OFFSET_12;
+		cmd = I2O_CMD_PRIVATE << 24;
+		writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++);
+		writel(adpt_flags | tid, mptr++);
+	}
+#endif
+
+	writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]);
 	writel(i2o_scsi_driver.context, &msg->u.s.icntxt);
 
 	/* We want the SCSI control block back */
@@ -655,55 +726,30 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
 	/* Write SCSI command into the message - always 16 byte block */
 	memcpy_toio(mptr, SCpnt->cmnd, 16);
 	mptr += 4;
-	lenptr = mptr++;	/* Remember me - fill in when we know */
-
-	/* Now fill in the SGList and command */
-	if (SCpnt->use_sg) {
-		struct scatterlist *sg;
-		int sg_count;
-
-		sg = SCpnt->request_buffer;
-		len = 0;
 
-		sg_count = dma_map_sg(dev, sg, SCpnt->use_sg,
-				      SCpnt->sc_data_direction);
-
-		if (unlikely(sg_count <= 0))
-			return -ENOMEM;
-
-		for (i = SCpnt->use_sg; i > 0; i--) {
-			if (i == 1)
-				sg_flags |= 0xC0000000;
-			writel(sg_flags | sg_dma_len(sg), mptr++);
-			writel(sg_dma_address(sg), mptr++);
-			len += sg_dma_len(sg);
-			sg++;
-		}
-
-		writel(len, lenptr);
-	} else {
-		len = SCpnt->request_bufflen;
-
-		writel(len, lenptr);
-
-		if (len > 0) {
-			dma_addr_t dma_addr;
-
-			dma_addr = dma_map_single(dev, SCpnt->request_buffer,
-						  SCpnt->request_bufflen,
-						  SCpnt->sc_data_direction);
-			if (!dma_addr)
-				return -ENOMEM;
-
-			SCpnt->SCp.ptr = (void *)(unsigned long)dma_addr;
-			sg_flags |= 0xC0000000;
-			writel(sg_flags | SCpnt->request_bufflen, mptr++);
-			writel(dma_addr, mptr++);
+	if (sgl_offset != SGL_OFFSET_0) {
+		/* write size of data addressed by SGL */
+		writel(SCpnt->request_bufflen, mptr++);
+
+		/* Now fill in the SGList and command */
+		if (SCpnt->use_sg) {
+			if (!i2o_dma_map_sg(c, SCpnt->request_buffer,
+					    SCpnt->use_sg,
+					    SCpnt->sc_data_direction, &mptr))
+				goto nomem;
+		} else {
+			SCpnt->SCp.dma_handle =
+			    i2o_dma_map_single(c, SCpnt->request_buffer,
+					       SCpnt->request_bufflen,
+					       SCpnt->sc_data_direction, &mptr);
+			if (dma_mapping_error(SCpnt->SCp.dma_handle))
+				goto nomem;
 		}
 	}
 
 	/* Stick the headers on */
-	writel((mptr - &msg->u.head[0]) << 16 | SGL_OFFSET_10, &msg->u.head[0]);
+	writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset,
+	       &msg->u.head[0]);
 
 	/* Queue the message */
 	i2o_msg_post(c, m);
@@ -711,6 +757,13 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
 	osm_debug("Issued %ld\n", SCpnt->serial_number);
 
 	return 0;
+
+      nomem:
+	rc = -ENOMEM;
+	i2o_msg_nop(c, m);
+
+      exit:
+	return rc;
 };
 
 /**
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
index a499af096a68..964fe481849e 100644
--- a/drivers/message/i2o/pci.c
+++ b/drivers/message/i2o/pci.c
@@ -362,11 +362,33 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
 		c->promise = 1;
 	}
 
+	if (pdev->subsystem_vendor == PCI_VENDOR_ID_DPT)
+		c->adaptec = 1;
+
 	/* Cards that go bananas if you quiesce them before you reset them. */
 	if (pdev->vendor == PCI_VENDOR_ID_DPT) {
 		c->no_quiesce = 1;
 		if (pdev->device == 0xa511)
 			c->raptor = 1;
+
+		if (pdev->subsystem_device == 0xc05a) {
+			c->limit_sectors = 1;
+			printk(KERN_INFO
+			       "%s: limit sectors per request to %d\n", c->name,
+			       I2O_MAX_SECTORS_LIMITED);
+		}
+#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
+		if (sizeof(dma_addr_t) > 4) {
+			if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
+				printk(KERN_INFO "%s: 64-bit DMA unavailable\n",
+				       c->name);
+			else {
+				c->pae_support = 1;
+				printk(KERN_INFO "%s: using 64-bit DMA\n",
+				       c->name);
+			}
+		}
+#endif
 	}
 
 	if ((rc = i2o_pci_alloc(c))) {