author     Markus Lidel <Markus.Lidel@shadowconnect.com>  2005-06-24 01:02:16 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>      2005-06-24 03:05:28 -0400
commit     f10378fff658f61307496e0ae00095041725cf07 (patch)
tree       0c0413649317677771fa325dded94f1e12a6a0b7
parent     f88e119c4b824a5017456fa094950d0f4092d96c (diff)
[PATCH] I2O: new sysfs attributes and Adaptec specific block device access and 64-bit DMA support
Changes:

- Added Bus-OSM, which can be used by user-space programs to reset a channel on the controller
- Made the ioctls in Config-OSM obsolete in favor of sysfs attributes and moved those to their own file
- Added sysfs attributes for firmware read and write access on I2O controllers
- Added special handling of firmware read and write access for Adaptec controllers
- Added vendor ID and product ID as sysfs attributes to Executive classes
- Added automatic notification of LCT change handling to Exec-OSM
- Added a flushing function to Block-OSM for a later barrier implementation
- Use PRIVATE messages for block access on Adaptec controllers, which are faster than BLOCK-class access
- Cleaned up support for Promise controllers
- New messages are now detected using the IRQ status register, as suggested by the I2O spec
- Added i2o_dma_high() and i2o_dma_low() functions
- Added a facility for SG tablesize calculation when using 32-bit and 64-bit DMA addresses
- Added i2o_dma_map_single() and i2o_dma_map_sg(), which can build the SG list for 32-bit as well as 64-bit DMA addresses

Signed-off-by: Markus Lidel <Markus.Lidel@shadowconnect.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  drivers/message/i2o/Kconfig      |  18
-rw-r--r--  drivers/message/i2o/Makefile     |   3
-rw-r--r--  drivers/message/i2o/bus-osm.c    | 164
-rw-r--r--  drivers/message/i2o/config-osm.c | 579
-rw-r--r--  drivers/message/i2o/driver.c     |  12
-rw-r--r--  drivers/message/i2o/exec-osm.c   |  74
-rw-r--r--  drivers/message/i2o/i2o_block.c  | 277
-rw-r--r--  drivers/message/i2o/i2o_block.h  |   4
-rw-r--r--  drivers/message/i2o/i2o_config.c | 156
-rw-r--r--  drivers/message/i2o/i2o_proc.c   |   4
-rw-r--r--  drivers/message/i2o/i2o_scsi.c   |  30
-rw-r--r--  drivers/message/i2o/iop.c        | 263
-rw-r--r--  drivers/message/i2o/pci.c        |  67
-rw-r--r--  include/linux/i2o-dev.h          |   6
-rw-r--r--  include/linux/i2o.h              | 321
15 files changed, 1446 insertions(+), 532 deletions(-)
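As a quick illustration of the new DMA helpers named in the changelog (i2o_dma_map_sg() and the SG-tablesize facility), the sketch below shows the usage pattern inferred from their callers in the i2o_block.c diff further down. The prototypes are added to include/linux/i2o.h by this patch; the return conventions shown here are assumptions derived from those callers, and the helper itself is editorial illustration, not part of the patch.

/*
 * Minimal sketch, assuming the in-kernel context of this patch:
 * how an OSM maps a request's scatterlist straight into the message frame.
 */
static int example_map_request(struct i2o_controller *c,
			       struct i2o_block_request *ireq,
			       u32 __iomem **mptr,
			       enum dma_data_direction dir)
{
	int nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table);

	/*
	 * i2o_dma_map_sg() maps the scatterlist for 32- or 64-bit DMA and
	 * writes the matching SG elements into the message at *mptr,
	 * advancing the pointer; it returns non-zero on success.
	 */
	if (!i2o_dma_map_sg(c, ireq->sg_table, nents, dir, mptr))
		return -ENOMEM;

	return 0;
}

The matching queue limit is derived once at probe time, as the i2o_block.c diff does with blk_queue_max_hw_segments(queue, i2o_sg_tablesize(c, body_size)).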
diff --git a/drivers/message/i2o/Kconfig b/drivers/message/i2o/Kconfig
index 8d132b0d6b12..ce278e060aca 100644
--- a/drivers/message/i2o/Kconfig
+++ b/drivers/message/i2o/Kconfig
@@ -35,6 +35,24 @@ config I2O_CONFIG
 	  To compile this support as a module, choose M here: the
 	  module will be called i2o_config.
 
+config I2O_CONFIG_OLD_IOCTL
+	bool "Enable ioctls (OBSOLETE)"
+	depends on I2O_CONFIG
+	default y
+	---help---
+	  Enables old ioctls.
+
+config I2O_BUS
+	tristate "I2O Bus Adapter OSM"
+	depends on I2O
+	---help---
+	  Include support for the I2O Bus Adapter OSM. The Bus Adapter OSM
+	  provides access to the busses on the I2O controller. The main purpose
+	  is to rescan the bus to find new devices.
+
+	  To compile this support as a module, choose M here: the
+	  module will be called i2o_bus.
+
 config I2O_BLOCK
 	tristate "I2O Block OSM"
 	depends on I2O
diff --git a/drivers/message/i2o/Makefile b/drivers/message/i2o/Makefile
index aabc6cdc3fce..2c2e39aa1efa 100644
--- a/drivers/message/i2o/Makefile
+++ b/drivers/message/i2o/Makefile
@@ -6,8 +6,11 @@
 #
 
 i2o_core-y		+= iop.o driver.o device.o debug.o pci.o exec-osm.o
+i2o_bus-y		+= bus-osm.o
+i2o_config-y		+= config-osm.o
 obj-$(CONFIG_I2O)	+= i2o_core.o
 obj-$(CONFIG_I2O_CONFIG)+= i2o_config.o
+obj-$(CONFIG_I2O_BUS)	+= i2o_bus.o
 obj-$(CONFIG_I2O_BLOCK)	+= i2o_block.o
 obj-$(CONFIG_I2O_SCSI)	+= i2o_scsi.o
 obj-$(CONFIG_I2O_PROC)	+= i2o_proc.o
diff --git a/drivers/message/i2o/bus-osm.c b/drivers/message/i2o/bus-osm.c
new file mode 100644
index 000000000000..d43c35894ae9
--- /dev/null
+++ b/drivers/message/i2o/bus-osm.c
@@ -0,0 +1,164 @@
1/*
2 * Bus Adapter OSM
3 *
4 * Copyright (C) 2005 Markus Lidel <Markus.Lidel@shadowconnect.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * Fixes/additions:
12 * Markus Lidel <Markus.Lidel@shadowconnect.com>
13 * initial version.
14 */
15
16#include <linux/module.h>
17#include <linux/i2o.h>
18
19#define OSM_NAME "bus-osm"
20#define OSM_VERSION "$Rev$"
21#define OSM_DESCRIPTION "I2O Bus Adapter OSM"
22
23static struct i2o_driver i2o_bus_driver;
24
25/* Bus OSM class handling definition */
26static struct i2o_class_id i2o_bus_class_id[] = {
27 {I2O_CLASS_BUS_ADAPTER},
28 {I2O_CLASS_END}
29};
30
31/**
32 * i2o_bus_scan - Scan the bus for new devices
33 * @dev: I2O device of the bus, which should be scanned
34 *
35 * Scans the bus dev for new / removed devices. After the scan a new LCT
36 * will be fetched automatically.
37 *
38 * Returns 0 on success or negative error code on failure.
39 */
40static int i2o_bus_scan(struct i2o_device *dev)
41{
42 struct i2o_message __iomem *msg;
43 u32 m;
44
45 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
46 if (m == I2O_QUEUE_EMPTY)
47 return -ETIMEDOUT;
48
49 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
50 writel(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data.tid,
51 &msg->u.head[1]);
52
53 return i2o_msg_post_wait(dev->iop, m, 60);
54};
55
56/**
57 * i2o_bus_store_scan - Scan the I2O Bus Adapter
58 * @d: device which should be scanned
59 *
60 * Returns count.
61 */
62static ssize_t i2o_bus_store_scan(struct device *d, const char *buf,
63 size_t count)
64{
65 struct i2o_device *i2o_dev = to_i2o_device(d);
66 int rc;
67
68 if ((rc = i2o_bus_scan(i2o_dev)))
69 osm_warn("bus scan failed %d\n", rc);
70
71 return count;
72}
73
74/* Bus Adapter OSM device attributes */
75static DEVICE_ATTR(scan, S_IWUSR, NULL, i2o_bus_store_scan);
76
77/**
78 * i2o_bus_probe - verify if dev is a I2O Bus Adapter device and install it
79 * @dev: device to verify if it is a I2O Bus Adapter device
80 *
81 * Because we want all Bus Adapters always return 0.
82 *
83 * Returns 0.
84 */
85static int i2o_bus_probe(struct device *dev)
86{
87 struct i2o_device *i2o_dev = to_i2o_device(get_device(dev));
88
89 device_create_file(dev, &dev_attr_scan);
90
91 osm_info("device added (TID: %03x)\n", i2o_dev->lct_data.tid);
92
93 return 0;
94};
95
96/**
97 * i2o_bus_remove - remove the I2O Bus Adapter device from the system again
98 * @dev: I2O Bus Adapter device which should be removed
99 *
100 * Always returns 0.
101 */
102static int i2o_bus_remove(struct device *dev)
103{
104 struct i2o_device *i2o_dev = to_i2o_device(dev);
105
106 device_remove_file(dev, &dev_attr_scan);
107
108 put_device(dev);
109
110 osm_info("device removed (TID: %03x)\n", i2o_dev->lct_data.tid);
111
112 return 0;
113};
114
115/* Bus Adapter OSM driver struct */
116static struct i2o_driver i2o_bus_driver = {
117 .name = OSM_NAME,
118 .classes = i2o_bus_class_id,
119 .driver = {
120 .probe = i2o_bus_probe,
121 .remove = i2o_bus_remove,
122 },
123};
124
125/**
126 * i2o_bus_init - Bus Adapter OSM initialization function
127 *
128 * Only register the Bus Adapter OSM in the I2O core.
129 *
130 * Returns 0 on success or negative error code on failure.
131 */
132static int __init i2o_bus_init(void)
133{
134 int rc;
135
136 printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
137
138 /* Register Bus Adapter OSM into I2O core */
139 rc = i2o_driver_register(&i2o_bus_driver);
140 if (rc) {
141 osm_err("Could not register Bus Adapter OSM\n");
142 return rc;
143 }
144
145 return 0;
146};
147
148/**
149 * i2o_bus_exit - Bus Adapter OSM exit function
150 *
151 * Unregisters Bus Adapter OSM from I2O core.
152 */
153static void __exit i2o_bus_exit(void)
154{
155 i2o_driver_unregister(&i2o_bus_driver);
156};
157
158MODULE_AUTHOR("Markus Lidel <Markus.Lidel@shadowconnect.com>");
159MODULE_LICENSE("GPL");
160MODULE_DESCRIPTION(OSM_DESCRIPTION);
161MODULE_VERSION(OSM_VERSION);
162
163module_init(i2o_bus_init);
164module_exit(i2o_bus_exit);
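The scan attribute created above is write-only (S_IWUSR); any write to it triggers i2o_bus_scan() for that bus adapter. A minimal user-space trigger could look like the following sketch; the sysfs path is an assumption for illustration only, since the actual name depends on how the I2O device is enumerated on a given system.

/* Hypothetical userspace helper: kick off a bus rescan via the new
 * "scan" attribute.  The device path below is illustrative only. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/bus/i2o/devices/0:0009/scan", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (write(fd, "1", 1) != 1)
		perror("write");

	close(fd);
	return 0;
}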
diff --git a/drivers/message/i2o/config-osm.c b/drivers/message/i2o/config-osm.c
new file mode 100644
index 000000000000..d0267609a949
--- /dev/null
+++ b/drivers/message/i2o/config-osm.c
@@ -0,0 +1,579 @@
1/*
2 * Configuration OSM
3 *
4 * Copyright (C) 2005 Markus Lidel <Markus.Lidel@shadowconnect.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * Fixes/additions:
12 * Markus Lidel <Markus.Lidel@shadowconnect.com>
13 * initial version.
14 */
15
16#include <linux/module.h>
17#include <linux/i2o.h>
18#include <linux/namei.h>
19
20#include <asm/uaccess.h>
21
22#define OSM_NAME "config-osm"
23#define OSM_VERSION "1.248"
24#define OSM_DESCRIPTION "I2O Configuration OSM"
25
26/* access mode user rw */
27#define S_IWRSR (S_IRUSR | S_IWUSR)
28
29static struct i2o_driver i2o_config_driver;
30
31/* Special file operations for sysfs */
32struct fops_attribute {
33 struct bin_attribute bin;
34 struct file_operations fops;
35};
36
37/**
38 * sysfs_read_dummy
39 */
40static ssize_t sysfs_read_dummy(struct kobject *kobj, char *buf, loff_t offset,
41 size_t count)
42{
43 return 0;
44};
45
46/**
47 * sysfs_write_dummy
48 */
49static ssize_t sysfs_write_dummy(struct kobject *kobj, char *buf, loff_t offset,
50 size_t count)
51{
52 return 0;
53};
54
55/**
56 * sysfs_create_fops_file - Creates attribute with special file operations
57 * @kobj: kobject which should contains the attribute
58 * @attr: attributes which should be used to create file
59 *
60 * First creates attribute @attr in kobject @kobj. If it is the first time
61 * this function is called, merge old fops from sysfs with new one and
62 * write it back. Afterwords the new fops will be set for the created
63 * attribute.
64 *
65 * Returns 0 on success or negative error code on failure.
66 */
67static int sysfs_create_fops_file(struct kobject *kobj,
68 struct fops_attribute *attr)
69{
70 struct file_operations tmp, *fops;
71 struct dentry *d;
72 struct qstr qstr;
73 int rc;
74
75 fops = &attr->fops;
76
77 if (fops->read)
78 attr->bin.read = sysfs_read_dummy;
79
80 if (fops->write)
81 attr->bin.write = sysfs_write_dummy;
82
83 if ((rc = sysfs_create_bin_file(kobj, &attr->bin)))
84 return rc;
85
86 qstr.name = attr->bin.attr.name;
87 qstr.len = strlen(qstr.name);
88 qstr.hash = full_name_hash(qstr.name, qstr.len);
89
90 if ((d = lookup_hash(&qstr, kobj->dentry))) {
91 if (!fops->owner) {
92 memcpy(&tmp, d->d_inode->i_fop, sizeof(tmp));
93 if (fops->read)
94 tmp.read = fops->read;
95 if (fops->write)
96 tmp.write = fops->write;
97 memcpy(fops, &tmp, sizeof(tmp));
98 }
99
100 d->d_inode->i_fop = fops;
101 } else
102 sysfs_remove_bin_file(kobj, &attr->bin);
103
104 return -ENOENT;
105};
106
107/**
108 * sysfs_remove_fops_file - Remove attribute with special file operations
109 * @kobj: kobject which contains the attribute
110 * @attr: attributes which are used to create file
111 *
112 * Only wrapper arround sysfs_remove_bin_file()
113 *
114 * Returns 0 on success or negative error code on failure.
115 */
116static inline int sysfs_remove_fops_file(struct kobject *kobj,
117 struct fops_attribute *attr)
118{
119 return sysfs_remove_bin_file(kobj, &attr->bin);
120};
121
122/**
123 * i2o_config_read_hrt - Returns the HRT of the controller
124 * @kob: kernel object handle
125 * @buf: buffer into which the HRT should be copied
126 * @off: file offset
127 * @count: number of bytes to read
128 *
129 * Put @count bytes starting at @off into @buf from the HRT of the I2O
130 * controller corresponding to @kobj.
131 *
132 * Returns number of bytes copied into buffer.
133 */
134static ssize_t i2o_config_read_hrt(struct kobject *kobj, char *buf,
135 loff_t offset, size_t count)
136{
137 struct i2o_controller *c = kobj_to_i2o_device(kobj)->iop;
138 i2o_hrt *hrt = c->hrt.virt;
139
140 u32 size = (hrt->num_entries * hrt->entry_len + 2) * 4;
141
142 if (offset > size)
143 return 0;
144
145 if (offset + count > size)
146 count = size - offset;
147
148 memcpy(buf, (u8 *) hrt + offset, count);
149
150 return count;
151};
152
153/**
154 * i2o_config_read_lct - Returns the LCT of the controller
155 * @kob: kernel object handle
156 * @buf: buffer into which the LCT should be copied
157 * @off: file offset
158 * @count: number of bytes to read
159 *
160 * Put @count bytes starting at @off into @buf from the LCT of the I2O
161 * controller corresponding to @kobj.
162 *
163 * Returns number of bytes copied into buffer.
164 */
165static ssize_t i2o_config_read_lct(struct kobject *kobj, char *buf,
166 loff_t offset, size_t count)
167{
168 struct i2o_controller *c = kobj_to_i2o_device(kobj)->iop;
169 u32 size = c->lct->table_size * 4;
170
171 if (offset > size)
172 return 0;
173
174 if (offset + count > size)
175 count = size - offset;
176
177 memcpy(buf, (u8 *) c->lct + offset, count);
178
179 return count;
180};
181
182#define I2O_CONFIG_SW_ATTR(_name,_mode,_type,_swid) \
183static ssize_t i2o_config_##_name##_read(struct file *file, char __user *buf, size_t count, loff_t * offset) { \
184 return i2o_config_sw_read(file, buf, count, offset, _type, _swid); \
185};\
186\
187static ssize_t i2o_config_##_name##_write(struct file *file, const char __user *buf, size_t count, loff_t * offset) { \
188 return i2o_config_sw_write(file, buf, count, offset, _type, _swid); \
189}; \
190\
191static struct fops_attribute i2o_config_attr_##_name = { \
192 .bin = { .attr = { .name = __stringify(_name), .mode = _mode, \
193 .owner = THIS_MODULE }, \
194 .size = 0, }, \
195 .fops = { .write = i2o_config_##_name##_write, \
196 .read = i2o_config_##_name##_read} \
197};
198
199#ifdef CONFIG_I2O_EXT_ADAPTEC
200
201/**
202 * i2o_config_dpt_reagion - Converts type and id to flash region
203 * @swtype: type of software module reading
204 * @swid: id of software which should be read
205 *
206 * Converts type and id from I2O spec to the matching region for DPT /
207 * Adaptec controllers.
208 *
209 * Returns region which match type and id or -1 on error.
210 */
211static u32 i2o_config_dpt_region(u8 swtype, u8 swid)
212{
213 switch (swtype) {
214 case I2O_SOFTWARE_MODULE_IRTOS:
215 /*
216 * content: operation firmware
217 * region size:
218 * 0xbc000 for 2554, 3754, 2564, 3757
219 * 0x170000 for 2865
220 * 0x17c000 for 3966
221 */
222 if (!swid)
223 return 0;
224
225 break;
226
227 case I2O_SOFTWARE_MODULE_IOP_PRIVATE:
228 /*
229 * content: BIOS and SMOR
230 * BIOS size: first 0x8000 bytes
231 * region size:
232 * 0x40000 for 2554, 3754, 2564, 3757
233 * 0x80000 for 2865, 3966
234 */
235 if (!swid)
236 return 1;
237
238 break;
239
240 case I2O_SOFTWARE_MODULE_IOP_CONFIG:
241 switch (swid) {
242 case 0:
243 /*
244 * content: NVRAM defaults
245 * region size: 0x2000 bytes
246 */
247 return 2;
248 case 1:
249 /*
250 * content: serial number
251 * region size: 0x2000 bytes
252 */
253 return 3;
254 }
255 break;
256 }
257
258 return -1;
259};
260
261#endif
262
263/**
264 * i2o_config_sw_read - Read a software module from controller
265 * @file: file pointer
266 * @buf: buffer into which the data should be copied
267 * @count: number of bytes to read
268 * @off: file offset
269 * @swtype: type of software module reading
270 * @swid: id of software which should be read
271 *
272 * Transfers @count bytes at offset @offset from IOP into buffer using
273 * type @swtype and id @swid as described in I2O spec.
274 *
275 * Returns number of bytes copied into buffer or error code on failure.
276 */
277static ssize_t i2o_config_sw_read(struct file *file, char __user * buf,
278 size_t count, loff_t * offset, u8 swtype,
279 u32 swid)
280{
281 struct sysfs_dirent *sd = file->f_dentry->d_parent->d_fsdata;
282 struct kobject *kobj = sd->s_element;
283 struct i2o_controller *c = kobj_to_i2o_device(kobj)->iop;
284 u32 m, function = I2O_CMD_SW_UPLOAD;
285 struct i2o_dma buffer;
286 struct i2o_message __iomem *msg;
287 u32 __iomem *mptr;
288 int rc, status;
289
290 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
291 if (m == I2O_QUEUE_EMPTY)
292 return -EBUSY;
293
294 mptr = &msg->body[3];
295
296 if ((rc = i2o_dma_alloc(&c->pdev->dev, &buffer, count, GFP_KERNEL))) {
297 i2o_msg_nop(c, m);
298 return rc;
299 }
300#ifdef CONFIG_I2O_EXT_ADAPTEC
301 if (c->adaptec) {
302 mptr = &msg->body[4];
303 function = I2O_CMD_PRIVATE;
304
305 writel(TEN_WORD_MSG_SIZE | SGL_OFFSET_8, &msg->u.head[0]);
306
307 writel(I2O_VENDOR_DPT << 16 | I2O_DPT_FLASH_READ,
308 &msg->body[0]);
309 writel(i2o_config_dpt_region(swtype, swid), &msg->body[1]);
310 writel(*offset, &msg->body[2]);
311 writel(count, &msg->body[3]);
312 } else
313#endif
314 writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]);
315
316 writel(0xD0000000 | count, mptr++);
317 writel(buffer.phys, mptr);
318
319 writel(function << 24 | HOST_TID << 12 | ADAPTER_TID, &msg->u.head[1]);
320 writel(i2o_config_driver.context, &msg->u.head[2]);
321 writel(0, &msg->u.head[3]);
322
323#ifdef CONFIG_I2O_EXT_ADAPTEC
324 if (!c->adaptec)
325#endif
326 {
327 writel((u32) swtype << 16 | (u32) 1 << 8, &msg->body[0]);
328 writel(0, &msg->body[1]);
329 writel(swid, &msg->body[2]);
330 }
331
332 status = i2o_msg_post_wait_mem(c, m, 60, &buffer);
333
334 if (status == I2O_POST_WAIT_OK) {
335 if (!(rc = copy_to_user(buf, buffer.virt, count))) {
336 rc = count;
337 *offset += count;
338 }
339 } else
340 rc = -EIO;
341
342 if (status != -ETIMEDOUT)
343 i2o_dma_free(&c->pdev->dev, &buffer);
344
345 return rc;
346};
347
348/**
349 * i2o_config_sw_write - Write a software module to controller
350 * @file: file pointer
351 * @buf: buffer into which the data should be copied
352 * @count: number of bytes to read
353 * @off: file offset
354 * @swtype: type of software module writing
355 * @swid: id of software which should be written
356 *
357 * Transfers @count bytes at offset @offset from buffer to IOP using
358 * type @swtype and id @swid as described in I2O spec.
359 *
360 * Returns number of bytes copied from buffer or error code on failure.
361 */
362static ssize_t i2o_config_sw_write(struct file *file, const char __user * buf,
363 size_t count, loff_t * offset, u8 swtype,
364 u32 swid)
365{
366 struct sysfs_dirent *sd = file->f_dentry->d_parent->d_fsdata;
367 struct kobject *kobj = sd->s_element;
368 struct i2o_controller *c = kobj_to_i2o_device(kobj)->iop;
369 u32 m, function = I2O_CMD_SW_DOWNLOAD;
370 struct i2o_dma buffer;
371 struct i2o_message __iomem *msg;
372 u32 __iomem *mptr;
373 int rc, status;
374
375 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
376 if (m == I2O_QUEUE_EMPTY)
377 return -EBUSY;
378
379 mptr = &msg->body[3];
380
381 if ((rc = i2o_dma_alloc(&c->pdev->dev, &buffer, count, GFP_KERNEL)))
382 goto nop_msg;
383
384 if ((rc = copy_from_user(buffer.virt, buf, count)))
385 goto free_buffer;
386
387#ifdef CONFIG_I2O_EXT_ADAPTEC
388 if (c->adaptec) {
389 mptr = &msg->body[4];
390 function = I2O_CMD_PRIVATE;
391
392 writel(TEN_WORD_MSG_SIZE | SGL_OFFSET_8, &msg->u.head[0]);
393
394 writel(I2O_VENDOR_DPT << 16 | I2O_DPT_FLASH_WRITE,
395 &msg->body[0]);
396 writel(i2o_config_dpt_region(swtype, swid), &msg->body[1]);
397 writel(*offset, &msg->body[2]);
398 writel(count, &msg->body[3]);
399 } else
400#endif
401 writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]);
402
403 writel(0xD4000000 | count, mptr++);
404 writel(buffer.phys, mptr);
405
406 writel(function << 24 | HOST_TID << 12 | ADAPTER_TID, &msg->u.head[1]);
407 writel(i2o_config_driver.context, &msg->u.head[2]);
408 writel(0, &msg->u.head[3]);
409
410#ifdef CONFIG_I2O_EXT_ADAPTEC
411 if (!c->adaptec)
412#endif
413 {
414 writel((u32) swtype << 16 | (u32) 1 << 8, &msg->body[0]);
415 writel(0, &msg->body[1]);
416 writel(swid, &msg->body[2]);
417 }
418
419 status = i2o_msg_post_wait_mem(c, m, 60, &buffer);
420
421 if (status != -ETIMEDOUT)
422 i2o_dma_free(&c->pdev->dev, &buffer);
423
424 if (status != I2O_POST_WAIT_OK)
425 return -EIO;
426
427 *offset += count;
428
429 return count;
430
431 free_buffer:
432 i2o_dma_free(&c->pdev->dev, &buffer);
433
434 nop_msg:
435 i2o_msg_nop(c, m);
436
437 return rc;
438};
439
440/* attribute for HRT in sysfs */
441static struct bin_attribute i2o_config_hrt_attr = {
442 .attr = {
443 .name = "hrt",
444 .mode = S_IRUGO,
445 .owner = THIS_MODULE},
446 .size = 0,
447 .read = i2o_config_read_hrt
448};
449
450/* attribute for LCT in sysfs */
451static struct bin_attribute i2o_config_lct_attr = {
452 .attr = {
453 .name = "lct",
454 .mode = S_IRUGO,
455 .owner = THIS_MODULE},
456 .size = 0,
457 .read = i2o_config_read_lct
458};
459
460/* IRTOS firmware access */
461I2O_CONFIG_SW_ATTR(irtos, S_IWRSR, I2O_SOFTWARE_MODULE_IRTOS, 0);
462
463#ifdef CONFIG_I2O_EXT_ADAPTEC
464
465/*
466 * attribute for BIOS / SMOR, nvram and serial number access on DPT / Adaptec
467 * controllers
468 */
469I2O_CONFIG_SW_ATTR(bios, S_IWRSR, I2O_SOFTWARE_MODULE_IOP_PRIVATE, 0);
470I2O_CONFIG_SW_ATTR(nvram, S_IWRSR, I2O_SOFTWARE_MODULE_IOP_CONFIG, 0);
471I2O_CONFIG_SW_ATTR(serial, S_IWRSR, I2O_SOFTWARE_MODULE_IOP_CONFIG, 1);
472
473#endif
474
475/**
476 * i2o_config_notify_controller_add - Notify of added controller
477 * @c: the controller which was added
478 *
479 * If a I2O controller is added, we catch the notification to add sysfs
480 * entries.
481 */
482static void i2o_config_notify_controller_add(struct i2o_controller *c)
483{
484 struct kobject *kobj = &c->exec->device.kobj;
485
486 sysfs_create_bin_file(kobj, &i2o_config_hrt_attr);
487 sysfs_create_bin_file(kobj, &i2o_config_lct_attr);
488
489 sysfs_create_fops_file(kobj, &i2o_config_attr_irtos);
490#ifdef CONFIG_I2O_EXT_ADAPTEC
491 if (c->adaptec) {
492 sysfs_create_fops_file(kobj, &i2o_config_attr_bios);
493 sysfs_create_fops_file(kobj, &i2o_config_attr_nvram);
494 sysfs_create_fops_file(kobj, &i2o_config_attr_serial);
495 }
496#endif
497};
498
499/**
500 * i2o_config_notify_controller_remove - Notify of removed controller
501 * @c: the controller which was removed
502 *
503 * If a I2O controller is removed, we catch the notification to remove the
504 * sysfs entries.
505 */
506static void i2o_config_notify_controller_remove(struct i2o_controller *c)
507{
508 struct kobject *kobj = &c->exec->device.kobj;
509
510#ifdef CONFIG_I2O_EXT_ADAPTEC
511 if (c->adaptec) {
512 sysfs_remove_fops_file(kobj, &i2o_config_attr_serial);
513 sysfs_remove_fops_file(kobj, &i2o_config_attr_nvram);
514 sysfs_remove_fops_file(kobj, &i2o_config_attr_bios);
515 }
516#endif
517 sysfs_remove_fops_file(kobj, &i2o_config_attr_irtos);
518
519 sysfs_remove_bin_file(kobj, &i2o_config_lct_attr);
520 sysfs_remove_bin_file(kobj, &i2o_config_hrt_attr);
521};
522
523/* Config OSM driver struct */
524static struct i2o_driver i2o_config_driver = {
525 .name = OSM_NAME,
526 .notify_controller_add = i2o_config_notify_controller_add,
527 .notify_controller_remove = i2o_config_notify_controller_remove
528};
529
530#ifdef CONFIG_I2O_CONFIG_OLD_IOCTL
531#include "i2o_config.c"
532#endif
533
534/**
535 * i2o_config_init - Configuration OSM initialization function
536 *
537 * Registers Configuration OSM in the I2O core and if old ioctl's are
538 * compiled in initialize them.
539 *
540 * Returns 0 on success or negative error code on failure.
541 */
542static int __init i2o_config_init(void)
543{
544 printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
545
546 if (i2o_driver_register(&i2o_config_driver)) {
547 osm_err("handler register failed.\n");
548 return -EBUSY;
549 }
550#ifdef CONFIG_I2O_CONFIG_OLD_IOCTL
551 if (i2o_config_old_init())
552 i2o_driver_unregister(&i2o_config_driver);
553#endif
554
555 return 0;
556}
557
558/**
559 * i2o_config_exit - Configuration OSM exit function
560 *
561 * If old ioctl's are compiled in exit remove them and unregisters
562 * Configuration OSM from I2O core.
563 */
564static void i2o_config_exit(void)
565{
566#ifdef CONFIG_I2O_CONFIG_OLD_IOCTL
567 i2o_config_old_exit();
568#endif
569
570 i2o_driver_unregister(&i2o_config_driver);
571}
572
573MODULE_AUTHOR("Markus Lidel <Markus.Lidel@shadowconnect.com>");
574MODULE_LICENSE("GPL");
575MODULE_DESCRIPTION(OSM_DESCRIPTION);
576MODULE_VERSION(OSM_VERSION);
577
578module_init(i2o_config_init);
579module_exit(i2o_config_exit);
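For readability, this is approximately what the I2O_CONFIG_SW_ATTR() invocation for the IRTOS firmware attribute above expands to (whitespace adjusted; no new code, just the macro written out):

static ssize_t i2o_config_irtos_read(struct file *file, char __user *buf,
				     size_t count, loff_t *offset)
{
	/* read the IRTOS software module (swid 0) from the controller */
	return i2o_config_sw_read(file, buf, count, offset,
				  I2O_SOFTWARE_MODULE_IRTOS, 0);
};

static ssize_t i2o_config_irtos_write(struct file *file,
				      const char __user *buf,
				      size_t count, loff_t *offset)
{
	/* write the IRTOS software module (swid 0) to the controller */
	return i2o_config_sw_write(file, buf, count, offset,
				   I2O_SOFTWARE_MODULE_IRTOS, 0);
};

static struct fops_attribute i2o_config_attr_irtos = {
	.bin = { .attr = { .name = "irtos", .mode = S_IWRSR,
			   .owner = THIS_MODULE },
		 .size = 0, },
	.fops = { .write = i2o_config_irtos_write,
		  .read = i2o_config_irtos_read }
};

The bios, nvram and serial attributes for DPT/Adaptec controllers expand the same way, only with a different software module type and id.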
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index bebdd509b5d8..393be8e2914c 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -180,7 +180,13 @@ int i2o_driver_dispatch(struct i2o_controller *c, u32 m)
 {
 	struct i2o_driver *drv;
 	struct i2o_message __iomem *msg = i2o_msg_out_to_virt(c, m);
-	u32 context = readl(&msg->u.s.icntxt);
+	u32 context;
+	unsigned long flags;
+
+	if(unlikely(!msg))
+		return -EIO;
+
+	context = readl(&msg->u.s.icntxt);
 
 	if (unlikely(context >= i2o_max_drivers)) {
 		osm_warn("%s: Spurious reply to unknown driver %d\n", c->name,
@@ -188,9 +194,9 @@ int i2o_driver_dispatch(struct i2o_controller *c, u32 m)
 		return -EIO;
 	}
 
-	spin_lock(&i2o_drivers_lock);
+	spin_lock_irqsave(&i2o_drivers_lock, flags);
 	drv = i2o_drivers[context];
-	spin_unlock(&i2o_drivers_lock);
+	spin_unlock_irqrestore(&i2o_drivers_lock, flags);
 
 	if (unlikely(!drv)) {
 		osm_warn("%s: Spurious reply to unknown driver %d\n", c->name,
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index 5581344fbba6..0160221c802a 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -206,6 +206,7 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
206 u32 context) 206 u32 context)
207{ 207{
208 struct i2o_exec_wait *wait, *tmp; 208 struct i2o_exec_wait *wait, *tmp;
209 unsigned long flags;
209 static spinlock_t lock = SPIN_LOCK_UNLOCKED; 210 static spinlock_t lock = SPIN_LOCK_UNLOCKED;
210 int rc = 1; 211 int rc = 1;
211 212
@@ -216,11 +217,13 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
216 * already expired. Not much we can do about that except log it for 217 * already expired. Not much we can do about that except log it for
217 * debug purposes, increase timeout, and recompile. 218 * debug purposes, increase timeout, and recompile.
218 */ 219 */
219 spin_lock(&lock); 220 spin_lock_irqsave(&lock, flags);
220 list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) { 221 list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) {
221 if (wait->tcntxt == context) { 222 if (wait->tcntxt == context) {
222 list_del(&wait->list); 223 list_del(&wait->list);
223 224
225 spin_unlock_irqrestore(&lock, flags);
226
224 wait->m = m; 227 wait->m = m;
225 wait->msg = msg; 228 wait->msg = msg;
226 wait->complete = 1; 229 wait->complete = 1;
@@ -242,13 +245,11 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
242 rc = -1; 245 rc = -1;
243 } 246 }
244 247
245 spin_unlock(&lock);
246
247 return rc; 248 return rc;
248 } 249 }
249 } 250 }
250 251
251 spin_unlock(&lock); 252 spin_unlock_irqrestore(&lock, flags);
252 253
253 osm_warn("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name, 254 osm_warn("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name,
254 context); 255 context);
@@ -257,6 +258,50 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
257}; 258};
258 259
259/** 260/**
261 * i2o_exec_show_vendor_id - Displays Vendor ID of controller
262 * @d: device of which the Vendor ID should be displayed
263 * @buf: buffer into which the Vendor ID should be printed
264 *
265 * Returns number of bytes printed into buffer.
266 */
267static ssize_t i2o_exec_show_vendor_id(struct device *d, char *buf)
268{
269 struct i2o_device *dev = to_i2o_device(d);
270 u16 id;
271
272 if (i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) {
273 sprintf(buf, "0x%04x", id);
274 return strlen(buf) + 1;
275 }
276
277 return 0;
278};
279
280/**
281 * i2o_exec_show_product_id - Displays Product ID of controller
282 * @d: device of which the Product ID should be displayed
283 * @buf: buffer into which the Product ID should be printed
284 *
285 * Returns number of bytes printed into buffer.
286 */
287static ssize_t i2o_exec_show_product_id(struct device *d, char *buf)
288{
289 struct i2o_device *dev = to_i2o_device(d);
290 u16 id;
291
292 if (i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) {
293 sprintf(buf, "0x%04x", id);
294 return strlen(buf) + 1;
295 }
296
297 return 0;
298};
299
300/* Exec-OSM device attributes */
301static DEVICE_ATTR(vendor_id, S_IRUGO, i2o_exec_show_vendor_id, NULL);
302static DEVICE_ATTR(product_id, S_IRUGO, i2o_exec_show_product_id, NULL);
303
304/**
260 * i2o_exec_probe - Called if a new I2O device (executive class) appears 305 * i2o_exec_probe - Called if a new I2O device (executive class) appears
261 * @dev: I2O device which should be probed 306 * @dev: I2O device which should be probed
262 * 307 *
@@ -268,10 +313,16 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
268static int i2o_exec_probe(struct device *dev) 313static int i2o_exec_probe(struct device *dev)
269{ 314{
270 struct i2o_device *i2o_dev = to_i2o_device(dev); 315 struct i2o_device *i2o_dev = to_i2o_device(dev);
316 struct i2o_controller *c = i2o_dev->iop;
271 317
272 i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff); 318 i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff);
273 319
274 i2o_dev->iop->exec = i2o_dev; 320 c->exec = i2o_dev;
321
322 i2o_exec_lct_notify(c, c->lct->change_ind + 1);
323
324 device_create_file(dev, &dev_attr_vendor_id);
325 device_create_file(dev, &dev_attr_product_id);
275 326
276 return 0; 327 return 0;
277}; 328};
@@ -286,6 +337,9 @@ static int i2o_exec_probe(struct device *dev)
286 */ 337 */
287static int i2o_exec_remove(struct device *dev) 338static int i2o_exec_remove(struct device *dev)
288{ 339{
340 device_remove_file(dev, &dev_attr_product_id);
341 device_remove_file(dev, &dev_attr_vendor_id);
342
289 i2o_event_register(to_i2o_device(dev), &i2o_exec_driver, 0, 0); 343 i2o_event_register(to_i2o_device(dev), &i2o_exec_driver, 0, 0);
290 344
291 return 0; 345 return 0;
@@ -297,12 +351,16 @@ static int i2o_exec_remove(struct device *dev)
297 * 351 *
298 * This function handles asynchronus LCT NOTIFY replies. It parses the 352 * This function handles asynchronus LCT NOTIFY replies. It parses the
299 * new LCT and if the buffer for the LCT was to small sends a LCT NOTIFY 353 * new LCT and if the buffer for the LCT was to small sends a LCT NOTIFY
300 * again. 354 * again, otherwise send LCT NOTIFY to get informed on next LCT change.
301 */ 355 */
302static void i2o_exec_lct_modified(struct i2o_controller *c) 356static void i2o_exec_lct_modified(struct i2o_controller *c)
303{ 357{
304 if (i2o_device_parse_lct(c) == -EAGAIN) 358 u32 change_ind = 0;
305 i2o_exec_lct_notify(c, 0); 359
360 if (i2o_device_parse_lct(c) != -EAGAIN)
361 change_ind = c->lct->change_ind + 1;
362
363 i2o_exec_lct_notify(c, change_ind);
306}; 364};
307 365
308/** 366/**
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index e69421e36ac5..1dd2b9dad50e 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -147,6 +147,29 @@ static int i2o_block_device_flush(struct i2o_device *dev)
147}; 147};
148 148
149/** 149/**
150 * i2o_block_issue_flush - device-flush interface for block-layer
151 * @queue: the request queue of the device which should be flushed
152 * @disk: gendisk
153 * @error_sector: error offset
154 *
155 * Helper function to provide flush functionality to block-layer.
156 *
157 * Returns 0 on success or negative error code on failure.
158 */
159
160static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk,
161 sector_t * error_sector)
162{
163 struct i2o_block_device *i2o_blk_dev = queue->queuedata;
164 int rc = -ENODEV;
165
166 if (likely(i2o_blk_dev))
167 rc = i2o_block_device_flush(i2o_blk_dev->i2o_dev);
168
169 return rc;
170}
171
172/**
150 * i2o_block_device_mount - Mount (load) the media of device dev 173 * i2o_block_device_mount - Mount (load) the media of device dev
151 * @dev: I2O device which should receive the mount request 174 * @dev: I2O device which should receive the mount request
152 * @media_id: Media Identifier 175 * @media_id: Media Identifier
@@ -299,28 +322,31 @@ static inline void i2o_block_request_free(struct i2o_block_request *ireq)
299 322
300/** 323/**
301 * i2o_block_sglist_alloc - Allocate the SG list and map it 324 * i2o_block_sglist_alloc - Allocate the SG list and map it
325 * @c: I2O controller to which the request belongs
302 * @ireq: I2O block request 326 * @ireq: I2O block request
303 * 327 *
304 * Builds the SG list and map it into to be accessable by the controller. 328 * Builds the SG list and map it to be accessable by the controller.
305 * 329 *
306 * Returns the number of elements in the SG list or 0 on failure. 330 * Returns 0 on failure or 1 on success.
307 */ 331 */
308static inline int i2o_block_sglist_alloc(struct i2o_block_request *ireq) 332static inline int i2o_block_sglist_alloc(struct i2o_controller *c,
333 struct i2o_block_request *ireq,
334 u32 __iomem ** mptr)
309{ 335{
310 struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev;
311 int nents; 336 int nents;
337 enum dma_data_direction direction;
312 338
339 ireq->dev = &c->pdev->dev;
313 nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table); 340 nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table);
314 341
315 if (rq_data_dir(ireq->req) == READ) 342 if (rq_data_dir(ireq->req) == READ)
316 ireq->sg_dma_direction = PCI_DMA_FROMDEVICE; 343 direction = PCI_DMA_FROMDEVICE;
317 else 344 else
318 ireq->sg_dma_direction = PCI_DMA_TODEVICE; 345 direction = PCI_DMA_TODEVICE;
319 346
320 ireq->sg_nents = dma_map_sg(dev, ireq->sg_table, nents, 347 ireq->sg_nents = nents;
321 ireq->sg_dma_direction);
322 348
323 return ireq->sg_nents; 349 return i2o_dma_map_sg(c, ireq->sg_table, nents, direction, mptr);
324}; 350};
325 351
326/** 352/**
@@ -331,10 +357,14 @@ static inline int i2o_block_sglist_alloc(struct i2o_block_request *ireq)
331 */ 357 */
332static inline void i2o_block_sglist_free(struct i2o_block_request *ireq) 358static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
333{ 359{
334 struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev; 360 enum dma_data_direction direction;
335 361
336 dma_unmap_sg(dev, ireq->sg_table, ireq->sg_nents, 362 if (rq_data_dir(ireq->req) == READ)
337 ireq->sg_dma_direction); 363 direction = PCI_DMA_FROMDEVICE;
364 else
365 direction = PCI_DMA_TODEVICE;
366
367 dma_unmap_sg(ireq->dev, ireq->sg_table, ireq->sg_nents, direction);
338}; 368};
339 369
340/** 370/**
@@ -352,6 +382,11 @@ static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
352 struct i2o_block_device *i2o_blk_dev = q->queuedata; 382 struct i2o_block_device *i2o_blk_dev = q->queuedata;
353 struct i2o_block_request *ireq; 383 struct i2o_block_request *ireq;
354 384
385 if (unlikely(!i2o_blk_dev)) {
386 osm_err("block device already removed\n");
387 return BLKPREP_KILL;
388 }
389
355 /* request is already processed by us, so return */ 390 /* request is already processed by us, so return */
356 if (req->flags & REQ_SPECIAL) { 391 if (req->flags & REQ_SPECIAL) {
357 osm_debug("REQ_SPECIAL already set!\n"); 392 osm_debug("REQ_SPECIAL already set!\n");
@@ -414,11 +449,11 @@ static void i2o_block_end_request(struct request *req, int uptodate,
414{ 449{
415 struct i2o_block_request *ireq = req->special; 450 struct i2o_block_request *ireq = req->special;
416 struct i2o_block_device *dev = ireq->i2o_blk_dev; 451 struct i2o_block_device *dev = ireq->i2o_blk_dev;
417 request_queue_t *q = dev->gd->queue; 452 request_queue_t *q = req->q;
418 unsigned long flags; 453 unsigned long flags;
419 454
420 if (end_that_request_chunk(req, uptodate, nr_bytes)) { 455 if (end_that_request_chunk(req, uptodate, nr_bytes)) {
421 int leftover = (req->hard_nr_sectors << 9); 456 int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT);
422 457
423 if (blk_pc_request(req)) 458 if (blk_pc_request(req))
424 leftover = req->data_len; 459 leftover = req->data_len;
@@ -432,8 +467,11 @@ static void i2o_block_end_request(struct request *req, int uptodate,
432 spin_lock_irqsave(q->queue_lock, flags); 467 spin_lock_irqsave(q->queue_lock, flags);
433 468
434 end_that_request_last(req); 469 end_that_request_last(req);
435 dev->open_queue_depth--; 470
436 list_del(&ireq->queue); 471 if (likely(dev)) {
472 dev->open_queue_depth--;
473 list_del(&ireq->queue);
474 }
437 475
438 blk_start_queue(q); 476 blk_start_queue(q);
439 477
@@ -483,8 +521,8 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m,
483 * Don't stick a supertrak100 into cache aggressive modes 521 * Don't stick a supertrak100 into cache aggressive modes
484 */ 522 */
485 523
486 osm_err("%03x error status: %02x, detailed status: %04x\n", 524 osm_err("TID %03x error status: 0x%02x, detailed status: "
487 (le32_to_cpu(msg->u.head[1]) >> 12 & 0xfff), 525 "0x%04x\n", (le32_to_cpu(msg->u.head[1]) >> 12 & 0xfff),
488 status >> 24, status & 0xffff); 526 status >> 24, status & 0xffff);
489 527
490 req->errors++; 528 req->errors++;
@@ -705,18 +743,25 @@ static int i2o_block_media_changed(struct gendisk *disk)
705static int i2o_block_transfer(struct request *req) 743static int i2o_block_transfer(struct request *req)
706{ 744{
707 struct i2o_block_device *dev = req->rq_disk->private_data; 745 struct i2o_block_device *dev = req->rq_disk->private_data;
708 struct i2o_controller *c = dev->i2o_dev->iop; 746 struct i2o_controller *c;
709 int tid = dev->i2o_dev->lct_data.tid; 747 int tid = dev->i2o_dev->lct_data.tid;
710 struct i2o_message __iomem *msg; 748 struct i2o_message __iomem *msg;
711 void __iomem *mptr; 749 u32 __iomem *mptr;
712 struct i2o_block_request *ireq = req->special; 750 struct i2o_block_request *ireq = req->special;
713 struct scatterlist *sg;
714 int sgnum;
715 int i;
716 u32 m; 751 u32 m;
717 u32 tcntxt; 752 u32 tcntxt;
718 u32 sg_flags; 753 u32 sgl_offset = SGL_OFFSET_8;
754 u32 ctl_flags = 0x00000000;
719 int rc; 755 int rc;
756 u32 cmd;
757
758 if (unlikely(!dev->i2o_dev)) {
759 osm_err("transfer to removed drive\n");
760 rc = -ENODEV;
761 goto exit;
762 }
763
764 c = dev->i2o_dev->iop;
720 765
721 m = i2o_msg_get(c, &msg); 766 m = i2o_msg_get(c, &msg);
722 if (m == I2O_QUEUE_EMPTY) { 767 if (m == I2O_QUEUE_EMPTY) {
@@ -730,80 +775,109 @@ static int i2o_block_transfer(struct request *req)
730 goto nop_msg; 775 goto nop_msg;
731 } 776 }
732 777
733 if ((sgnum = i2o_block_sglist_alloc(ireq)) <= 0) {
734 rc = -ENOMEM;
735 goto context_remove;
736 }
737
738 /* Build the message based on the request. */
739 writel(i2o_block_driver.context, &msg->u.s.icntxt); 778 writel(i2o_block_driver.context, &msg->u.s.icntxt);
740 writel(tcntxt, &msg->u.s.tcntxt); 779 writel(tcntxt, &msg->u.s.tcntxt);
741 writel(req->nr_sectors << 9, &msg->body[1]);
742 780
743 writel((((u64) req->sector) << 9) & 0xffffffff, &msg->body[2]); 781 mptr = &msg->body[0];
744 writel(req->sector >> 23, &msg->body[3]);
745
746 mptr = &msg->body[4];
747
748 sg = ireq->sg_table;
749 782
750 if (rq_data_dir(req) == READ) { 783 if (rq_data_dir(req) == READ) {
751 writel(I2O_CMD_BLOCK_READ << 24 | HOST_TID << 12 | tid, 784 cmd = I2O_CMD_BLOCK_READ << 24;
752 &msg->u.head[1]); 785
753 sg_flags = 0x10000000;
754 switch (dev->rcache) { 786 switch (dev->rcache) {
755 case CACHE_NULL:
756 writel(0, &msg->body[0]);
757 break;
758 case CACHE_PREFETCH: 787 case CACHE_PREFETCH:
759 writel(0x201F0008, &msg->body[0]); 788 ctl_flags = 0x201F0008;
760 break; 789 break;
790
761 case CACHE_SMARTFETCH: 791 case CACHE_SMARTFETCH:
762 if (req->nr_sectors > 16) 792 if (req->nr_sectors > 16)
763 writel(0x201F0008, &msg->body[0]); 793 ctl_flags = 0x201F0008;
764 else 794 else
765 writel(0x001F0000, &msg->body[0]); 795 ctl_flags = 0x001F0000;
796 break;
797
798 default:
766 break; 799 break;
767 } 800 }
768 } else { 801 } else {
769 writel(I2O_CMD_BLOCK_WRITE << 24 | HOST_TID << 12 | tid, 802 cmd = I2O_CMD_BLOCK_WRITE << 24;
770 &msg->u.head[1]); 803
771 sg_flags = 0x14000000;
772 switch (dev->wcache) { 804 switch (dev->wcache) {
773 case CACHE_NULL:
774 writel(0, &msg->body[0]);
775 break;
776 case CACHE_WRITETHROUGH: 805 case CACHE_WRITETHROUGH:
777 writel(0x001F0008, &msg->body[0]); 806 ctl_flags = 0x001F0008;
778 break; 807 break;
779 case CACHE_WRITEBACK: 808 case CACHE_WRITEBACK:
780 writel(0x001F0010, &msg->body[0]); 809 ctl_flags = 0x001F0010;
781 break; 810 break;
782 case CACHE_SMARTBACK: 811 case CACHE_SMARTBACK:
783 if (req->nr_sectors > 16) 812 if (req->nr_sectors > 16)
784 writel(0x001F0004, &msg->body[0]); 813 ctl_flags = 0x001F0004;
785 else 814 else
786 writel(0x001F0010, &msg->body[0]); 815 ctl_flags = 0x001F0010;
787 break; 816 break;
788 case CACHE_SMARTTHROUGH: 817 case CACHE_SMARTTHROUGH:
789 if (req->nr_sectors > 16) 818 if (req->nr_sectors > 16)
790 writel(0x001F0004, &msg->body[0]); 819 ctl_flags = 0x001F0004;
791 else 820 else
792 writel(0x001F0010, &msg->body[0]); 821 ctl_flags = 0x001F0010;
822 default:
823 break;
824 }
825 }
826
827#ifdef CONFIG_I2O_EXT_ADAPTEC
828 if (c->adaptec) {
829 u8 cmd[10];
830 u32 scsi_flags;
831 u16 hwsec = queue_hardsect_size(req->q) >> KERNEL_SECTOR_SHIFT;
832
833 memset(cmd, 0, 10);
834
835 sgl_offset = SGL_OFFSET_12;
836
837 writel(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid,
838 &msg->u.head[1]);
839
840 writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++);
841 writel(tid, mptr++);
842
843 /*
844 * ENABLE_DISCONNECT
845 * SIMPLE_TAG
846 * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME
847 */
848 if (rq_data_dir(req) == READ) {
849 cmd[0] = 0x28;
850 scsi_flags = 0x60a0000a;
851 } else {
852 cmd[0] = 0x2A;
853 scsi_flags = 0xa0a0000a;
793 } 854 }
855
856 writel(scsi_flags, mptr++);
857
858 *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec);
859 *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec);
860
861 memcpy_toio(mptr, cmd, 10);
862 mptr += 4;
863 writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++);
864 } else
865#endif
866 {
867 writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]);
868 writel(ctl_flags, mptr++);
869 writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++);
870 writel((u32) (req->sector << KERNEL_SECTOR_SHIFT), mptr++);
871 writel(req->sector >> (32 - KERNEL_SECTOR_SHIFT), mptr++);
794 } 872 }
795 873
796 for (i = sgnum; i > 0; i--) { 874 if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
797 if (i == 1) 875 rc = -ENOMEM;
798 sg_flags |= 0x80000000; 876 goto context_remove;
799 writel(sg_flags | sg_dma_len(sg), mptr);
800 writel(sg_dma_address(sg), mptr + 4);
801 mptr += 8;
802 sg++;
803 } 877 }
804 878
805 writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | SGL_OFFSET_8, 879 writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) |
806 &msg->u.head[0]); 880 sgl_offset, &msg->u.head[0]);
807 881
808 list_add_tail(&ireq->queue, &dev->open_queue); 882 list_add_tail(&ireq->queue, &dev->open_queue);
809 dev->open_queue_depth++; 883 dev->open_queue_depth++;
@@ -846,11 +920,13 @@ static void i2o_block_request_fn(struct request_queue *q)
846 920
847 queue_depth = ireq->i2o_blk_dev->open_queue_depth; 921 queue_depth = ireq->i2o_blk_dev->open_queue_depth;
848 922
849 if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) 923 if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
850 if (!i2o_block_transfer(req)) { 924 if (!i2o_block_transfer(req)) {
851 blkdev_dequeue_request(req); 925 blkdev_dequeue_request(req);
852 continue; 926 continue;
853 } 927 } else
928 osm_info("transfer error\n");
929 }
854 930
855 if (queue_depth) 931 if (queue_depth)
856 break; 932 break;
@@ -933,6 +1009,7 @@ static struct i2o_block_device *i2o_block_device_alloc(void)
933 } 1009 }
934 1010
935 blk_queue_prep_rq(queue, i2o_block_prep_req_fn); 1011 blk_queue_prep_rq(queue, i2o_block_prep_req_fn);
1012 blk_queue_issue_flush_fn(queue, i2o_block_issue_flush);
936 1013
937 gd->major = I2O_MAJOR; 1014 gd->major = I2O_MAJOR;
938 gd->queue = queue; 1015 gd->queue = queue;
@@ -974,7 +1051,18 @@ static int i2o_block_probe(struct device *dev)
974 u64 size; 1051 u64 size;
975 u32 blocksize; 1052 u32 blocksize;
976 u32 flags, status; 1053 u32 flags, status;
977 int segments; 1054 u16 body_size = 4;
1055 unsigned short max_sectors;
1056
1057#ifdef CONFIG_I2O_EXT_ADAPTEC
1058 if (c->adaptec)
1059 body_size = 8;
1060#endif
1061
1062 if (c->limit_sectors)
1063 max_sectors = I2O_MAX_SECTORS_LIMITED;
1064 else
1065 max_sectors = I2O_MAX_SECTORS;
978 1066
979 /* skip devices which are used by IOP */ 1067 /* skip devices which are used by IOP */
980 if (i2o_dev->lct_data.user_tid != 0xfff) { 1068 if (i2o_dev->lct_data.user_tid != 0xfff) {
@@ -1009,50 +1097,35 @@ static int i2o_block_probe(struct device *dev)
1009 queue = gd->queue; 1097 queue = gd->queue;
1010 queue->queuedata = i2o_blk_dev; 1098 queue->queuedata = i2o_blk_dev;
1011 1099
1012 blk_queue_max_phys_segments(queue, I2O_MAX_SEGMENTS); 1100 blk_queue_max_phys_segments(queue, I2O_MAX_PHYS_SEGMENTS);
1013 blk_queue_max_sectors(queue, I2O_MAX_SECTORS); 1101 blk_queue_max_sectors(queue, max_sectors);
1014 1102 blk_queue_max_hw_segments(queue, i2o_sg_tablesize(c, body_size));
1015 if (c->short_req)
1016 segments = 8;
1017 else {
1018 i2o_status_block *sb;
1019 1103
1020 sb = c->status_block.virt; 1104 osm_debug("max sectors = %d\n", queue->max_phys_segments);
1021 1105 osm_debug("phys segments = %d\n", queue->max_sectors);
1022 segments = (sb->inbound_frame_size - 1106 osm_debug("max hw segments = %d\n", queue->max_hw_segments);
1023 sizeof(struct i2o_message) / 4 - 4) / 2;
1024 }
1025
1026 blk_queue_max_hw_segments(queue, segments);
1027
1028 osm_debug("max sectors = %d\n", I2O_MAX_SECTORS);
1029 osm_debug("phys segments = %d\n", I2O_MAX_SEGMENTS);
1030 osm_debug("hw segments = %d\n", segments);
1031 1107
1032 /* 1108 /*
1033 * Ask for the current media data. If that isn't supported 1109 * Ask for the current media data. If that isn't supported
1034 * then we ask for the device capacity data 1110 * then we ask for the device capacity data
1035 */ 1111 */
1036 if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8)) 1112 if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
1037 if (!i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) { 1113 i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
1038 osm_warn("could not get size of %s\n", gd->disk_name); 1114 blk_queue_hardsect_size(queue, blocksize);
1039 size = 0; 1115 } else
1040 } 1116 osm_warn("unable to get blocksize of %s\n", gd->disk_name);
1041 1117
1042 if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4)) 1118 if (i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) ||
1043 if (!i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) { 1119 i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
1044 osm_warn("unable to get blocksize of %s\n", 1120 set_capacity(gd, size >> KERNEL_SECTOR_SHIFT);
1045 gd->disk_name); 1121 } else
1046 blocksize = 0; 1122 osm_warn("could not get size of %s\n", gd->disk_name);
1047 }
1048 1123
1049 if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &i2o_blk_dev->power, 2)) 1124 if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &i2o_blk_dev->power, 2))
1050 i2o_blk_dev->power = 0; 1125 i2o_blk_dev->power = 0;
1051 i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4); 1126 i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4);
1052 i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4); 1127 i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4);
1053 1128
1054 set_capacity(gd, size >> 9);
1055
1056 i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff); 1129 i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);
1057 1130
1058 add_disk(gd); 1131 add_disk(gd);
@@ -1109,7 +1182,7 @@ static int __init i2o_block_init(void)
1109 goto exit; 1182 goto exit;
1110 } 1183 }
1111 1184
1112 i2o_blk_req_pool.pool = mempool_create(I2O_REQ_MEMPOOL_SIZE, 1185 i2o_blk_req_pool.pool = mempool_create(I2O_BLOCK_REQ_MEMPOOL_SIZE,
1113 mempool_alloc_slab, 1186 mempool_alloc_slab,
1114 mempool_free_slab, 1187 mempool_free_slab,
1115 i2o_blk_req_pool.slab); 1188 i2o_blk_req_pool.slab);
diff --git a/drivers/message/i2o/i2o_block.h b/drivers/message/i2o/i2o_block.h
index 712111ffa638..9e1a95fb0833 100644
--- a/drivers/message/i2o/i2o_block.h
+++ b/drivers/message/i2o/i2o_block.h
@@ -84,9 +84,9 @@ struct i2o_block_request
 	struct list_head queue;
 	struct request *req;	/* corresponding request */
 	struct i2o_block_device *i2o_blk_dev;	/* I2O block device */
-	int sg_dma_direction;	/* direction of DMA buffer read/write */
+	struct device *dev;	/* device used for DMA */
 	int sg_nents;		/* number of SG elements */
-	struct scatterlist sg_table[I2O_MAX_SEGMENTS];	/* SG table */
+	struct scatterlist sg_table[I2O_MAX_PHYS_SEGMENTS];	/* SG table */
 };
 
 /* I2O Block device delayed request */
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 383e89a5c9f0..849d90aad779 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -30,27 +30,11 @@
30 * 2 of the License, or (at your option) any later version. 30 * 2 of the License, or (at your option) any later version.
31 */ 31 */
32 32
33#include <linux/module.h>
34#include <linux/kernel.h>
35#include <linux/pci.h>
36#include <linux/i2o.h>
37#include <linux/errno.h>
38#include <linux/init.h>
39#include <linux/slab.h>
40#include <linux/miscdevice.h> 33#include <linux/miscdevice.h>
41#include <linux/mm.h>
42#include <linux/spinlock.h>
43#include <linux/smp_lock.h> 34#include <linux/smp_lock.h>
44#include <linux/ioctl32.h>
45#include <linux/compat.h> 35#include <linux/compat.h>
46#include <linux/syscalls.h>
47 36
48#include <asm/uaccess.h> 37#include <asm/uaccess.h>
49#include <asm/io.h>
50
51#define OSM_NAME "config-osm"
52#define OSM_VERSION "$Rev$"
53#define OSM_DESCRIPTION "I2O Configuration OSM"
54 38
55extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int); 39extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int);
56 40
@@ -80,125 +64,6 @@ struct i2o_cfg_info {
80static struct i2o_cfg_info *open_files = NULL; 64static struct i2o_cfg_info *open_files = NULL;
81static ulong i2o_cfg_info_id = 0; 65static ulong i2o_cfg_info_id = 0;
82 66
83/**
84 * i2o_config_read_hrt - Returns the HRT of the controller
85 * @kob: kernel object handle
86 * @buf: buffer into which the HRT should be copied
87 * @off: file offset
88 * @count: number of bytes to read
89 *
90 * Put @count bytes starting at @off into @buf from the HRT of the I2O
91 * controller corresponding to @kobj.
92 *
93 * Returns number of bytes copied into buffer.
94 */
95static ssize_t i2o_config_read_hrt(struct kobject *kobj, char *buf,
96 loff_t offset, size_t count)
97{
98 struct i2o_controller *c = to_i2o_controller(container_of(kobj,
99 struct device,
100 kobj));
101 i2o_hrt *hrt = c->hrt.virt;
102
103 u32 size = (hrt->num_entries * hrt->entry_len + 2) * 4;
104
105 if(offset > size)
106 return 0;
107
108 if(offset + count > size)
109 count = size - offset;
110
111 memcpy(buf, (u8 *) hrt + offset, count);
112
113 return count;
114};
115
116/**
117 * i2o_config_read_lct - Returns the LCT of the controller
118 * @kob: kernel object handle
119 * @buf: buffer into which the LCT should be copied
120 * @off: file offset
121 * @count: number of bytes to read
122 *
123 * Put @count bytes starting at @off into @buf from the LCT of the I2O
124 * controller corresponding to @kobj.
125 *
126 * Returns number of bytes copied into buffer.
127 */
128static ssize_t i2o_config_read_lct(struct kobject *kobj, char *buf,
129 loff_t offset, size_t count)
130{
131 struct i2o_controller *c = to_i2o_controller(container_of(kobj,
132 struct device,
133 kobj));
134 u32 size = c->lct->table_size * 4;
135
136 if(offset > size)
137 return 0;
138
139 if(offset + count > size)
140 count = size - offset;
141
142 memcpy(buf, (u8 *) c->lct + offset, count);
143
144 return count;
145};
146
147/* attribute for HRT in sysfs */
148static struct bin_attribute i2o_config_hrt_attr = {
149 .attr = {
150 .name = "hrt",
151 .mode = S_IRUGO,
152 .owner = THIS_MODULE
153 },
154 .size = 0,
155 .read = i2o_config_read_hrt
156};
157
158/* attribute for LCT in sysfs */
159static struct bin_attribute i2o_config_lct_attr = {
160 .attr = {
161 .name = "lct",
162 .mode = S_IRUGO,
163 .owner = THIS_MODULE
164 },
165 .size = 0,
166 .read = i2o_config_read_lct
167};
168
169/**
170 * i2o_config_notify_controller_add - Notify of added controller
171 * @c: the controller which was added
172 *
173 * If a I2O controller is added, we catch the notification to add sysfs
174 * entries.
175 */
176static void i2o_config_notify_controller_add(struct i2o_controller *c)
177{
178 sysfs_create_bin_file(&(c->device.kobj), &i2o_config_hrt_attr);
179 sysfs_create_bin_file(&(c->device.kobj), &i2o_config_lct_attr);
180};
181
182/**
183 * i2o_config_notify_controller_remove - Notify of removed controller
184 * @c: the controller which was removed
185 *
186 * If a I2O controller is removed, we catch the notification to remove the
187 * sysfs entries.
188 */
189static void i2o_config_notify_controller_remove(struct i2o_controller *c)
190{
191 sysfs_remove_bin_file(&c->device.kobj, &i2o_config_lct_attr);
192 sysfs_remove_bin_file(&c->device.kobj, &i2o_config_hrt_attr);
193};
194
195/* Config OSM driver struct */
196static struct i2o_driver i2o_config_driver = {
197 .name = OSM_NAME,
198 .notify_controller_add = i2o_config_notify_controller_add,
199 .notify_controller_remove = i2o_config_notify_controller_remove
200};
201
202static int i2o_cfg_getiops(unsigned long arg) 67static int i2o_cfg_getiops(unsigned long arg)
203{ 68{
204 struct i2o_controller *c; 69 struct i2o_controller *c;
@@ -1257,37 +1122,20 @@ static struct miscdevice i2o_miscdev = {
1257 &config_fops 1122 &config_fops
1258}; 1123};
1259 1124
1260static int __init i2o_config_init(void) 1125static int __init i2o_config_old_init(void)
1261{ 1126{
1262 printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
1263
1264 spin_lock_init(&i2o_config_lock); 1127 spin_lock_init(&i2o_config_lock);
1265 1128
1266 if (misc_register(&i2o_miscdev) < 0) { 1129 if (misc_register(&i2o_miscdev) < 0) {
1267 osm_err("can't register device.\n"); 1130 osm_err("can't register device.\n");
1268 return -EBUSY; 1131 return -EBUSY;
1269 } 1132 }
1270 /*
1271 * Install our handler
1272 */
1273 if (i2o_driver_register(&i2o_config_driver)) {
1274 osm_err("handler register failed.\n");
1275 misc_deregister(&i2o_miscdev);
1276 return -EBUSY;
1277 }
1278 return 0; 1133 return 0;
1279} 1134}
1280 1135
1281static void i2o_config_exit(void) 1136static void i2o_config_old_exit(void)
1282{ 1137{
1283 misc_deregister(&i2o_miscdev); 1138 misc_deregister(&i2o_miscdev);
1284 i2o_driver_unregister(&i2o_config_driver);
1285} 1139}
1286 1140
1287MODULE_AUTHOR("Red Hat Software"); 1141MODULE_AUTHOR("Red Hat Software");
1288MODULE_LICENSE("GPL");
1289MODULE_DESCRIPTION(OSM_DESCRIPTION);
1290MODULE_VERSION(OSM_VERSION);
1291
1292module_init(i2o_config_init);
1293module_exit(i2o_config_exit);
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index b176d0eeff7f..e5b74452c495 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -228,7 +228,7 @@ static const char *i2o_get_class_name(int class)
 	case I2O_CLASS_FLOPPY_DEVICE:
 		idx = 12;
 		break;
-	case I2O_CLASS_BUS_ADAPTER_PORT:
+	case I2O_CLASS_BUS_ADAPTER:
 		idx = 13;
 		break;
 	case I2O_CLASS_PEER_TRANSPORT_AGENT:
@@ -490,7 +490,7 @@ static int i2o_seq_show_lct(struct seq_file *seq, void *v)
490 seq_printf(seq, ", Unknown Device Type"); 490 seq_printf(seq, ", Unknown Device Type");
491 break; 491 break;
492 492
493 case I2O_CLASS_BUS_ADAPTER_PORT: 493 case I2O_CLASS_BUS_ADAPTER:
494 if (lct->lct_entry[i].sub_class < BUS_TABLE_SIZE) 494 if (lct->lct_entry[i].sub_class < BUS_TABLE_SIZE)
495 seq_printf(seq, ", %s", 495 seq_printf(seq, ", %s",
496 bus_ports[lct->lct_entry[i]. 496 bus_ports[lct->lct_entry[i].
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index 812c29ec86d3..c3b0c29ac02d 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -103,7 +103,7 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c)
103 i2o_status_block *sb; 103 i2o_status_block *sb;
104 104
105 list_for_each_entry(i2o_dev, &c->devices, list) 105 list_for_each_entry(i2o_dev, &c->devices, list)
106 if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT) { 106 if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) {
107 if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) 107 if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1)
108 && (type == 0x01)) /* SCSI bus */ 108 && (type == 0x01)) /* SCSI bus */
109 max_channel++; 109 max_channel++;
@@ -139,7 +139,7 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c)
139 139
140 i = 0; 140 i = 0;
141 list_for_each_entry(i2o_dev, &c->devices, list) 141 list_for_each_entry(i2o_dev, &c->devices, list)
142 if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT) { 142 if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) {
143 if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) || (type == 1)) /* only SCSI bus */ 143 if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) || (type == 1)) /* only SCSI bus */
144 i2o_shost->channel[i++] = i2o_dev; 144 i2o_shost->channel[i++] = i2o_dev;
145 145
@@ -186,6 +186,7 @@ static int i2o_scsi_remove(struct device *dev)
186 186
187 shost_for_each_device(scsi_dev, i2o_shost->scsi_host) 187 shost_for_each_device(scsi_dev, i2o_shost->scsi_host)
188 if (scsi_dev->hostdata == i2o_dev) { 188 if (scsi_dev->hostdata == i2o_dev) {
189 sysfs_remove_link(&i2o_dev->device.kobj, "scsi");
189 scsi_remove_device(scsi_dev); 190 scsi_remove_device(scsi_dev);
190 scsi_device_put(scsi_dev); 191 scsi_device_put(scsi_dev);
191 break; 192 break;
@@ -259,12 +260,14 @@ static int i2o_scsi_probe(struct device *dev)
259 scsi_dev = 260 scsi_dev =
260 __scsi_add_device(i2o_shost->scsi_host, channel, id, lun, i2o_dev); 261 __scsi_add_device(i2o_shost->scsi_host, channel, id, lun, i2o_dev);
261 262
262 if (!scsi_dev) { 263 if (IS_ERR(scsi_dev)) {
263 osm_warn("can not add SCSI device %03x\n", 264 osm_warn("can not add SCSI device %03x\n",
264 i2o_dev->lct_data.tid); 265 i2o_dev->lct_data.tid);
265 return -EFAULT; 266 return PTR_ERR(scsi_dev);
266 } 267 }
267 268
269 sysfs_create_link(&i2o_dev->device.kobj, &scsi_dev->sdev_gendev.kobj, "scsi");
270
268 osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %d\n", 271 osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %d\n",
269 i2o_dev->lct_data.tid, channel, id, (unsigned int)lun); 272 i2o_dev->lct_data.tid, channel, id, (unsigned int)lun);
270 273
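
The probe path now also creates a "scsi" symlink from the I2O device's kobject to the matching SCSI device, and the remove path deletes it again. A small sketch for resolving that link from user space; the device path is hypothetical and depends on the I2O device's TID:

/* Sketch: resolve the "scsi" symlink created by i2o_scsi_probe().
 * The device path is an assumption made up for this example. */
#include <stdio.h>
#include <unistd.h>
#include <limits.h>

int main(void)
{
	char target[PATH_MAX];
	const char *link = "/sys/bus/i2o/devices/0-0010/scsi";  /* assumed path */
	ssize_t n = readlink(link, target, sizeof(target) - 1);

	if (n < 0) {
		perror("readlink");
		return 1;
	}
	target[n] = '\0';
	printf("%s -> %s\n", link, target);
	return 0;
}
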
@@ -545,7 +548,13 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
545 int tid; 548 int tid;
546 struct i2o_message __iomem *msg; 549 struct i2o_message __iomem *msg;
547 u32 m; 550 u32 m;
548 u32 scsi_flags, sg_flags; 551 /*
552 * ENABLE_DISCONNECT
553 * SIMPLE_TAG
554 * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME
555 */
556 u32 scsi_flags = 0x20a00000;
557 u32 sg_flags;
549 u32 __iomem *mptr; 558 u32 __iomem *mptr;
550 u32 __iomem *lenptr; 559 u32 __iomem *lenptr;
551 u32 len; 560 u32 len;
@@ -591,17 +600,19 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
591 600
592 switch (SCpnt->sc_data_direction) { 601 switch (SCpnt->sc_data_direction) {
593 case PCI_DMA_NONE: 602 case PCI_DMA_NONE:
594 scsi_flags = 0x00000000; // DATA NO XFER 603 /* DATA NO XFER */
595 sg_flags = 0x00000000; 604 sg_flags = 0x00000000;
596 break; 605 break;
597 606
598 case PCI_DMA_TODEVICE: 607 case PCI_DMA_TODEVICE:
599 scsi_flags = 0x80000000; // DATA OUT (iop-->dev) 608 /* DATA OUT (iop-->dev) */
609 scsi_flags |= 0x80000000;
600 sg_flags = 0x14000000; 610 sg_flags = 0x14000000;
601 break; 611 break;
602 612
603 case PCI_DMA_FROMDEVICE: 613 case PCI_DMA_FROMDEVICE:
604 scsi_flags = 0x40000000; // DATA IN (iop<--dev) 614 /* DATA IN (iop<--dev) */
615 scsi_flags |= 0x40000000;
605 sg_flags = 0x10000000; 616 sg_flags = 0x10000000;
606 break; 617 break;
607 618
@@ -639,8 +650,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
639 } 650 }
640 */ 651 */
641 652
642 /* Direction, disconnect ok, tag, CDBLen */ 653 writel(scsi_flags | SCpnt->cmd_len, mptr++);
643 writel(scsi_flags | 0x20200000 | SCpnt->cmd_len, mptr ++);
644 654
645 /* Write SCSI command into the message - always 16 byte block */ 655 /* Write SCSI command into the message - always 16 byte block */
646 memcpy_toio(mptr, SCpnt->cmnd, 16); 656 memcpy_toio(mptr, SCpnt->cmnd, 16);
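
With this change the flags word is assembled incrementally: a fixed base of 0x20a00000 (enable disconnect, simple tag, return sense data in the reply frame), one direction bit per transfer type, and the CDB length OR'ed into the low bits when the word is written into the message. A stand-alone sketch of the same composition:

/* Sketch of the SCSI flags word built in the reworked
 * i2o_scsi_queuecommand(): base flags | direction bit | CDB length. */
#include <stdio.h>
#include <stdint.h>

#define I2O_SCSI_FLAGS_BASE   0x20a00000u  /* base value from the patch */
#define I2O_SCSI_DATA_OUT     0x80000000u  /* iop --> device */
#define I2O_SCSI_DATA_IN      0x40000000u  /* iop <-- device */

static uint32_t scsi_flags_word(int write, int read, unsigned cdb_len)
{
	uint32_t flags = I2O_SCSI_FLAGS_BASE;

	if (write)
		flags |= I2O_SCSI_DATA_OUT;
	else if (read)
		flags |= I2O_SCSI_DATA_IN;
	/* no data transfer: neither direction bit is set */

	return flags | cdb_len;
}

int main(void)
{
	printf("READ(10):          0x%08x\n", scsi_flags_word(0, 1, 10));
	printf("WRITE(10):         0x%08x\n", scsi_flags_word(1, 0, 10));
	printf("TEST UNIT READY:   0x%08x\n", scsi_flags_word(0, 0, 6));
	return 0;
}
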
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index 62b0d8bed186..40312053b38d 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -456,6 +456,70 @@ static int i2o_iop_clear(struct i2o_controller *c)
456} 456}
457 457
458/** 458/**
459 * i2o_iop_init_outbound_queue - setup the outbound message queue
460 * @c: I2O controller
461 *
462 * Clear and (re)initialize IOP's outbound queue and post the message
463 * frames to the IOP.
464 *
465 * Returns 0 on success or a negative errno code on failure.
466 */
467static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
468{
469 u8 *status = c->status.virt;
470 u32 m;
471 struct i2o_message __iomem *msg;
472 ulong timeout;
473 int i;
474
475 osm_debug("%s: Initializing Outbound Queue...\n", c->name);
476
477 memset(status, 0, 4);
478
479 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
480 if (m == I2O_QUEUE_EMPTY)
481 return -ETIMEDOUT;
482
483 writel(EIGHT_WORD_MSG_SIZE | TRL_OFFSET_6, &msg->u.head[0]);
484 writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID,
485 &msg->u.head[1]);
486 writel(i2o_exec_driver.context, &msg->u.s.icntxt);
487 writel(0x0106, &msg->u.s.tcntxt); /* FIXME: why 0x0106, maybe in
488 Spec? */
489 writel(PAGE_SIZE, &msg->body[0]);
490 /* Outbound msg frame size in words and Initcode */
491 writel(MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]);
492 writel(0xd0000004, &msg->body[2]);
493 writel(i2o_dma_low(c->status.phys), &msg->body[3]);
494 writel(i2o_dma_high(c->status.phys), &msg->body[4]);
495
496 i2o_msg_post(c, m);
497
498 timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ;
499 while (*status <= I2O_CMD_IN_PROGRESS) {
500 if (time_after(jiffies, timeout)) {
501 osm_warn("%s: Timeout Initializing\n", c->name);
502 return -ETIMEDOUT;
503 }
504 set_current_state(TASK_UNINTERRUPTIBLE);
505 schedule_timeout(1);
506
507 rmb();
508 }
509
510 m = c->out_queue.phys;
511
512 /* Post frames */
513 for (i = 0; i < NMBR_MSG_FRAMES; i++) {
514 i2o_flush_reply(c, m);
515 udelay(1); /* Promise */
516 m += MSG_FRAME_SIZE * 4;
517 }
518
519 return 0;
520}
521
522/**
459 * i2o_iop_reset - reset an I2O controller 523 * i2o_iop_reset - reset an I2O controller
460 * @c: controller to reset 524 * @c: controller to reset
461 * 525 *
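
The OutboundInit request built above packs the host page size, the outbound frame size in words together with the 0x80 init code, and the low/high halves of the status buffer's DMA address into the message body. A compilable sketch of that body layout; the frame size and status address used here are made-up values, not the driver's constants:

/* Sketch of the OutboundInit body words posted by
 * i2o_iop_init_outbound_queue(). */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE_BYTES       4096u
#define MSG_FRAME_SIZE_WORDS  32u   /* illustrative, not the driver constant */

int main(void)
{
	uint64_t status_phys = 0x000000012345f000ull;   /* example DMA address */
	uint32_t body[5];

	body[0] = PAGE_SIZE_BYTES;                       /* host page size */
	body[1] = MSG_FRAME_SIZE_WORDS << 16 | 0x80;     /* frame size | init code */
	body[2] = 0xd0000004;                            /* SG flags | 4-byte status, as in the driver */
	body[3] = (uint32_t)status_phys;                 /* i2o_dma_low()  */
	body[4] = (uint32_t)(status_phys >> 32);         /* i2o_dma_high() */

	for (int i = 0; i < 5; i++)
		printf("body[%d] = 0x%08x\n", i, body[i]);
	return 0;
}
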
@@ -491,25 +555,16 @@ static int i2o_iop_reset(struct i2o_controller *c)
491 writel(0, &msg->u.s.tcntxt); //FIXME: use reasonable transaction context 555 writel(0, &msg->u.s.tcntxt); //FIXME: use reasonable transaction context
492 writel(0, &msg->body[0]); 556 writel(0, &msg->body[0]);
493 writel(0, &msg->body[1]); 557 writel(0, &msg->body[1]);
494 writel(i2o_ptr_low((void *)c->status.phys), &msg->body[2]); 558 writel(i2o_dma_low(c->status.phys), &msg->body[2]);
495 writel(i2o_ptr_high((void *)c->status.phys), &msg->body[3]); 559 writel(i2o_dma_high(c->status.phys), &msg->body[3]);
496 560
497 i2o_msg_post(c, m); 561 i2o_msg_post(c, m);
498 562
499 /* Wait for a reply */ 563 /* Wait for a reply */
500 timeout = jiffies + I2O_TIMEOUT_RESET * HZ; 564 timeout = jiffies + I2O_TIMEOUT_RESET * HZ;
501 while (!*status) { 565 while (!*status) {
502 if (time_after(jiffies, timeout)) { 566 if (time_after(jiffies, timeout))
503 printk(KERN_ERR "%s: IOP reset timeout.\n", c->name);
504 rc = -ETIMEDOUT;
505 goto exit;
506 }
507
508 /* Promise bug */
509 if (status[1] || status[4]) {
510 *status = 0;
511 break; 567 break;
512 }
513 568
514 set_current_state(TASK_UNINTERRUPTIBLE); 569 set_current_state(TASK_UNINTERRUPTIBLE);
515 schedule_timeout(1); 570 schedule_timeout(1);
@@ -517,14 +572,20 @@ static int i2o_iop_reset(struct i2o_controller *c)
517 rmb(); 572 rmb();
518 } 573 }
519 574
520 if (*status == I2O_CMD_IN_PROGRESS) { 575 switch (*status) {
576 case I2O_CMD_REJECTED:
577 osm_warn("%s: IOP reset rejected\n", c->name);
578 rc = -EPERM;
579 break;
580
581 case I2O_CMD_IN_PROGRESS:
521 /* 582 /*
522 * Once the reset is sent, the IOP goes into the INIT state 583 * Once the reset is sent, the IOP goes into the INIT state
523 * which is indeterminate. We need to wait until the IOP 584 * which is indeterminate. We need to wait until the IOP has
524 * has rebooted before we can let the system talk to 585 * rebooted before we can let the system talk to it. We read
525 * it. We read the inbound Free_List until a message is 586 * the inbound Free_List until a message is available. If we
526 * available. If we can't read one in the given amount of 587 * can't read one in the given amount of time, we assume the
527 * time, we assume the IOP could not reboot properly. 588 * IOP could not reboot properly.
528 */ 589 */
529 pr_debug("%s: Reset in progress, waiting for reboot...\n", 590 pr_debug("%s: Reset in progress, waiting for reboot...\n",
530 c->name); 591 c->name);
@@ -543,19 +604,26 @@ static int i2o_iop_reset(struct i2o_controller *c)
543 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET); 604 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET);
544 } 605 }
545 i2o_msg_nop(c, m); 606 i2o_msg_nop(c, m);
546 }
547 607
548 /* from here all quiesce commands are safe */ 608 /* from here all quiesce commands are safe */
549 c->no_quiesce = 0; 609 c->no_quiesce = 0;
550 610
551 /* If IopReset was rejected or didn't perform reset, try IopClear */ 611 /* verify if controller is in state RESET */
552 i2o_status_get(c); 612 i2o_status_get(c);
553 if (*status == I2O_CMD_REJECTED || sb->iop_state != ADAPTER_STATE_RESET) { 613
554 printk(KERN_WARNING "%s: Reset rejected, trying to clear\n", 614 if (!c->promise && (sb->iop_state != ADAPTER_STATE_RESET))
555 c->name); 615 osm_warn("%s: reset completed, but adapter not in RESET"
556 i2o_iop_clear(c); 616 " state.\n", c->name);
557 } else 617 else
558 pr_debug("%s: Reset completed.\n", c->name); 618 osm_debug("%s: reset completed.\n", c->name);
619
620 break;
621
622 default:
623 osm_err("%s: IOP reset timeout.\n", c->name);
624 rc = -ETIMEDOUT;
625 break;
626 }
559 627
560 exit: 628 exit:
561 /* Enable all IOPs */ 629 /* Enable all IOPs */
@@ -565,87 +633,6 @@ static int i2o_iop_reset(struct i2o_controller *c)
565}; 633};
566 634
567/** 635/**
568 * i2o_iop_init_outbound_queue - setup the outbound message queue
569 * @c: I2O controller
570 *
571 * Clear and (re)initialize IOP's outbound queue and post the message
572 * frames to the IOP.
573 *
574 * Returns 0 on success or a negative errno code on failure.
575 */
576static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
577{
578 u8 *status = c->status.virt;
579 u32 m;
580 struct i2o_message __iomem *msg;
581 ulong timeout;
582 int i;
583
584 pr_debug("%s: Initializing Outbound Queue...\n", c->name);
585
586 memset(status, 0, 4);
587
588 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
589 if (m == I2O_QUEUE_EMPTY)
590 return -ETIMEDOUT;
591
592 writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]);
593 writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID,
594 &msg->u.head[1]);
595 writel(i2o_exec_driver.context, &msg->u.s.icntxt);
596 writel(0x00000000, &msg->u.s.tcntxt);
597 writel(PAGE_SIZE, &msg->body[0]);
598 writel(MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]); /* Outbound msg frame
599 size in words and Initcode */
600 writel(0xd0000004, &msg->body[2]);
601 writel(i2o_ptr_low((void *)c->status.phys), &msg->body[3]);
602 writel(i2o_ptr_high((void *)c->status.phys), &msg->body[4]);
603
604 i2o_msg_post(c, m);
605
606 timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ;
607 while (*status <= I2O_CMD_IN_PROGRESS) {
608 if (time_after(jiffies, timeout)) {
609 printk(KERN_WARNING "%s: Timeout Initializing\n",
610 c->name);
611 return -ETIMEDOUT;
612 }
613 set_current_state(TASK_UNINTERRUPTIBLE);
614 schedule_timeout(1);
615
616 rmb();
617 }
618
619 m = c->out_queue.phys;
620
621 /* Post frames */
622 for (i = 0; i < NMBR_MSG_FRAMES; i++) {
623 i2o_flush_reply(c, m);
624 udelay(1); /* Promise */
625 m += MSG_FRAME_SIZE * 4;
626 }
627
628 return 0;
629}
630
631/**
632 * i2o_iop_send_nop - send a core NOP message
633 * @c: controller
634 *
635 * Send a no-operation message with a reply set to cause no
636 * action either. Needed for bringing up promise controllers.
637 */
638static int i2o_iop_send_nop(struct i2o_controller *c)
639{
640 struct i2o_message __iomem *msg;
641 u32 m = i2o_msg_get_wait(c, &msg, HZ);
642 if (m == I2O_QUEUE_EMPTY)
643 return -ETIMEDOUT;
644 i2o_msg_nop(c, m);
645 return 0;
646}
647
648/**
649 * i2o_iop_activate - Bring controller up to HOLD 636 * i2o_iop_activate - Bring controller up to HOLD
650 * @c: controller 637 * @c: controller
651 * 638 *
@@ -656,26 +643,9 @@ static int i2o_iop_send_nop(struct i2o_controller *c)
656 */ 643 */
657static int i2o_iop_activate(struct i2o_controller *c) 644static int i2o_iop_activate(struct i2o_controller *c)
658{ 645{
659 struct pci_dev *i960 = NULL;
660 i2o_status_block *sb = c->status_block.virt; 646 i2o_status_block *sb = c->status_block.virt;
661 int rc; 647 int rc;
662 648 int state;
663 if (c->promise) {
664 /* Beat up the hardware first of all */
665 i960 =
666 pci_find_slot(c->pdev->bus->number,
667 PCI_DEVFN(PCI_SLOT(c->pdev->devfn), 0));
668 if (i960)
669 pci_write_config_word(i960, 0x42, 0);
670
671 /* Follow this sequence precisely or the controller
672 ceases to perform useful functions until reboot */
673 if ((rc = i2o_iop_send_nop(c)))
674 return rc;
675
676 if ((rc = i2o_iop_reset(c)))
677 return rc;
678 }
679 649
680 /* In INIT state, Wait Inbound Q to initialize (in i2o_status_get) */ 650 /* In INIT state, Wait Inbound Q to initialize (in i2o_status_get) */
681 /* In READY state, Get status */ 651 /* In READY state, Get status */
@@ -684,7 +654,8 @@ static int i2o_iop_activate(struct i2o_controller *c)
684 if (rc) { 654 if (rc) {
685 printk(KERN_INFO "%s: Unable to obtain status, " 655 printk(KERN_INFO "%s: Unable to obtain status, "
686 "attempting a reset.\n", c->name); 656 "attempting a reset.\n", c->name);
687 if (i2o_iop_reset(c)) 657 rc = i2o_iop_reset(c);
658 if (rc)
688 return rc; 659 return rc;
689 } 660 }
690 661
@@ -697,37 +668,37 @@ static int i2o_iop_activate(struct i2o_controller *c)
697 switch (sb->iop_state) { 668 switch (sb->iop_state) {
698 case ADAPTER_STATE_FAULTED: 669 case ADAPTER_STATE_FAULTED:
699 printk(KERN_CRIT "%s: hardware fault\n", c->name); 670 printk(KERN_CRIT "%s: hardware fault\n", c->name);
700 return -ENODEV; 671 return -EFAULT;
701 672
702 case ADAPTER_STATE_READY: 673 case ADAPTER_STATE_READY:
703 case ADAPTER_STATE_OPERATIONAL: 674 case ADAPTER_STATE_OPERATIONAL:
704 case ADAPTER_STATE_HOLD: 675 case ADAPTER_STATE_HOLD:
705 case ADAPTER_STATE_FAILED: 676 case ADAPTER_STATE_FAILED:
706 pr_debug("%s: already running, trying to reset...\n", c->name); 677 pr_debug("%s: already running, trying to reset...\n", c->name);
707 if (i2o_iop_reset(c)) 678 rc = i2o_iop_reset(c);
708 return -ENODEV; 679 if (rc)
680 return rc;
709 } 681 }
710 682
683 /* preserve state */
684 state = sb->iop_state;
685
711 rc = i2o_iop_init_outbound_queue(c); 686 rc = i2o_iop_init_outbound_queue(c);
712 if (rc) 687 if (rc)
713 return rc; 688 return rc;
714 689
715 if (c->promise) { 690 /* if adapter was not in RESET state clear now */
716 if ((rc = i2o_iop_send_nop(c))) 691 if (state != ADAPTER_STATE_RESET)
717 return rc; 692 i2o_iop_clear(c);
718 693
719 if ((rc = i2o_status_get(c))) 694 i2o_status_get(c);
720 return rc;
721 695
722 if (i960) 696 if (sb->iop_state != ADAPTER_STATE_HOLD) {
723 pci_write_config_word(i960, 0x42, 0x3FF); 697 osm_err("%s: failed to bring IOP into HOLD state\n", c->name);
698 return -EIO;
724 } 699 }
725 700
726 /* In HOLD state */ 701 return i2o_hrt_get(c);
727
728 rc = i2o_hrt_get(c);
729
730 return rc;
731}; 702};
732 703
733/** 704/**
@@ -1030,8 +1001,8 @@ int i2o_status_get(struct i2o_controller *c)
1030 writel(0, &msg->u.s.tcntxt); // FIXME: use resonable transaction context 1001 writel(0, &msg->u.s.tcntxt); // FIXME: use resonable transaction context
1031 writel(0, &msg->body[0]); 1002 writel(0, &msg->body[0]);
1032 writel(0, &msg->body[1]); 1003 writel(0, &msg->body[1]);
1033 writel(i2o_ptr_low((void *)c->status_block.phys), &msg->body[2]); 1004 writel(i2o_dma_low(c->status_block.phys), &msg->body[2]);
1034 writel(i2o_ptr_high((void *)c->status_block.phys), &msg->body[3]); 1005 writel(i2o_dma_high(c->status_block.phys), &msg->body[3]);
1035 writel(sizeof(i2o_status_block), &msg->body[4]); /* always 88 bytes */ 1006 writel(sizeof(i2o_status_block), &msg->body[4]); /* always 88 bytes */
1036 1007
1037 i2o_msg_post(c, m); 1008 i2o_msg_post(c, m);
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
index f33fd81f77a4..a499af096a68 100644
--- a/drivers/message/i2o/pci.c
+++ b/drivers/message/i2o/pci.c
@@ -50,30 +50,6 @@ static struct pci_device_id __devinitdata i2o_pci_ids[] = {
50}; 50};
51 51
52/** 52/**
53 * i2o_dma_realloc - Realloc DMA memory
54 * @dev: struct device pointer to the PCI device of the I2O controller
55 * @addr: pointer to a i2o_dma struct DMA buffer
56 * @len: new length of memory
57 * @gfp_mask: GFP mask
58 *
59 * If there was something allocated in the addr, free it first. If len > 0
60 * then try to allocate it and write the addresses back to the addr
61 * structure. If len == 0 set the virtual address to NULL.
62 *
63 * Returns 0 on success or negative error code on failure.
64 */
65int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, size_t len,
66 unsigned int gfp_mask)
67{
68 i2o_dma_free(dev, addr);
69
70 if (len)
71 return i2o_dma_alloc(dev, addr, len, gfp_mask);
72
73 return 0;
74};
75
76/**
77 * i2o_pci_free - Frees the DMA memory for the I2O controller 53 * i2o_pci_free - Frees the DMA memory for the I2O controller
78 * @c: I2O controller to free 54 * @c: I2O controller to free
79 * 55 *
@@ -185,6 +161,7 @@ static int __devinit i2o_pci_alloc(struct i2o_controller *c)
185 } else 161 } else
186 c->in_queue = c->base; 162 c->in_queue = c->base;
187 163
164 c->irq_status = c->base.virt + I2O_IRQ_STATUS;
188 c->irq_mask = c->base.virt + I2O_IRQ_MASK; 165 c->irq_mask = c->base.virt + I2O_IRQ_MASK;
189 c->in_port = c->base.virt + I2O_IN_PORT; 166 c->in_port = c->base.virt + I2O_IN_PORT;
190 c->out_port = c->base.virt + I2O_OUT_PORT; 167 c->out_port = c->base.virt + I2O_OUT_PORT;
@@ -232,36 +209,30 @@ static int __devinit i2o_pci_alloc(struct i2o_controller *c)
232static irqreturn_t i2o_pci_interrupt(int irq, void *dev_id, struct pt_regs *r) 209static irqreturn_t i2o_pci_interrupt(int irq, void *dev_id, struct pt_regs *r)
233{ 210{
234 struct i2o_controller *c = dev_id; 211 struct i2o_controller *c = dev_id;
235 struct device *dev = &c->pdev->dev; 212 u32 m;
236 u32 mv = readl(c->out_port); 213 irqreturn_t rc = IRQ_NONE;
237 214
238 /* 215 while (readl(c->irq_status) & I2O_IRQ_OUTBOUND_POST) {
239 * Old 960 steppings had a bug in the I2O unit that caused 216 m = readl(c->out_port);
240 * the queue to appear empty when it wasn't. 217 if (m == I2O_QUEUE_EMPTY) {
241 */ 218 /*
242 if (mv == I2O_QUEUE_EMPTY) { 219 * Old 960 steppings had a bug in the I2O unit that
243 mv = readl(c->out_port); 220 * caused the queue to appear empty when it wasn't.
244 if (unlikely(mv == I2O_QUEUE_EMPTY)) 221 */
245 return IRQ_NONE; 222 m = readl(c->out_port);
246 else 223 if (unlikely(m == I2O_QUEUE_EMPTY))
247 pr_debug("%s: 960 bug detected\n", c->name); 224 break;
248 } 225 }
249 226
250 while (mv != I2O_QUEUE_EMPTY) {
251 /* dispatch it */ 227 /* dispatch it */
252 if (i2o_driver_dispatch(c, mv)) 228 if (i2o_driver_dispatch(c, m))
253 /* flush it if result != 0 */ 229 /* flush it if result != 0 */
254 i2o_flush_reply(c, mv); 230 i2o_flush_reply(c, m);
255 231
256 /* 232 rc = IRQ_HANDLED;
257 * That 960 bug again...
258 */
259 mv = readl(c->out_port);
260 if (mv == I2O_QUEUE_EMPTY)
261 mv = readl(c->out_port);
262 } 233 }
263 234
264 return IRQ_HANDLED; 235 return rc;
265} 236}
266 237
267/** 238/**
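
The reworked handler keeps draining the outbound post queue for as long as the new IRQ status register reports pending replies, and only claims the interrupt when it actually dispatched something. A user-space sketch of the same loop with mocked register reads; the bit value used for I2O_IRQ_OUTBOUND_POST below is a placeholder, not the hardware definition:

/* Sketch of the drain loop in the new i2o_pci_interrupt(). */
#include <stdio.h>
#include <stdint.h>

#define I2O_IRQ_OUTBOUND_POST  0x00000008u  /* placeholder bit for the sketch */
#define I2O_QUEUE_EMPTY        0xffffffffu

static uint32_t pending[] = { 0x1000, 0x1040, 0x1080 };
static unsigned next;

static uint32_t read_irq_status(void)
{
	return next < 3 ? I2O_IRQ_OUTBOUND_POST : 0;
}

static uint32_t read_out_port(void)
{
	return next < 3 ? pending[next++] : I2O_QUEUE_EMPTY;
}

int main(void)
{
	int handled = 0;

	while (read_irq_status() & I2O_IRQ_OUTBOUND_POST) {
		uint32_t m = read_out_port();

		if (m == I2O_QUEUE_EMPTY) {
			/* old i960 steppings: re-read once before giving up */
			m = read_out_port();
			if (m == I2O_QUEUE_EMPTY)
				break;
		}
		printf("dispatching reply frame at offset 0x%x\n", m);
		handled = 1;
	}
	printf(handled ? "IRQ_HANDLED\n" : "IRQ_NONE\n");
	return 0;
}
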
diff --git a/include/linux/i2o-dev.h b/include/linux/i2o-dev.h
index 3414325bdcfd..90c984ecd521 100644
--- a/include/linux/i2o-dev.h
+++ b/include/linux/i2o-dev.h
@@ -32,6 +32,10 @@ typedef unsigned int u32;
32 32
33#endif /* __KERNEL__ */ 33#endif /* __KERNEL__ */
34 34
35/*
36 * Vendors
37 */
38#define I2O_VENDOR_DPT 0x001b
35 39
36/* 40/*
37 * I2O Control IOCTLs and structures 41 * I2O Control IOCTLs and structures
@@ -333,7 +337,7 @@ typedef struct _i2o_status_block {
333#define I2O_CLASS_ATE_PERIPHERAL 0x061 337#define I2O_CLASS_ATE_PERIPHERAL 0x061
334#define I2O_CLASS_FLOPPY_CONTROLLER 0x070 338#define I2O_CLASS_FLOPPY_CONTROLLER 0x070
335#define I2O_CLASS_FLOPPY_DEVICE 0x071 339#define I2O_CLASS_FLOPPY_DEVICE 0x071
336#define I2O_CLASS_BUS_ADAPTER_PORT 0x080 340#define I2O_CLASS_BUS_ADAPTER 0x080
337#define I2O_CLASS_PEER_TRANSPORT_AGENT 0x090 341#define I2O_CLASS_PEER_TRANSPORT_AGENT 0x090
338#define I2O_CLASS_PEER_TRANSPORT 0x091 342#define I2O_CLASS_PEER_TRANSPORT 0x091
339#define I2O_CLASS_END 0xfff 343#define I2O_CLASS_END 0xfff
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index e8cd11290010..497ea574f96b 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -157,7 +157,8 @@ struct i2o_controller {
157 157
158 void __iomem *in_port; /* Inbout port address */ 158 void __iomem *in_port; /* Inbout port address */
159 void __iomem *out_port; /* Outbound port address */ 159 void __iomem *out_port; /* Outbound port address */
160 void __iomem *irq_mask; /* Interrupt register address */ 160 void __iomem *irq_status; /* Interrupt status register address */
161 void __iomem *irq_mask; /* Interrupt mask register address */
161 162
162 /* Dynamic LCT related data */ 163 /* Dynamic LCT related data */
163 164
@@ -242,15 +243,6 @@ extern int i2o_msg_post_wait_mem(struct i2o_controller *, u32, unsigned long,
242extern void i2o_msg_nop(struct i2o_controller *, u32); 243extern void i2o_msg_nop(struct i2o_controller *, u32);
243static inline void i2o_flush_reply(struct i2o_controller *, u32); 244static inline void i2o_flush_reply(struct i2o_controller *, u32);
244 245
245/* DMA handling functions */
246static inline int i2o_dma_alloc(struct device *, struct i2o_dma *, size_t,
247 unsigned int);
248static inline void i2o_dma_free(struct device *, struct i2o_dma *);
249int i2o_dma_realloc(struct device *, struct i2o_dma *, size_t, unsigned int);
250
251static inline int i2o_dma_map(struct device *, struct i2o_dma *);
252static inline void i2o_dma_unmap(struct device *, struct i2o_dma *);
253
254/* IOP functions */ 246/* IOP functions */
255extern int i2o_status_get(struct i2o_controller *); 247extern int i2o_status_get(struct i2o_controller *);
256 248
@@ -275,6 +267,16 @@ static inline u32 i2o_ptr_high(void *ptr)
275{ 267{
276 return (u32) ((u64) ptr >> 32); 268 return (u32) ((u64) ptr >> 32);
277}; 269};
270
271static inline u32 i2o_dma_low(dma_addr_t dma_addr)
272{
273 return (u32) (u64) dma_addr;
274};
275
276static inline u32 i2o_dma_high(dma_addr_t dma_addr)
277{
278 return (u32) ((u64) dma_addr >> 32);
279};
278#else 280#else
279static inline u32 i2o_cntxt_list_add(struct i2o_controller *c, void *ptr) 281static inline u32 i2o_cntxt_list_add(struct i2o_controller *c, void *ptr)
280{ 282{
@@ -305,8 +307,246 @@ static inline u32 i2o_ptr_high(void *ptr)
305{ 307{
306 return 0; 308 return 0;
307}; 309};
310
311static inline u32 i2o_dma_low(dma_addr_t dma_addr)
312{
313 return (u32) dma_addr;
314};
315
316static inline u32 i2o_dma_high(dma_addr_t dma_addr)
317{
318 return 0;
319};
320#endif
321
322/**
323 * i2o_sg_tablesize - Calculate the maximum number of elements in a SGL
324 * @c: I2O controller for which the calculation should be done
325 * @body_size: maximum body size used for message in 32-bit words.
326 *
327 * Return the maximum number of SG elements in a SG list.
328 */
329static inline u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size)
330{
331 i2o_status_block *sb = c->status_block.virt;
332 u16 sg_count =
333 (sb->inbound_frame_size - sizeof(struct i2o_message) / 4) -
334 body_size;
335
336 if (c->pae_support) {
337 /*
338 * for 64-bit a SG attribute element must be added and each
339 * SG element needs 12 bytes instead of 8.
340 */
341 sg_count -= 2;
342 sg_count /= 3;
343 } else
344 sg_count /= 2;
345
346 if (c->short_req && (sg_count > 8))
347 sg_count = 8;
348
349 return sg_count;
350};
351
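
The calculation above divides the frame space left after the message header and fixed body by the per-element cost: two words per 32-bit SG element, or three words plus a two-word attribute element when 64-bit addressing is in use, clamped to eight elements for short requests. A worked example with illustrative frame and header sizes (the real values come from the IOP's status block):

/* Worked example of the i2o_sg_tablesize() arithmetic. */
#include <stdio.h>

static int sg_tablesize(int frame_words, int hdr_words, int body_words,
			int pae_support, int short_req)
{
	int sg_count = frame_words - hdr_words - body_words;

	if (pae_support) {
		sg_count -= 2;      /* room for the SG attribute element */
		sg_count /= 3;      /* 12 bytes per 64-bit SG element */
	} else {
		sg_count /= 2;      /* 8 bytes per 32-bit SG element */
	}

	if (short_req && sg_count > 8)
		sg_count = 8;

	return sg_count;
}

int main(void)
{
	/* 32-word frame, 7-word header, 9-word fixed body: made-up numbers */
	printf("32-bit SGL: %d elements\n", sg_tablesize(32, 7, 9, 0, 0));
	printf("64-bit SGL: %d elements\n", sg_tablesize(32, 7, 9, 1, 0));
	printf("short req : %d elements\n", sg_tablesize(128, 7, 9, 0, 1));
	return 0;
}
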
352/**
353 * i2o_dma_map_single - Map pointer to controller and fill in I2O message.
354 * @c: I2O controller
355 * @ptr: pointer to the data which should be mapped
356 * @size: size of data in bytes
357 * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
358 * @sg_ptr: pointer to the SG list inside the I2O message
359 *
360 * This function does all necessary DMA handling and also writes the I2O
361 * SGL elements into the I2O message. For details on DMA handling see also
362 * dma_map_single(). The pointer sg_ptr will only be set to the end of the
363 * SG list if the allocation was successful.
364 *
365 * Returns DMA address which must be checked for failures using
366 * dma_mapping_error().
367 */
368static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
369 size_t size,
370 enum dma_data_direction direction,
371 u32 __iomem ** sg_ptr)
372{
373 u32 sg_flags;
374 u32 __iomem *mptr = *sg_ptr;
375 dma_addr_t dma_addr;
376
377 switch (direction) {
378 case DMA_TO_DEVICE:
379 sg_flags = 0xd4000000;
380 break;
381 case DMA_FROM_DEVICE:
382 sg_flags = 0xd0000000;
383 break;
384 default:
385 return 0;
386 }
387
388 dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
389 if (!dma_mapping_error(dma_addr)) {
390#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
391 if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
392 writel(0x7C020002, mptr++);
393 writel(PAGE_SIZE, mptr++);
394 }
395#endif
396
397 writel(sg_flags | size, mptr++);
398 writel(i2o_dma_low(dma_addr), mptr++);
399#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
400 if ((sizeof(dma_addr_t) > 4) && c->pae_support)
401 writel(i2o_dma_high(dma_addr), mptr++);
402#endif
403 *sg_ptr = mptr;
404 }
405 return dma_addr;
406};
407
408/**
409 * i2o_dma_map_sg - Map a SG List to controller and fill in I2O message.
410 * @c: I2O controller
411 * @sg: SG list to be mapped
412 * @sg_count: number of elements in the SG list
413 * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
414 * @sg_ptr: pointer to the SG list inside the I2O message
415 *
416 * This function does all necessary DMA handling and also writes the I2O
417 * SGL elements into the I2O message. For details on DMA handling see also
418 * dma_map_sg(). The pointer sg_ptr will only be set to the end of the SG
419 * list if the allocation was successful.
420 *
421 * Returns 0 on failure or 1 on success.
422 */
423static inline int i2o_dma_map_sg(struct i2o_controller *c,
424 struct scatterlist *sg, int sg_count,
425 enum dma_data_direction direction,
426 u32 __iomem ** sg_ptr)
427{
428 u32 sg_flags;
429 u32 __iomem *mptr = *sg_ptr;
430
431 switch (direction) {
432 case DMA_TO_DEVICE:
433 sg_flags = 0x14000000;
434 break;
435 case DMA_FROM_DEVICE:
436 sg_flags = 0x10000000;
437 break;
438 default:
439 return 0;
440 }
441
442 sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction);
443 if (!sg_count)
444 return 0;
445
446#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
447 if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
448 writel(0x7C020002, mptr++);
449 writel(PAGE_SIZE, mptr++);
450 }
308#endif 451#endif
309 452
453 while (sg_count-- > 0) {
454 if (!sg_count)
455 sg_flags |= 0xC0000000;
456 writel(sg_flags | sg_dma_len(sg), mptr++);
457 writel(i2o_dma_low(sg_dma_address(sg)), mptr++);
458#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
459 if ((sizeof(dma_addr_t) > 4) && c->pae_support)
460 writel(i2o_dma_high(sg_dma_address(sg)), mptr++);
461#endif
462 sg++;
463 }
464 *sg_ptr = mptr;
465
466 return 1;
467};
468
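
Both mapping helpers emit the same SG element layout into the message: a flags-plus-length word, the low half of the DMA address and, when 64-bit addressing is enabled, the high half, with the end-of-list bits OR'ed into the final element. A sketch of that layout built into a plain array, with invented addresses and lengths:

/* Sketch of the SG element layout written by i2o_dma_map_sg(). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t addr[2] = { 0x12340000ull, 0x1fff56000ull };  /* example buffers */
	uint32_t len[2]  = { 4096, 512 };
	uint32_t sg_flags = 0x10000000;      /* DMA_FROM_DEVICE case in the patch */
	uint32_t msg[16];
	int pae = 1, mptr = 0;

	for (int i = 0; i < 2; i++) {
		uint32_t flags = sg_flags;

		if (i == 1)                   /* last element gets end-of-list bits */
			flags |= 0xC0000000;
		msg[mptr++] = flags | len[i];
		msg[mptr++] = (uint32_t)addr[i];                  /* i2o_dma_low()  */
		if (pae)
			msg[mptr++] = (uint32_t)(addr[i] >> 32);  /* i2o_dma_high() */
	}

	for (int i = 0; i < mptr; i++)
		printf("msg[%2d] = 0x%08x\n", i, msg[i]);
	return 0;
}
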
469/**
470 * i2o_dma_alloc - Allocate DMA memory
471 * @dev: struct device pointer to the PCI device of the I2O controller
472 * @addr: i2o_dma struct which should get the DMA buffer
473 * @len: length of the new DMA memory
474 * @gfp_mask: GFP mask
475 *
476 * Allocate a coherent DMA memory and write the pointers into addr.
477 *
478 * Returns 0 on success or -ENOMEM on failure.
479 */
480static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr,
481 size_t len, unsigned int gfp_mask)
482{
483 struct pci_dev *pdev = to_pci_dev(dev);
484 int dma_64 = 0;
485
486 if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) {
487 dma_64 = 1;
488 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
489 return -ENOMEM;
490 }
491
492 addr->virt = dma_alloc_coherent(dev, len, &addr->phys, gfp_mask);
493
494 if ((sizeof(dma_addr_t) > 4) && dma_64)
495 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
496 printk(KERN_WARNING "i2o: unable to set 64-bit DMA");
497
498 if (!addr->virt)
499 return -ENOMEM;
500
501 memset(addr->virt, 0, len);
502 addr->len = len;
503
504 return 0;
505};
506
507/**
508 * i2o_dma_free - Free DMA memory
509 * @dev: struct device pointer to the PCI device of the I2O controller
510 * @addr: i2o_dma struct which contains the DMA buffer
511 *
512 * Free a coherent DMA memory and set virtual address of addr to NULL.
513 */
514static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
515{
516 if (addr->virt) {
517 if (addr->phys)
518 dma_free_coherent(dev, addr->len, addr->virt,
519 addr->phys);
520 else
521 kfree(addr->virt);
522 addr->virt = NULL;
523 }
524};
525
526/**
527 * i2o_dma_realloc - Realloc DMA memory
528 * @dev: struct device pointer to the PCI device of the I2O controller
529 * @addr: pointer to a i2o_dma struct DMA buffer
530 * @len: new length of memory
531 * @gfp_mask: GFP mask
532 *
533 * If there was something allocated in the addr, free it first. If len > 0
534 * then try to allocate it and write the addresses back to the addr
535 * structure. If len == 0 set the virtual address to NULL.
536 *
537 * Returns 0 on success or negative error code on failure.
538 */
539static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
540 size_t len, unsigned int gfp_mask)
541{
542 i2o_dma_free(dev, addr);
543
544 if (len)
545 return i2o_dma_alloc(dev, addr, len, gfp_mask);
546
547 return 0;
548};
549
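
The three helpers follow a simple contract: i2o_dma_alloc() returns zeroed coherent memory, i2o_dma_free() releases it and clears the virtual pointer, and i2o_dma_realloc() always frees first and only reallocates when the new length is non-zero. A user-space analogue of that call pattern, with malloc() standing in for dma_alloc_coherent():

/* User-space analogue of the i2o_dma helper call pattern. */
#include <stdio.h>
#include <stdlib.h>

struct demo_dma { void *virt; size_t len; };

static int demo_alloc(struct demo_dma *d, size_t len)
{
	d->virt = calloc(1, len);          /* zeroed, like the kernel helper */
	if (!d->virt)
		return -1;
	d->len = len;
	return 0;
}

static void demo_free(struct demo_dma *d)
{
	free(d->virt);
	d->virt = NULL;
	d->len = 0;
}

static int demo_realloc(struct demo_dma *d, size_t len)
{
	demo_free(d);                      /* free first, as in i2o_dma_realloc() */
	return len ? demo_alloc(d, len) : 0;
}

int main(void)
{
	struct demo_dma lct = { 0 };

	demo_alloc(&lct, 256);             /* initial buffer */
	demo_realloc(&lct, 1024);          /* table grew, get a bigger buffer */
	printf("buffer now %zu bytes at %p\n", lct.len, lct.virt);
	demo_realloc(&lct, 0);             /* len == 0 just frees */
	return 0;
}
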
310/* I2O driver (OSM) functions */ 550/* I2O driver (OSM) functions */
311extern int i2o_driver_register(struct i2o_driver *); 551extern int i2o_driver_register(struct i2o_driver *);
312extern void i2o_driver_unregister(struct i2o_driver *); 552extern void i2o_driver_unregister(struct i2o_driver *);
@@ -375,10 +615,11 @@ extern int i2o_device_claim_release(struct i2o_device *);
375/* Exec OSM functions */ 615/* Exec OSM functions */
376extern int i2o_exec_lct_get(struct i2o_controller *); 616extern int i2o_exec_lct_get(struct i2o_controller *);
377 617
378/* device / driver conversion functions */ 618/* device / driver / kobject conversion functions */
379#define to_i2o_driver(drv) container_of(drv,struct i2o_driver, driver) 619#define to_i2o_driver(drv) container_of(drv,struct i2o_driver, driver)
380#define to_i2o_device(dev) container_of(dev, struct i2o_device, device) 620#define to_i2o_device(dev) container_of(dev, struct i2o_device, device)
381#define to_i2o_controller(dev) container_of(dev, struct i2o_controller, device) 621#define to_i2o_controller(dev) container_of(dev, struct i2o_controller, device)
622#define kobj_to_i2o_device(kobj) to_i2o_device(container_of(kobj, struct device, kobj))
382 623
383/** 624/**
384 * i2o_msg_get - obtain an I2O message from the IOP 625 * i2o_msg_get - obtain an I2O message from the IOP
@@ -466,8 +707,10 @@ static inline struct i2o_message __iomem *i2o_msg_out_to_virt(struct
466 i2o_controller *c, 707 i2o_controller *c,
467 u32 m) 708 u32 m)
468{ 709{
469 BUG_ON(m < c->out_queue.phys 710 if (unlikely
470 || m >= c->out_queue.phys + c->out_queue.len); 711 (m < c->out_queue.phys
712 || m >= c->out_queue.phys + c->out_queue.len))
713 return NULL;
471 714
472 return c->out_queue.virt + (m - c->out_queue.phys); 715 return c->out_queue.virt + (m - c->out_queue.phys);
473}; 716};
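
Instead of a BUG_ON(), an MFA outside the outbound queue window now simply yields NULL, and the valid case stays plain offset arithmetic against the queue's base addresses. A sketch of the same check with invented queue parameters:

/* Sketch of the softened bounds check in i2o_msg_out_to_virt(). */
#include <stdio.h>
#include <stdint.h>

static char queue[4096];                       /* stands in for out_queue.virt */
static const uint32_t queue_phys = 0x80000;    /* stands in for out_queue.phys */
static const uint32_t queue_len  = sizeof(queue);

static void *out_to_virt(uint32_t m)
{
	if (m < queue_phys || m >= queue_phys + queue_len)
		return NULL;                   /* bad MFA: reject instead of BUG_ON */

	return queue + (m - queue_phys);
}

int main(void)
{
	printf("valid MFA  : %p\n", out_to_virt(0x80040));
	printf("invalid MFA: %p\n", out_to_virt(0x12345));
	return 0;
}
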
@@ -532,48 +775,6 @@ static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
532 } 775 }
533}; 776};
534 777
535/**
536 * i2o_dma_map - Map the memory to DMA
537 * @dev: struct device pointer to the PCI device of the I2O controller
538 * @addr: i2o_dma struct which should be mapped
539 *
540 * Map the memory in addr->virt to coherent DMA memory and write the
541 * physical address into addr->phys.
542 *
543 * Returns 0 on success or -ENOMEM on failure.
544 */
545static inline int i2o_dma_map(struct device *dev, struct i2o_dma *addr)
546{
547 if (!addr->virt)
548 return -EFAULT;
549
550 if (!addr->phys)
551 addr->phys = dma_map_single(dev, addr->virt, addr->len,
552 DMA_BIDIRECTIONAL);
553 if (!addr->phys)
554 return -ENOMEM;
555
556 return 0;
557};
558
559/**
560 * i2o_dma_unmap - Unmap the DMA memory
561 * @dev: struct device pointer to the PCI device of the I2O controller
562 * @addr: i2o_dma struct which should be unmapped
563 *
564 * Unmap the memory in addr->virt from DMA memory.
565 */
566static inline void i2o_dma_unmap(struct device *dev, struct i2o_dma *addr)
567{
568 if (!addr->virt)
569 return;
570
571 if (addr->phys) {
572 dma_unmap_single(dev, addr->phys, addr->len, DMA_BIDIRECTIONAL);
573 addr->phys = 0;
574 }
575};
576
577/* 778/*
578 * Endian handling wrapped into the macro - keeps the core code 779 * Endian handling wrapped into the macro - keeps the core code
579 * cleaner. 780 * cleaner.
@@ -726,6 +927,14 @@ extern void i2o_debug_state(struct i2o_controller *c);
726#define I2O_CMD_SCSI_BUSRESET 0x27 927#define I2O_CMD_SCSI_BUSRESET 0x27
727 928
728/* 929/*
930 * Bus Adapter Class
931 */
932#define I2O_CMD_BUS_ADAPTER_RESET 0x85
933#define I2O_CMD_BUS_RESET 0x87
934#define I2O_CMD_BUS_SCAN 0x89
935#define I2O_CMD_BUS_QUIESCE 0x8b
936
937/*
729 * Random Block Storage Class 938 * Random Block Storage Class
730 */ 939 */
731#define I2O_CMD_BLOCK_READ 0x30 940#define I2O_CMD_BLOCK_READ 0x30
@@ -948,7 +1157,7 @@ extern void i2o_debug_state(struct i2o_controller *c);
948 1157
949/* request queue sizes */ 1158/* request queue sizes */
950#define I2O_MAX_SECTORS 1024 1159#define I2O_MAX_SECTORS 1024
951#define I2O_MAX_SEGMENTS 128 1160#define I2O_MAX_PHYS_SEGMENTS MAX_PHYS_SEGMENTS
952 1161
953#define I2O_REQ_MEMPOOL_SIZE 32 1162#define I2O_REQ_MEMPOOL_SIZE 32
954 1163