path: root/drivers/message
Diffstat (limited to 'drivers/message')
-rw-r--r--  drivers/message/i2o/Kconfig        12
-rw-r--r--  drivers/message/i2o/bus-osm.c      23
-rw-r--r--  drivers/message/i2o/config-osm.c    2
-rw-r--r--  drivers/message/i2o/core.h         20
-rw-r--r--  drivers/message/i2o/device.c      339
-rw-r--r--  drivers/message/i2o/driver.c       12
-rw-r--r--  drivers/message/i2o/exec-osm.c    114
-rw-r--r--  drivers/message/i2o/i2o_block.c   190
-rw-r--r--  drivers/message/i2o/i2o_config.c  196
-rw-r--r--  drivers/message/i2o/i2o_lan.h      38
-rw-r--r--  drivers/message/i2o/i2o_proc.c      2
-rw-r--r--  drivers/message/i2o/i2o_scsi.c     89
-rw-r--r--  drivers/message/i2o/iop.c         356
-rw-r--r--  drivers/message/i2o/pci.c           7
14 files changed, 662 insertions, 738 deletions
diff --git a/drivers/message/i2o/Kconfig b/drivers/message/i2o/Kconfig
index 43a942a29c2e..fef677103880 100644
--- a/drivers/message/i2o/Kconfig
+++ b/drivers/message/i2o/Kconfig
@@ -24,6 +24,18 @@ config I2O
24 24
25 If unsure, say N. 25 If unsure, say N.
26 26
27config I2O_LCT_NOTIFY_ON_CHANGES
28 bool "Enable LCT notification"
29 depends on I2O
30 default y
31 ---help---
32 Only say N here if you have an I2O controller from SUN. The SUN
33 firmware doesn't support LCT notification on changes. If this option
34 is enabled on such a controller, the driver will hang in an endless
35 loop. On all other controllers say Y.
36
37 If unsure, say Y.
38
27config I2O_EXT_ADAPTEC 39config I2O_EXT_ADAPTEC
28 bool "Enable Adaptec extensions" 40 bool "Enable Adaptec extensions"
29 depends on I2O 41 depends on I2O
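
The new option only controls whether the exec OSM re-arms LCT change notifications; the exec-osm.c hunk further down wraps the i2o_exec_lct_notify() call in this #ifdef. A minimal sketch of that guard (the function name and surrounding body are assumed, only the symbols used by the patch are real):

/* sketch: compile the notification out for SUN firmware, which would
 * otherwise loop forever, as described in the help text above */
static void i2o_lct_rescan_sketch(struct i2o_controller *c, u32 change_ind)
{
#ifdef CONFIG_I2O_LCT_NOTIFY_ON_CHANGES
	i2o_exec_lct_notify(c, change_ind);
#endif
}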
diff --git a/drivers/message/i2o/bus-osm.c b/drivers/message/i2o/bus-osm.c
index 151b228e1cb3..ac06f10c54ec 100644
--- a/drivers/message/i2o/bus-osm.c
+++ b/drivers/message/i2o/bus-osm.c
@@ -17,7 +17,7 @@
17#include <linux/i2o.h> 17#include <linux/i2o.h>
18 18
19#define OSM_NAME "bus-osm" 19#define OSM_NAME "bus-osm"
20#define OSM_VERSION "$Rev$" 20#define OSM_VERSION "1.317"
21#define OSM_DESCRIPTION "I2O Bus Adapter OSM" 21#define OSM_DESCRIPTION "I2O Bus Adapter OSM"
22 22
23static struct i2o_driver i2o_bus_driver; 23static struct i2o_driver i2o_bus_driver;
@@ -39,18 +39,18 @@ static struct i2o_class_id i2o_bus_class_id[] = {
39 */ 39 */
40static int i2o_bus_scan(struct i2o_device *dev) 40static int i2o_bus_scan(struct i2o_device *dev)
41{ 41{
42 struct i2o_message __iomem *msg; 42 struct i2o_message *msg;
43 u32 m;
44 43
45 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 44 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
46 if (m == I2O_QUEUE_EMPTY) 45 if (IS_ERR(msg))
47 return -ETIMEDOUT; 46 return -ETIMEDOUT;
48 47
49 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 48 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
50 writel(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data.tid, 49 msg->u.head[1] =
51 &msg->u.head[1]); 50 cpu_to_le32(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data.
51 tid);
52 52
53 return i2o_msg_post_wait(dev->iop, m, 60); 53 return i2o_msg_post_wait(dev->iop, msg, 60);
54}; 54};
55 55
56/** 56/**
@@ -59,8 +59,9 @@ static int i2o_bus_scan(struct i2o_device *dev)
59 * 59 *
60 * Returns count. 60 * Returns count.
61 */ 61 */
62static ssize_t i2o_bus_store_scan(struct device *d, struct device_attribute *attr, const char *buf, 62static ssize_t i2o_bus_store_scan(struct device *d,
63 size_t count) 63 struct device_attribute *attr,
64 const char *buf, size_t count)
64{ 65{
65 struct i2o_device *i2o_dev = to_i2o_device(d); 66 struct i2o_device *i2o_dev = to_i2o_device(d);
66 int rc; 67 int rc;
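
This small OSM shows the message API conversion that repeats through the whole patch: i2o_msg_get_wait() now hands back a struct i2o_message pointer checked with IS_ERR() instead of an MFA handle compared against I2O_QUEUE_EMPTY, and the frame is filled with cpu_to_le32() assignments instead of writel(). A consolidated sketch of the new calling convention, reusing the bus-scan values from the hunk above (constants and types are assumed to come from the i2o headers):

#include <linux/err.h>
#include <linux/i2o.h>

static int i2o_bus_scan_sketch(struct i2o_device *dev)
{
	struct i2o_message *msg;

	/* wait up to I2O_TIMEOUT_MESSAGE_GET for a free inbound frame */
	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return -ETIMEDOUT;

	/* header words are plain little-endian stores now, not writel() */
	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] = cpu_to_le32(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 |
				     dev->lct_data.tid);

	/* post the frame and wait up to 60 seconds for the reply */
	return i2o_msg_post_wait(dev->iop, msg, 60);
}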
diff --git a/drivers/message/i2o/config-osm.c b/drivers/message/i2o/config-osm.c
index 10432f665201..3bba7aa82e58 100644
--- a/drivers/message/i2o/config-osm.c
+++ b/drivers/message/i2o/config-osm.c
@@ -22,7 +22,7 @@
22#include <asm/uaccess.h> 22#include <asm/uaccess.h>
23 23
24#define OSM_NAME "config-osm" 24#define OSM_NAME "config-osm"
25#define OSM_VERSION "1.248" 25#define OSM_VERSION "1.323"
26#define OSM_DESCRIPTION "I2O Configuration OSM" 26#define OSM_DESCRIPTION "I2O Configuration OSM"
27 27
28/* access mode user rw */ 28/* access mode user rw */
diff --git a/drivers/message/i2o/core.h b/drivers/message/i2o/core.h
index 9eefedb16211..90628562851e 100644
--- a/drivers/message/i2o/core.h
+++ b/drivers/message/i2o/core.h
@@ -14,8 +14,6 @@
14 */ 14 */
15 15
16/* Exec-OSM */ 16/* Exec-OSM */
17extern struct bus_type i2o_bus_type;
18
19extern struct i2o_driver i2o_exec_driver; 17extern struct i2o_driver i2o_exec_driver;
20extern int i2o_exec_lct_get(struct i2o_controller *); 18extern int i2o_exec_lct_get(struct i2o_controller *);
21 19
@@ -23,6 +21,8 @@ extern int __init i2o_exec_init(void);
23extern void __exit i2o_exec_exit(void); 21extern void __exit i2o_exec_exit(void);
24 22
25/* driver */ 23/* driver */
24extern struct bus_type i2o_bus_type;
25
26extern int i2o_driver_dispatch(struct i2o_controller *, u32); 26extern int i2o_driver_dispatch(struct i2o_controller *, u32);
27 27
28extern int __init i2o_driver_init(void); 28extern int __init i2o_driver_init(void);
@@ -33,19 +33,27 @@ extern int __init i2o_pci_init(void);
33extern void __exit i2o_pci_exit(void); 33extern void __exit i2o_pci_exit(void);
34 34
35/* device */ 35/* device */
36extern struct device_attribute i2o_device_attrs[];
37
36extern void i2o_device_remove(struct i2o_device *); 38extern void i2o_device_remove(struct i2o_device *);
37extern int i2o_device_parse_lct(struct i2o_controller *); 39extern int i2o_device_parse_lct(struct i2o_controller *);
38 40
39/* IOP */ 41/* IOP */
40extern struct i2o_controller *i2o_iop_alloc(void); 42extern struct i2o_controller *i2o_iop_alloc(void);
41extern void i2o_iop_free(struct i2o_controller *); 43
44/**
45 * i2o_iop_free - Free the i2o_controller struct
46 * @c: I2O controller to free
47 */
48static inline void i2o_iop_free(struct i2o_controller *c)
49{
50 i2o_pool_free(&c->in_msg);
51 kfree(c);
52}
42 53
43extern int i2o_iop_add(struct i2o_controller *); 54extern int i2o_iop_add(struct i2o_controller *);
44extern void i2o_iop_remove(struct i2o_controller *); 55extern void i2o_iop_remove(struct i2o_controller *);
45 56
46/* config */
47extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int);
48
49/* control registers relative to c->base */ 57/* control registers relative to c->base */
50#define I2O_IRQ_STATUS 0x30 58#define I2O_IRQ_STATUS 0x30
51#define I2O_IRQ_MASK 0x34 59#define I2O_IRQ_MASK 0x34
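
With i2o_iop_free() now a static inline, an error path can drop a controller without an extra exported symbol. A sketch of the alloc/add/free pairing, assuming i2o_iop_alloc() returns an ERR_PTR on failure in the same way i2o_device_alloc() does in the device.c hunk below:

#include <linux/err.h>
#include "core.h"

static int i2o_iop_setup_sketch(void)
{
	struct i2o_controller *c;
	int rc;

	c = i2o_iop_alloc();
	if (IS_ERR(c))
		return PTR_ERR(c);

	rc = i2o_iop_add(c);
	if (rc)
		i2o_iop_free(c);	/* frees the in_msg pool and the struct */

	return rc;
}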
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
index 8eb50cdb8ae1..ee183053fa23 100644
--- a/drivers/message/i2o/device.c
+++ b/drivers/message/i2o/device.c
@@ -35,18 +35,18 @@
35static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd, 35static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd,
36 u32 type) 36 u32 type)
37{ 37{
38 struct i2o_message __iomem *msg; 38 struct i2o_message *msg;
39 u32 m;
40 39
41 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 40 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
42 if (m == I2O_QUEUE_EMPTY) 41 if (IS_ERR(msg))
43 return -ETIMEDOUT; 42 return PTR_ERR(msg);
44 43
45 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 44 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
46 writel(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid, &msg->u.head[1]); 45 msg->u.head[1] =
47 writel(type, &msg->body[0]); 46 cpu_to_le32(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid);
47 msg->body[0] = cpu_to_le32(type);
48 48
49 return i2o_msg_post_wait(dev->iop, m, 60); 49 return i2o_msg_post_wait(dev->iop, msg, 60);
50} 50}
51 51
52/** 52/**
@@ -123,7 +123,6 @@ int i2o_device_claim_release(struct i2o_device *dev)
123 return rc; 123 return rc;
124} 124}
125 125
126
127/** 126/**
128 * i2o_device_release - release the memory for a I2O device 127 * i2o_device_release - release the memory for a I2O device
129 * @dev: I2O device which should be released 128 * @dev: I2O device which should be released
@@ -140,10 +139,10 @@ static void i2o_device_release(struct device *dev)
140 kfree(i2o_dev); 139 kfree(i2o_dev);
141} 140}
142 141
143
144/** 142/**
145 * i2o_device_class_show_class_id - Displays class id of I2O device 143 * i2o_device_show_class_id - Displays class id of I2O device
146 * @cd: class device of which the class id should be displayed 144 * @dev: device of which the class id should be displayed
145 * @attr: pointer to device attribute
147 * @buf: buffer into which the class id should be printed 146 * @buf: buffer into which the class id should be printed
148 * 147 *
149 * Returns the number of bytes which are printed into the buffer. 148 * Returns the number of bytes which are printed into the buffer.
@@ -159,15 +158,15 @@ static ssize_t i2o_device_show_class_id(struct device *dev,
159} 158}
160 159
161/** 160/**
162 * i2o_device_class_show_tid - Displays TID of I2O device 161 * i2o_device_show_tid - Displays TID of I2O device
163 * @cd: class device of which the TID should be displayed 162 * @dev: device of which the TID should be displayed
164 * @buf: buffer into which the class id should be printed 163 * @attr: pointer to device attribute
164 * @buf: buffer into which the TID should be printed
165 * 165 *
166 * Returns the number of bytes which are printed into the buffer. 166 * Returns the number of bytes which are printed into the buffer.
167 */ 167 */
168static ssize_t i2o_device_show_tid(struct device *dev, 168static ssize_t i2o_device_show_tid(struct device *dev,
169 struct device_attribute *attr, 169 struct device_attribute *attr, char *buf)
170 char *buf)
171{ 170{
172 struct i2o_device *i2o_dev = to_i2o_device(dev); 171 struct i2o_device *i2o_dev = to_i2o_device(dev);
173 172
@@ -175,6 +174,7 @@ static ssize_t i2o_device_show_tid(struct device *dev,
175 return strlen(buf) + 1; 174 return strlen(buf) + 1;
176} 175}
177 176
177/* I2O device attributes */
178struct device_attribute i2o_device_attrs[] = { 178struct device_attribute i2o_device_attrs[] = {
179 __ATTR(class_id, S_IRUGO, i2o_device_show_class_id, NULL), 179 __ATTR(class_id, S_IRUGO, i2o_device_show_class_id, NULL),
180 __ATTR(tid, S_IRUGO, i2o_device_show_tid, NULL), 180 __ATTR(tid, S_IRUGO, i2o_device_show_tid, NULL),
@@ -193,12 +193,10 @@ static struct i2o_device *i2o_device_alloc(void)
193{ 193{
194 struct i2o_device *dev; 194 struct i2o_device *dev;
195 195
196 dev = kmalloc(sizeof(*dev), GFP_KERNEL); 196 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
197 if (!dev) 197 if (!dev)
198 return ERR_PTR(-ENOMEM); 198 return ERR_PTR(-ENOMEM);
199 199
200 memset(dev, 0, sizeof(*dev));
201
202 INIT_LIST_HEAD(&dev->list); 200 INIT_LIST_HEAD(&dev->list);
203 init_MUTEX(&dev->lock); 201 init_MUTEX(&dev->lock);
204 202
@@ -209,66 +207,6 @@ static struct i2o_device *i2o_device_alloc(void)
209} 207}
210 208
211/** 209/**
212 * i2o_setup_sysfs_links - Adds attributes to the I2O device
213 * @cd: I2O class device which is added to the I2O device class
214 *
215 * This function get called when a I2O device is added to the class. It
216 * creates the attributes for each device and creates user/parent symlink
217 * if necessary.
218 *
219 * Returns 0 on success or negative error code on failure.
220 */
221static void i2o_setup_sysfs_links(struct i2o_device *i2o_dev)
222{
223 struct i2o_controller *c = i2o_dev->iop;
224 struct i2o_device *tmp;
225
226 /* create user entries for this device */
227 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid);
228 if (tmp && tmp != i2o_dev)
229 sysfs_create_link(&i2o_dev->device.kobj,
230 &tmp->device.kobj, "user");
231
232 /* create user entries refering to this device */
233 list_for_each_entry(tmp, &c->devices, list)
234 if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid &&
235 tmp != i2o_dev)
236 sysfs_create_link(&tmp->device.kobj,
237 &i2o_dev->device.kobj, "user");
238
239 /* create parent entries for this device */
240 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid);
241 if (tmp && tmp != i2o_dev)
242 sysfs_create_link(&i2o_dev->device.kobj,
243 &tmp->device.kobj, "parent");
244
245 /* create parent entries refering to this device */
246 list_for_each_entry(tmp, &c->devices, list)
247 if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid &&
248 tmp != i2o_dev)
249 sysfs_create_link(&tmp->device.kobj,
250 &i2o_dev->device.kobj, "parent");
251}
252
253static void i2o_remove_sysfs_links(struct i2o_device *i2o_dev)
254{
255 struct i2o_controller *c = i2o_dev->iop;
256 struct i2o_device *tmp;
257
258 sysfs_remove_link(&i2o_dev->device.kobj, "parent");
259 sysfs_remove_link(&i2o_dev->device.kobj, "user");
260
261 list_for_each_entry(tmp, &c->devices, list) {
262 if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
263 sysfs_remove_link(&tmp->device.kobj, "parent");
264 if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
265 sysfs_remove_link(&tmp->device.kobj, "user");
266 }
267}
268
269
270
271/**
272 * i2o_device_add - allocate a new I2O device and add it to the IOP 210 * i2o_device_add - allocate a new I2O device and add it to the IOP
273 * @iop: I2O controller where the device is on 211 * @iop: I2O controller where the device is on
274 * @entry: LCT entry of the I2O device 212 * @entry: LCT entry of the I2O device
@@ -282,33 +220,57 @@ static void i2o_remove_sysfs_links(struct i2o_device *i2o_dev)
282static struct i2o_device *i2o_device_add(struct i2o_controller *c, 220static struct i2o_device *i2o_device_add(struct i2o_controller *c,
283 i2o_lct_entry * entry) 221 i2o_lct_entry * entry)
284{ 222{
285 struct i2o_device *dev; 223 struct i2o_device *i2o_dev, *tmp;
286 224
287 dev = i2o_device_alloc(); 225 i2o_dev = i2o_device_alloc();
288 if (IS_ERR(dev)) { 226 if (IS_ERR(i2o_dev)) {
289 printk(KERN_ERR "i2o: unable to allocate i2o device\n"); 227 printk(KERN_ERR "i2o: unable to allocate i2o device\n");
290 return dev; 228 return i2o_dev;
291 } 229 }
292 230
293 dev->lct_data = *entry; 231 i2o_dev->lct_data = *entry;
294 dev->iop = c;
295 232
296 snprintf(dev->device.bus_id, BUS_ID_SIZE, "%d:%03x", c->unit, 233 snprintf(i2o_dev->device.bus_id, BUS_ID_SIZE, "%d:%03x", c->unit,
297 dev->lct_data.tid); 234 i2o_dev->lct_data.tid);
298 235
299 dev->device.parent = &c->device; 236 i2o_dev->iop = c;
237 i2o_dev->device.parent = &c->device;
300 238
301 device_register(&dev->device); 239 device_register(&i2o_dev->device);
302 240
303 list_add_tail(&dev->list, &c->devices); 241 list_add_tail(&i2o_dev->list, &c->devices);
304 242
305 i2o_setup_sysfs_links(dev); 243 /* create user entries for this device */
244 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid);
245 if (tmp && (tmp != i2o_dev))
246 sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj,
247 "user");
306 248
307 i2o_driver_notify_device_add_all(dev); 249 /* create user entries referring to this device */
250 list_for_each_entry(tmp, &c->devices, list)
251 if ((tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
252 && (tmp != i2o_dev))
253 sysfs_create_link(&tmp->device.kobj,
254 &i2o_dev->device.kobj, "user");
308 255
309 pr_debug("i2o: device %s added\n", dev->device.bus_id); 256 /* create parent entries for this device */
257 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid);
258 if (tmp && (tmp != i2o_dev))
259 sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj,
260 "parent");
310 261
311 return dev; 262 /* create parent entries referring to this device */
263 list_for_each_entry(tmp, &c->devices, list)
264 if ((tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
265 && (tmp != i2o_dev))
266 sysfs_create_link(&tmp->device.kobj,
267 &i2o_dev->device.kobj, "parent");
268
269 i2o_driver_notify_device_add_all(i2o_dev);
270
271 pr_debug("i2o: device %s added\n", i2o_dev->device.bus_id);
272
273 return i2o_dev;
312} 274}
313 275
314/** 276/**
@@ -321,9 +283,22 @@ static struct i2o_device *i2o_device_add(struct i2o_controller *c,
321 */ 283 */
322void i2o_device_remove(struct i2o_device *i2o_dev) 284void i2o_device_remove(struct i2o_device *i2o_dev)
323{ 285{
286 struct i2o_device *tmp;
287 struct i2o_controller *c = i2o_dev->iop;
288
324 i2o_driver_notify_device_remove_all(i2o_dev); 289 i2o_driver_notify_device_remove_all(i2o_dev);
325 i2o_remove_sysfs_links(i2o_dev); 290
291 sysfs_remove_link(&i2o_dev->device.kobj, "parent");
292 sysfs_remove_link(&i2o_dev->device.kobj, "user");
293
294 list_for_each_entry(tmp, &c->devices, list) {
295 if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
296 sysfs_remove_link(&tmp->device.kobj, "parent");
297 if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
298 sysfs_remove_link(&tmp->device.kobj, "user");
299 }
326 list_del(&i2o_dev->list); 300 list_del(&i2o_dev->list);
301
327 device_unregister(&i2o_dev->device); 302 device_unregister(&i2o_dev->device);
328} 303}
329 304
@@ -341,56 +316,83 @@ int i2o_device_parse_lct(struct i2o_controller *c)
341{ 316{
342 struct i2o_device *dev, *tmp; 317 struct i2o_device *dev, *tmp;
343 i2o_lct *lct; 318 i2o_lct *lct;
344 int i; 319 u32 *dlct = c->dlct.virt;
345 int max; 320 int max = 0, i = 0;
321 u16 table_size;
322 u32 buf;
346 323
347 down(&c->lct_lock); 324 down(&c->lct_lock);
348 325
349 kfree(c->lct); 326 kfree(c->lct);
350 327
351 lct = c->dlct.virt; 328 buf = le32_to_cpu(*dlct++);
329 table_size = buf & 0xffff;
352 330
353 c->lct = kmalloc(lct->table_size * 4, GFP_KERNEL); 331 lct = c->lct = kmalloc(table_size * 4, GFP_KERNEL);
354 if (!c->lct) { 332 if (!lct) {
355 up(&c->lct_lock); 333 up(&c->lct_lock);
356 return -ENOMEM; 334 return -ENOMEM;
357 } 335 }
358 336
359 if (lct->table_size * 4 > c->dlct.len) { 337 lct->lct_ver = buf >> 28;
360 memcpy(c->lct, c->dlct.virt, c->dlct.len); 338 lct->boot_tid = buf >> 16 & 0xfff;
361 up(&c->lct_lock); 339 lct->table_size = table_size;
362 return -EAGAIN; 340 lct->change_ind = le32_to_cpu(*dlct++);
363 } 341 lct->iop_flags = le32_to_cpu(*dlct++);
364 342
365 memcpy(c->lct, c->dlct.virt, lct->table_size * 4); 343 table_size -= 3;
366
367 lct = c->lct;
368
369 max = (lct->table_size - 3) / 9;
370 344
371 pr_debug("%s: LCT has %d entries (LCT size: %d)\n", c->name, max, 345 pr_debug("%s: LCT has %d entries (LCT size: %d)\n", c->name, max,
372 lct->table_size); 346 lct->table_size);
373 347
374 /* remove devices, which are not in the LCT anymore */ 348 while (table_size > 0) {
375 list_for_each_entry_safe(dev, tmp, &c->devices, list) { 349 i2o_lct_entry *entry = &lct->lct_entry[max];
376 int found = 0; 350 int found = 0;
377 351
378 for (i = 0; i < max; i++) { 352 buf = le32_to_cpu(*dlct++);
379 if (lct->lct_entry[i].tid == dev->lct_data.tid) { 353 entry->entry_size = buf & 0xffff;
354 entry->tid = buf >> 16 & 0xfff;
355
356 entry->change_ind = le32_to_cpu(*dlct++);
357 entry->device_flags = le32_to_cpu(*dlct++);
358
359 buf = le32_to_cpu(*dlct++);
360 entry->class_id = buf & 0xfff;
361 entry->version = buf >> 12 & 0xf;
362 entry->vendor_id = buf >> 16;
363
364 entry->sub_class = le32_to_cpu(*dlct++);
365
366 buf = le32_to_cpu(*dlct++);
367 entry->user_tid = buf & 0xfff;
368 entry->parent_tid = buf >> 12 & 0xfff;
369 entry->bios_info = buf >> 24;
370
371 memcpy(&entry->identity_tag, dlct, 8);
372 dlct += 2;
373
374 entry->event_capabilities = le32_to_cpu(*dlct++);
375
376 /* add new devices, which are new in the LCT */
377 list_for_each_entry_safe(dev, tmp, &c->devices, list) {
378 if (entry->tid == dev->lct_data.tid) {
380 found = 1; 379 found = 1;
381 break; 380 break;
382 } 381 }
383 } 382 }
384 383
385 if (!found) 384 if (!found)
386 i2o_device_remove(dev); 385 i2o_device_add(c, entry);
386
387 table_size -= 9;
388 max++;
387 } 389 }
388 390
389 /* add new devices, which are new in the LCT */ 391 /* remove devices, which are not in the LCT anymore */
390 for (i = 0; i < max; i++) { 392 list_for_each_entry_safe(dev, tmp, &c->devices, list) {
391 int found = 0; 393 int found = 0;
392 394
393 list_for_each_entry_safe(dev, tmp, &c->devices, list) { 395 for (i = 0; i < max; i++) {
394 if (lct->lct_entry[i].tid == dev->lct_data.tid) { 396 if (lct->lct_entry[i].tid == dev->lct_data.tid) {
395 found = 1; 397 found = 1;
396 break; 398 break;
@@ -398,14 +400,14 @@ int i2o_device_parse_lct(struct i2o_controller *c)
398 } 400 }
399 401
400 if (!found) 402 if (!found)
401 i2o_device_add(c, &lct->lct_entry[i]); 403 i2o_device_remove(dev);
402 } 404 }
405
403 up(&c->lct_lock); 406 up(&c->lct_lock);
404 407
405 return 0; 408 return 0;
406} 409}
407 410
408
409/* 411/*
410 * Run time support routines 412 * Run time support routines
411 */ 413 */
@@ -419,13 +421,9 @@ int i2o_device_parse_lct(struct i2o_controller *c)
419 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize. 421 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
420 */ 422 */
421int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist, 423int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
422 int oplen, void *reslist, int reslen) 424 int oplen, void *reslist, int reslen)
423{ 425{
424 struct i2o_message __iomem *msg; 426 struct i2o_message *msg;
425 u32 m;
426 u32 *res32 = (u32 *) reslist;
427 u32 *restmp = (u32 *) reslist;
428 int len = 0;
429 int i = 0; 427 int i = 0;
430 int rc; 428 int rc;
431 struct i2o_dma res; 429 struct i2o_dma res;
@@ -437,26 +435,27 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
437 if (i2o_dma_alloc(dev, &res, reslen, GFP_KERNEL)) 435 if (i2o_dma_alloc(dev, &res, reslen, GFP_KERNEL))
438 return -ENOMEM; 436 return -ENOMEM;
439 437
440 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 438 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
441 if (m == I2O_QUEUE_EMPTY) { 439 if (IS_ERR(msg)) {
442 i2o_dma_free(dev, &res); 440 i2o_dma_free(dev, &res);
443 return -ETIMEDOUT; 441 return PTR_ERR(msg);
444 } 442 }
445 443
446 i = 0; 444 i = 0;
447 writel(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid, 445 msg->u.head[1] =
448 &msg->u.head[1]); 446 cpu_to_le32(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid);
449 writel(0, &msg->body[i++]); 447 msg->body[i++] = cpu_to_le32(0x00000000);
450 writel(0x4C000000 | oplen, &msg->body[i++]); /* OperationList */ 448 msg->body[i++] = cpu_to_le32(0x4C000000 | oplen); /* OperationList */
451 memcpy_toio(&msg->body[i], oplist, oplen); 449 memcpy(&msg->body[i], oplist, oplen);
452 i += (oplen / 4 + (oplen % 4 ? 1 : 0)); 450 i += (oplen / 4 + (oplen % 4 ? 1 : 0));
453 writel(0xD0000000 | res.len, &msg->body[i++]); /* ResultList */ 451 msg->body[i++] = cpu_to_le32(0xD0000000 | res.len); /* ResultList */
454 writel(res.phys, &msg->body[i++]); 452 msg->body[i++] = cpu_to_le32(res.phys);
455 453
456 writel(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) | 454 msg->u.head[0] =
457 SGL_OFFSET_5, &msg->u.head[0]); 455 cpu_to_le32(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) |
456 SGL_OFFSET_5);
458 457
459 rc = i2o_msg_post_wait_mem(c, m, 10, &res); 458 rc = i2o_msg_post_wait_mem(c, msg, 10, &res);
460 459
461 /* This only looks like a memory leak - don't "fix" it. */ 460 /* This only looks like a memory leak - don't "fix" it. */
462 if (rc == -ETIMEDOUT) 461 if (rc == -ETIMEDOUT)
@@ -465,36 +464,7 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
465 memcpy(reslist, res.virt, res.len); 464 memcpy(reslist, res.virt, res.len);
466 i2o_dma_free(dev, &res); 465 i2o_dma_free(dev, &res);
467 466
468 /* Query failed */ 467 return rc;
469 if (rc)
470 return rc;
471 /*
472 * Calculate number of bytes of Result LIST
473 * We need to loop through each Result BLOCK and grab the length
474 */
475 restmp = res32 + 1;
476 len = 1;
477 for (i = 0; i < (res32[0] & 0X0000FFFF); i++) {
478 if (restmp[0] & 0x00FF0000) { /* BlockStatus != SUCCESS */
479 printk(KERN_WARNING
480 "%s - Error:\n ErrorInfoSize = 0x%02x, "
481 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
482 (cmd ==
483 I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET" :
484 "PARAMS_GET", res32[1] >> 24,
485 (res32[1] >> 16) & 0xFF, res32[1] & 0xFFFF);
486
487 /*
488 * If this is the only request,than we return an error
489 */
490 if ((res32[0] & 0x0000FFFF) == 1) {
491 return -((res32[1] >> 16) & 0xFF); /* -BlockStatus */
492 }
493 }
494 len += restmp[0] & 0x0000FFFF; /* Length of res BLOCK */
495 restmp += restmp[0] & 0x0000FFFF; /* Skip to next BLOCK */
496 }
497 return (len << 2); /* bytes used by result list */
498} 468}
499 469
500/* 470/*
@@ -503,28 +473,25 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
503int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field, 473int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field,
504 void *buf, int buflen) 474 void *buf, int buflen)
505{ 475{
506 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field }; 476 u32 opblk[] = { cpu_to_le32(0x00000001),
477 cpu_to_le32((u16) group << 16 | I2O_PARAMS_FIELD_GET),
478 cpu_to_le32((s16) field << 16 | 0x00000001)
479 };
507 u8 *resblk; /* 8 bytes for header */ 480 u8 *resblk; /* 8 bytes for header */
508 int size; 481 int rc;
509
510 if (field == -1) /* whole group */
511 opblk[4] = -1;
512 482
513 resblk = kmalloc(buflen + 8, GFP_KERNEL | GFP_ATOMIC); 483 resblk = kmalloc(buflen + 8, GFP_KERNEL | GFP_ATOMIC);
514 if (!resblk) 484 if (!resblk)
515 return -ENOMEM; 485 return -ENOMEM;
516 486
517 size = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk, 487 rc = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk,
518 sizeof(opblk), resblk, buflen + 8); 488 sizeof(opblk), resblk, buflen + 8);
519 489
520 memcpy(buf, resblk + 8, buflen); /* cut off header */ 490 memcpy(buf, resblk + 8, buflen); /* cut off header */
521 491
522 kfree(resblk); 492 kfree(resblk);
523 493
524 if (size > buflen) 494 return rc;
525 return buflen;
526
527 return size;
528} 495}
529 496
530/* 497/*
@@ -534,12 +501,12 @@ int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field,
534 * else return specific fields 501 * else return specific fields
535 * ibuf contains fieldindexes 502 * ibuf contains fieldindexes
536 * 503 *
537 * if oper == I2O_PARAMS_LIST_GET, get from specific rows 504 * if oper == I2O_PARAMS_LIST_GET, get from specific rows
538 * if fieldcount == -1 return all fields 505 * if fieldcount == -1 return all fields
539 * ibuf contains rowcount, keyvalues 506 * ibuf contains rowcount, keyvalues
540 * else return specific fields 507 * else return specific fields
541 * fieldcount is # of fieldindexes 508 * fieldcount is # of fieldindexes
542 * ibuf contains fieldindexes, rowcount, keyvalues 509 * ibuf contains fieldindexes, rowcount, keyvalues
543 * 510 *
544 * You could also use directly function i2o_issue_params(). 511 * You could also use directly function i2o_issue_params().
545 */ 512 */
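
The rewritten i2o_device_parse_lct() above no longer memcpy()s the raw LCT into place; it decodes each little-endian word into the host-endian i2o_lct and i2o_lct_entry structures. A sketch of just the header decode, with the bit positions taken from the shifts and masks in the hunk (the struct layout is assumed to come from the i2o headers):

#include <linux/i2o.h>

static void i2o_lct_header_decode_sketch(const __le32 *dlct, i2o_lct *lct)
{
	u32 buf = le32_to_cpu(dlct[0]);

	lct->table_size = buf & 0xffff;		/* size in 32-bit words */
	lct->boot_tid = (buf >> 16) & 0xfff;
	lct->lct_ver = buf >> 28;
	lct->change_ind = le32_to_cpu(dlct[1]);
	lct->iop_flags = le32_to_cpu(dlct[2]);

	/* the remaining table_size - 3 words are 9-word LCT entries,
	 * decoded one at a time by the while loop in the hunk above */
}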
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index 0fb9c4e2ad4c..64130227574f 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -61,12 +61,10 @@ static int i2o_bus_match(struct device *dev, struct device_driver *drv)
61}; 61};
62 62
63/* I2O bus type */ 63/* I2O bus type */
64extern struct device_attribute i2o_device_attrs[];
65
66struct bus_type i2o_bus_type = { 64struct bus_type i2o_bus_type = {
67 .name = "i2o", 65 .name = "i2o",
68 .match = i2o_bus_match, 66 .match = i2o_bus_match,
69 .dev_attrs = i2o_device_attrs, 67 .dev_attrs = i2o_device_attrs
70}; 68};
71 69
72/** 70/**
@@ -219,14 +217,14 @@ int i2o_driver_dispatch(struct i2o_controller *c, u32 m)
219 /* cut off header from message size (in 32-bit words) */ 217 /* cut off header from message size (in 32-bit words) */
220 size = (le32_to_cpu(msg->u.head[0]) >> 16) - 5; 218 size = (le32_to_cpu(msg->u.head[0]) >> 16) - 5;
221 219
222 evt = kmalloc(size * 4 + sizeof(*evt), GFP_ATOMIC | __GFP_ZERO); 220 evt = kzalloc(size * 4 + sizeof(*evt), GFP_ATOMIC);
223 if (!evt) 221 if (!evt)
224 return -ENOMEM; 222 return -ENOMEM;
225 223
226 evt->size = size; 224 evt->size = size;
227 evt->tcntxt = le32_to_cpu(msg->u.s.tcntxt); 225 evt->tcntxt = le32_to_cpu(msg->u.s.tcntxt);
228 evt->event_indicator = le32_to_cpu(msg->body[0]); 226 evt->event_indicator = le32_to_cpu(msg->body[0]);
229 memcpy(&evt->tcntxt, &msg->u.s.tcntxt, size * 4); 227 memcpy(&evt->data, &msg->body[1], size * 4);
230 228
231 list_for_each_entry_safe(dev, tmp, &c->devices, list) 229 list_for_each_entry_safe(dev, tmp, &c->devices, list)
232 if (dev->lct_data.tid == tid) { 230 if (dev->lct_data.tid == tid) {
@@ -349,12 +347,10 @@ int __init i2o_driver_init(void)
349 osm_info("max drivers = %d\n", i2o_max_drivers); 347 osm_info("max drivers = %d\n", i2o_max_drivers);
350 348
351 i2o_drivers = 349 i2o_drivers =
352 kmalloc(i2o_max_drivers * sizeof(*i2o_drivers), GFP_KERNEL); 350 kzalloc(i2o_max_drivers * sizeof(*i2o_drivers), GFP_KERNEL);
353 if (!i2o_drivers) 351 if (!i2o_drivers)
354 return -ENOMEM; 352 return -ENOMEM;
355 353
356 memset(i2o_drivers, 0, i2o_max_drivers * sizeof(*i2o_drivers));
357
358 rc = bus_register(&i2o_bus_type); 354 rc = bus_register(&i2o_bus_type);
359 355
360 if (rc < 0) 356 if (rc < 0)
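
The two allocation changes in this file are behaviour-preserving: kzalloc() is simply kmalloc() followed by a memset() to zero. Spelled out as a self-contained helper for illustration only:

#include <linux/slab.h>
#include <linux/string.h>

/* equivalent of kzalloc(size, flags) */
static void *zalloc_sketch(size_t size, gfp_t flags)
{
	void *buf = kmalloc(size, flags);

	if (buf)
		memset(buf, 0, size);
	return buf;
}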
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index 9c339a2505b0..9bb9859f6dfe 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -33,7 +33,7 @@
33#include <linux/workqueue.h> 33#include <linux/workqueue.h>
34#include <linux/string.h> 34#include <linux/string.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/sched.h> /* wait_event_interruptible_timeout() needs this */ 36#include <linux/sched.h> /* wait_event_interruptible_timeout() needs this */
37#include <asm/param.h> /* HZ */ 37#include <asm/param.h> /* HZ */
38#include "core.h" 38#include "core.h"
39 39
@@ -75,11 +75,9 @@ static struct i2o_exec_wait *i2o_exec_wait_alloc(void)
75{ 75{
76 struct i2o_exec_wait *wait; 76 struct i2o_exec_wait *wait;
77 77
78 wait = kmalloc(sizeof(*wait), GFP_KERNEL); 78 wait = kzalloc(sizeof(*wait), GFP_KERNEL);
79 if (!wait) 79 if (!wait)
80 return ERR_PTR(-ENOMEM); 80 return NULL;
81
82 memset(wait, 0, sizeof(*wait));
83 81
84 INIT_LIST_HEAD(&wait->list); 82 INIT_LIST_HEAD(&wait->list);
85 83
@@ -114,13 +112,12 @@ static void i2o_exec_wait_free(struct i2o_exec_wait *wait)
114 * Returns 0 on success, negative error code on timeout or positive error 112 * Returns 0 on success, negative error code on timeout or positive error
115 * code from reply. 113 * code from reply.
116 */ 114 */
117int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long 115int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
118 timeout, struct i2o_dma *dma) 116 unsigned long timeout, struct i2o_dma *dma)
119{ 117{
120 DECLARE_WAIT_QUEUE_HEAD(wq); 118 DECLARE_WAIT_QUEUE_HEAD(wq);
121 struct i2o_exec_wait *wait; 119 struct i2o_exec_wait *wait;
122 static u32 tcntxt = 0x80000000; 120 static u32 tcntxt = 0x80000000;
123 struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m);
124 int rc = 0; 121 int rc = 0;
125 122
126 wait = i2o_exec_wait_alloc(); 123 wait = i2o_exec_wait_alloc();
@@ -138,15 +135,15 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long
138 * We will only use transaction contexts >= 0x80000000 for POST WAIT, 135 * We will only use transaction contexts >= 0x80000000 for POST WAIT,
139 * so we could find a POST WAIT reply easier in the reply handler. 136 * so we could find a POST WAIT reply easier in the reply handler.
140 */ 137 */
141 writel(i2o_exec_driver.context, &msg->u.s.icntxt); 138 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
142 wait->tcntxt = tcntxt++; 139 wait->tcntxt = tcntxt++;
143 writel(wait->tcntxt, &msg->u.s.tcntxt); 140 msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt);
144 141
145 /* 142 /*
146 * Post the message to the controller. At some point later it will 143 * Post the message to the controller. At some point later it will
147 * return. If we time out before it returns then complete will be zero. 144 * return. If we time out before it returns then complete will be zero.
148 */ 145 */
149 i2o_msg_post(c, m); 146 i2o_msg_post(c, msg);
150 147
151 if (!wait->complete) { 148 if (!wait->complete) {
152 wait->wq = &wq; 149 wait->wq = &wq;
@@ -266,13 +263,14 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
266 * 263 *
267 * Returns number of bytes printed into buffer. 264 * Returns number of bytes printed into buffer.
268 */ 265 */
269static ssize_t i2o_exec_show_vendor_id(struct device *d, struct device_attribute *attr, char *buf) 266static ssize_t i2o_exec_show_vendor_id(struct device *d,
267 struct device_attribute *attr, char *buf)
270{ 268{
271 struct i2o_device *dev = to_i2o_device(d); 269 struct i2o_device *dev = to_i2o_device(d);
272 u16 id; 270 u16 id;
273 271
274 if (i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) { 272 if (!i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) {
275 sprintf(buf, "0x%04x", id); 273 sprintf(buf, "0x%04x", le16_to_cpu(id));
276 return strlen(buf) + 1; 274 return strlen(buf) + 1;
277 } 275 }
278 276
@@ -286,13 +284,15 @@ static ssize_t i2o_exec_show_vendor_id(struct device *d, struct device_attribute
286 * 284 *
287 * Returns number of bytes printed into buffer. 285 * Returns number of bytes printed into buffer.
288 */ 286 */
289static ssize_t i2o_exec_show_product_id(struct device *d, struct device_attribute *attr, char *buf) 287static ssize_t i2o_exec_show_product_id(struct device *d,
288 struct device_attribute *attr,
289 char *buf)
290{ 290{
291 struct i2o_device *dev = to_i2o_device(d); 291 struct i2o_device *dev = to_i2o_device(d);
292 u16 id; 292 u16 id;
293 293
294 if (i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) { 294 if (!i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) {
295 sprintf(buf, "0x%04x", id); 295 sprintf(buf, "0x%04x", le16_to_cpu(id));
296 return strlen(buf) + 1; 296 return strlen(buf) + 1;
297 } 297 }
298 298
@@ -362,7 +362,9 @@ static void i2o_exec_lct_modified(struct i2o_controller *c)
362 if (i2o_device_parse_lct(c) != -EAGAIN) 362 if (i2o_device_parse_lct(c) != -EAGAIN)
363 change_ind = c->lct->change_ind + 1; 363 change_ind = c->lct->change_ind + 1;
364 364
365#ifdef CONFIG_I2O_LCT_NOTIFY_ON_CHANGES
365 i2o_exec_lct_notify(c, change_ind); 366 i2o_exec_lct_notify(c, change_ind);
367#endif
366}; 368};
367 369
368/** 370/**
@@ -385,23 +387,22 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m,
385 u32 context; 387 u32 context;
386 388
387 if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) { 389 if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) {
390 struct i2o_message __iomem *pmsg;
391 u32 pm;
392
388 /* 393 /*
389 * If Fail bit is set we must take the transaction context of 394 * If Fail bit is set we must take the transaction context of
390 * the preserved message to find the right request again. 395 * the preserved message to find the right request again.
391 */ 396 */
392 struct i2o_message __iomem *pmsg;
393 u32 pm;
394 397
395 pm = le32_to_cpu(msg->body[3]); 398 pm = le32_to_cpu(msg->body[3]);
396
397 pmsg = i2o_msg_in_to_virt(c, pm); 399 pmsg = i2o_msg_in_to_virt(c, pm);
400 context = readl(&pmsg->u.s.tcntxt);
398 401
399 i2o_report_status(KERN_INFO, "i2o_core", msg); 402 i2o_report_status(KERN_INFO, "i2o_core", msg);
400 403
401 context = readl(&pmsg->u.s.tcntxt);
402
403 /* Release the preserved msg */ 404 /* Release the preserved msg */
404 i2o_msg_nop(c, pm); 405 i2o_msg_nop_mfa(c, pm);
405 } else 406 } else
406 context = le32_to_cpu(msg->u.s.tcntxt); 407 context = le32_to_cpu(msg->u.s.tcntxt);
407 408
@@ -462,25 +463,26 @@ static void i2o_exec_event(struct i2o_event *evt)
462 */ 463 */
463int i2o_exec_lct_get(struct i2o_controller *c) 464int i2o_exec_lct_get(struct i2o_controller *c)
464{ 465{
465 struct i2o_message __iomem *msg; 466 struct i2o_message *msg;
466 u32 m;
467 int i = 0; 467 int i = 0;
468 int rc = -EAGAIN; 468 int rc = -EAGAIN;
469 469
470 for (i = 1; i <= I2O_LCT_GET_TRIES; i++) { 470 for (i = 1; i <= I2O_LCT_GET_TRIES; i++) {
471 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 471 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
472 if (m == I2O_QUEUE_EMPTY) 472 if (IS_ERR(msg))
473 return -ETIMEDOUT; 473 return PTR_ERR(msg);
474 474
475 writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); 475 msg->u.head[0] =
476 writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID, 476 cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
477 &msg->u.head[1]); 477 msg->u.head[1] =
478 writel(0xffffffff, &msg->body[0]); 478 cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
479 writel(0x00000000, &msg->body[1]); 479 ADAPTER_TID);
480 writel(0xd0000000 | c->dlct.len, &msg->body[2]); 480 msg->body[0] = cpu_to_le32(0xffffffff);
481 writel(c->dlct.phys, &msg->body[3]); 481 msg->body[1] = cpu_to_le32(0x00000000);
482 482 msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
483 rc = i2o_msg_post_wait(c, m, I2O_TIMEOUT_LCT_GET); 483 msg->body[3] = cpu_to_le32(c->dlct.phys);
484
485 rc = i2o_msg_post_wait(c, msg, I2O_TIMEOUT_LCT_GET);
484 if (rc < 0) 486 if (rc < 0)
485 break; 487 break;
486 488
@@ -506,29 +508,29 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
506{ 508{
507 i2o_status_block *sb = c->status_block.virt; 509 i2o_status_block *sb = c->status_block.virt;
508 struct device *dev; 510 struct device *dev;
509 struct i2o_message __iomem *msg; 511 struct i2o_message *msg;
510 u32 m;
511 512
512 dev = &c->pdev->dev; 513 dev = &c->pdev->dev;
513 514
514 if (i2o_dma_realloc(dev, &c->dlct, sb->expected_lct_size, GFP_KERNEL)) 515 if (i2o_dma_realloc
516 (dev, &c->dlct, le32_to_cpu(sb->expected_lct_size), GFP_KERNEL))
515 return -ENOMEM; 517 return -ENOMEM;
516 518
517 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 519 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
518 if (m == I2O_QUEUE_EMPTY) 520 if (IS_ERR(msg))
519 return -ETIMEDOUT; 521 return PTR_ERR(msg);
520 522
521 writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); 523 msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
522 writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID, 524 msg->u.head[1] = cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
523 &msg->u.head[1]); 525 ADAPTER_TID);
524 writel(i2o_exec_driver.context, &msg->u.s.icntxt); 526 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
525 writel(0, &msg->u.s.tcntxt); /* FIXME */ 527 msg->u.s.tcntxt = cpu_to_le32(0x00000000);
526 writel(0xffffffff, &msg->body[0]); 528 msg->body[0] = cpu_to_le32(0xffffffff);
527 writel(change_ind, &msg->body[1]); 529 msg->body[1] = cpu_to_le32(change_ind);
528 writel(0xd0000000 | c->dlct.len, &msg->body[2]); 530 msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
529 writel(c->dlct.phys, &msg->body[3]); 531 msg->body[3] = cpu_to_le32(c->dlct.phys);
530 532
531 i2o_msg_post(c, m); 533 i2o_msg_post(c, msg);
532 534
533 return 0; 535 return 0;
534}; 536};
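
The POST WAIT path above matches replies through two context words: the initiator context routes the reply to the exec OSM, and a transaction context >= 0x80000000 identifies the individual waiter. A sketch of just that bookkeeping; struct i2o_exec_wait and i2o_exec_driver are the symbols from this file, and the rest of the flow (timeout handling, wake-up) is assumed:

static u32 next_tcntxt_sketch = 0x80000000;

static void i2o_post_wait_ctx_sketch(struct i2o_controller *c,
				     struct i2o_message *msg,
				     struct i2o_exec_wait *wait)
{
	/* route the reply to the exec OSM's reply handler */
	msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);

	/* tag the request so i2o_msg_post_wait_complete() finds this waiter */
	wait->tcntxt = next_tcntxt_sketch++;
	msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt);

	i2o_msg_post(c, msg);
}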
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index f283b5bafdd3..5b1febed3133 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -59,10 +59,12 @@
59#include <linux/blkdev.h> 59#include <linux/blkdev.h>
60#include <linux/hdreg.h> 60#include <linux/hdreg.h>
61 61
62#include <scsi/scsi.h>
63
62#include "i2o_block.h" 64#include "i2o_block.h"
63 65
64#define OSM_NAME "block-osm" 66#define OSM_NAME "block-osm"
65#define OSM_VERSION "1.287" 67#define OSM_VERSION "1.325"
66#define OSM_DESCRIPTION "I2O Block Device OSM" 68#define OSM_DESCRIPTION "I2O Block Device OSM"
67 69
68static struct i2o_driver i2o_block_driver; 70static struct i2o_driver i2o_block_driver;
@@ -130,20 +132,20 @@ static int i2o_block_remove(struct device *dev)
130 */ 132 */
131static int i2o_block_device_flush(struct i2o_device *dev) 133static int i2o_block_device_flush(struct i2o_device *dev)
132{ 134{
133 struct i2o_message __iomem *msg; 135 struct i2o_message *msg;
134 u32 m;
135 136
136 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 137 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
137 if (m == I2O_QUEUE_EMPTY) 138 if (IS_ERR(msg))
138 return -ETIMEDOUT; 139 return PTR_ERR(msg);
139 140
140 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 141 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
141 writel(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->lct_data.tid, 142 msg->u.head[1] =
142 &msg->u.head[1]); 143 cpu_to_le32(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->
143 writel(60 << 16, &msg->body[0]); 144 lct_data.tid);
145 msg->body[0] = cpu_to_le32(60 << 16);
144 osm_debug("Flushing...\n"); 146 osm_debug("Flushing...\n");
145 147
146 return i2o_msg_post_wait(dev->iop, m, 60); 148 return i2o_msg_post_wait(dev->iop, msg, 60);
147}; 149};
148 150
149/** 151/**
@@ -181,21 +183,21 @@ static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk,
181 */ 183 */
182static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id) 184static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
183{ 185{
184 struct i2o_message __iomem *msg; 186 struct i2o_message *msg;
185 u32 m; 187
186 188 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
187 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 189 if (IS_ERR(msg))
188 if (m == I2O_QUEUE_EMPTY) 190 return PTR_ERR(msg);
189 return -ETIMEDOUT; 191
190 192 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
191 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 193 msg->u.head[1] =
192 writel(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->lct_data.tid, 194 cpu_to_le32(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->
193 &msg->u.head[1]); 195 lct_data.tid);
194 writel(-1, &msg->body[0]); 196 msg->body[0] = cpu_to_le32(-1);
195 writel(0, &msg->body[1]); 197 msg->body[1] = cpu_to_le32(0x00000000);
196 osm_debug("Mounting...\n"); 198 osm_debug("Mounting...\n");
197 199
198 return i2o_msg_post_wait(dev->iop, m, 2); 200 return i2o_msg_post_wait(dev->iop, msg, 2);
199}; 201};
200 202
201/** 203/**
@@ -210,20 +212,20 @@ static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
210 */ 212 */
211static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id) 213static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
212{ 214{
213 struct i2o_message __iomem *msg; 215 struct i2o_message *msg;
214 u32 m;
215 216
216 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 217 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
217 if (m == I2O_QUEUE_EMPTY) 218 if (IS_ERR(msg))
218 return -ETIMEDOUT; 219 return PTR_ERR(msg);
219 220
220 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 221 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
221 writel(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid, 222 msg->u.head[1] =
222 &msg->u.head[1]); 223 cpu_to_le32(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->
223 writel(-1, &msg->body[0]); 224 lct_data.tid);
225 msg->body[0] = cpu_to_le32(-1);
224 osm_debug("Locking...\n"); 226 osm_debug("Locking...\n");
225 227
226 return i2o_msg_post_wait(dev->iop, m, 2); 228 return i2o_msg_post_wait(dev->iop, msg, 2);
227}; 229};
228 230
229/** 231/**
@@ -238,20 +240,20 @@ static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
238 */ 240 */
239static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id) 241static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
240{ 242{
241 struct i2o_message __iomem *msg; 243 struct i2o_message *msg;
242 u32 m;
243 244
244 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 245 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
245 if (m == I2O_QUEUE_EMPTY) 246 if (IS_ERR(msg))
246 return -ETIMEDOUT; 247 return PTR_ERR(msg);
247 248
248 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 249 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
249 writel(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid, 250 msg->u.head[1] =
250 &msg->u.head[1]); 251 cpu_to_le32(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->
251 writel(media_id, &msg->body[0]); 252 lct_data.tid);
253 msg->body[0] = cpu_to_le32(media_id);
252 osm_debug("Unlocking...\n"); 254 osm_debug("Unlocking...\n");
253 255
254 return i2o_msg_post_wait(dev->iop, m, 2); 256 return i2o_msg_post_wait(dev->iop, msg, 2);
255}; 257};
256 258
257/** 259/**
@@ -267,21 +269,21 @@ static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
267{ 269{
268 struct i2o_device *i2o_dev = dev->i2o_dev; 270 struct i2o_device *i2o_dev = dev->i2o_dev;
269 struct i2o_controller *c = i2o_dev->iop; 271 struct i2o_controller *c = i2o_dev->iop;
270 struct i2o_message __iomem *msg; 272 struct i2o_message *msg;
271 u32 m;
272 int rc; 273 int rc;
273 274
274 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 275 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
275 if (m == I2O_QUEUE_EMPTY) 276 if (IS_ERR(msg))
276 return -ETIMEDOUT; 277 return PTR_ERR(msg);
277 278
278 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 279 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
279 writel(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->lct_data. 280 msg->u.head[1] =
280 tid, &msg->u.head[1]); 281 cpu_to_le32(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->
281 writel(op << 24, &msg->body[0]); 282 lct_data.tid);
283 msg->body[0] = cpu_to_le32(op << 24);
282 osm_debug("Power...\n"); 284 osm_debug("Power...\n");
283 285
284 rc = i2o_msg_post_wait(c, m, 60); 286 rc = i2o_msg_post_wait(c, msg, 60);
285 if (!rc) 287 if (!rc)
286 dev->power = op; 288 dev->power = op;
287 289
@@ -331,7 +333,7 @@ static inline void i2o_block_request_free(struct i2o_block_request *ireq)
331 */ 333 */
332static inline int i2o_block_sglist_alloc(struct i2o_controller *c, 334static inline int i2o_block_sglist_alloc(struct i2o_controller *c,
333 struct i2o_block_request *ireq, 335 struct i2o_block_request *ireq,
334 u32 __iomem ** mptr) 336 u32 ** mptr)
335{ 337{
336 int nents; 338 int nents;
337 enum dma_data_direction direction; 339 enum dma_data_direction direction;
@@ -466,7 +468,7 @@ static void i2o_block_end_request(struct request *req, int uptodate,
466 468
467 spin_lock_irqsave(q->queue_lock, flags); 469 spin_lock_irqsave(q->queue_lock, flags);
468 470
469 end_that_request_last(req); 471 end_that_request_last(req, uptodate);
470 472
471 if (likely(dev)) { 473 if (likely(dev)) {
472 dev->open_queue_depth--; 474 dev->open_queue_depth--;
@@ -745,10 +747,9 @@ static int i2o_block_transfer(struct request *req)
745 struct i2o_block_device *dev = req->rq_disk->private_data; 747 struct i2o_block_device *dev = req->rq_disk->private_data;
746 struct i2o_controller *c; 748 struct i2o_controller *c;
747 int tid = dev->i2o_dev->lct_data.tid; 749 int tid = dev->i2o_dev->lct_data.tid;
748 struct i2o_message __iomem *msg; 750 struct i2o_message *msg;
749 u32 __iomem *mptr; 751 u32 *mptr;
750 struct i2o_block_request *ireq = req->special; 752 struct i2o_block_request *ireq = req->special;
751 u32 m;
752 u32 tcntxt; 753 u32 tcntxt;
753 u32 sgl_offset = SGL_OFFSET_8; 754 u32 sgl_offset = SGL_OFFSET_8;
754 u32 ctl_flags = 0x00000000; 755 u32 ctl_flags = 0x00000000;
@@ -763,9 +764,9 @@ static int i2o_block_transfer(struct request *req)
763 764
764 c = dev->i2o_dev->iop; 765 c = dev->i2o_dev->iop;
765 766
766 m = i2o_msg_get(c, &msg); 767 msg = i2o_msg_get(c);
767 if (m == I2O_QUEUE_EMPTY) { 768 if (IS_ERR(msg)) {
768 rc = -EBUSY; 769 rc = PTR_ERR(msg);
769 goto exit; 770 goto exit;
770 } 771 }
771 772
@@ -775,8 +776,8 @@ static int i2o_block_transfer(struct request *req)
775 goto nop_msg; 776 goto nop_msg;
776 } 777 }
777 778
778 writel(i2o_block_driver.context, &msg->u.s.icntxt); 779 msg->u.s.icntxt = cpu_to_le32(i2o_block_driver.context);
779 writel(tcntxt, &msg->u.s.tcntxt); 780 msg->u.s.tcntxt = cpu_to_le32(tcntxt);
780 781
781 mptr = &msg->body[0]; 782 mptr = &msg->body[0];
782 783
@@ -834,11 +835,11 @@ static int i2o_block_transfer(struct request *req)
834 835
835 sgl_offset = SGL_OFFSET_12; 836 sgl_offset = SGL_OFFSET_12;
836 837
837 writel(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid, 838 msg->u.head[1] =
838 &msg->u.head[1]); 839 cpu_to_le32(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid);
839 840
840 writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++); 841 *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
841 writel(tid, mptr++); 842 *mptr++ = cpu_to_le32(tid);
842 843
843 /* 844 /*
844 * ENABLE_DISCONNECT 845 * ENABLE_DISCONNECT
@@ -846,29 +847,31 @@ static int i2o_block_transfer(struct request *req)
846 * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME 847 * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME
847 */ 848 */
848 if (rq_data_dir(req) == READ) { 849 if (rq_data_dir(req) == READ) {
849 cmd[0] = 0x28; 850 cmd[0] = READ_10;
850 scsi_flags = 0x60a0000a; 851 scsi_flags = 0x60a0000a;
851 } else { 852 } else {
852 cmd[0] = 0x2A; 853 cmd[0] = WRITE_10;
853 scsi_flags = 0xa0a0000a; 854 scsi_flags = 0xa0a0000a;
854 } 855 }
855 856
856 writel(scsi_flags, mptr++); 857 *mptr++ = cpu_to_le32(scsi_flags);
857 858
858 *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec); 859 *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec);
859 *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec); 860 *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec);
860 861
861 memcpy_toio(mptr, cmd, 10); 862 memcpy(mptr, cmd, 10);
862 mptr += 4; 863 mptr += 4;
863 writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++); 864 *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
864 } else 865 } else
865#endif 866#endif
866 { 867 {
867 writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]); 868 msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
868 writel(ctl_flags, mptr++); 869 *mptr++ = cpu_to_le32(ctl_flags);
869 writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++); 870 *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
870 writel((u32) (req->sector << KERNEL_SECTOR_SHIFT), mptr++); 871 *mptr++ =
871 writel(req->sector >> (32 - KERNEL_SECTOR_SHIFT), mptr++); 872 cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT));
873 *mptr++ =
874 cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT));
872 } 875 }
873 876
874 if (!i2o_block_sglist_alloc(c, ireq, &mptr)) { 877 if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
@@ -876,13 +879,13 @@ static int i2o_block_transfer(struct request *req)
876 goto context_remove; 879 goto context_remove;
877 } 880 }
878 881
879 writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | 882 msg->u.head[0] =
880 sgl_offset, &msg->u.head[0]); 883 cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);
881 884
882 list_add_tail(&ireq->queue, &dev->open_queue); 885 list_add_tail(&ireq->queue, &dev->open_queue);
883 dev->open_queue_depth++; 886 dev->open_queue_depth++;
884 887
885 i2o_msg_post(c, m); 888 i2o_msg_post(c, msg);
886 889
887 return 0; 890 return 0;
888 891
@@ -890,7 +893,7 @@ static int i2o_block_transfer(struct request *req)
890 i2o_cntxt_list_remove(c, req); 893 i2o_cntxt_list_remove(c, req);
891 894
892 nop_msg: 895 nop_msg:
893 i2o_msg_nop(c, m); 896 i2o_msg_nop(c, msg);
894 897
895 exit: 898 exit:
896 return rc; 899 return rc;
@@ -978,13 +981,12 @@ static struct i2o_block_device *i2o_block_device_alloc(void)
978 struct request_queue *queue; 981 struct request_queue *queue;
979 int rc; 982 int rc;
980 983
981 dev = kmalloc(sizeof(*dev), GFP_KERNEL); 984 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
982 if (!dev) { 985 if (!dev) {
983 osm_err("Insufficient memory to allocate I2O Block disk.\n"); 986 osm_err("Insufficient memory to allocate I2O Block disk.\n");
984 rc = -ENOMEM; 987 rc = -ENOMEM;
985 goto exit; 988 goto exit;
986 } 989 }
987 memset(dev, 0, sizeof(*dev));
988 990
989 INIT_LIST_HEAD(&dev->open_queue); 991 INIT_LIST_HEAD(&dev->open_queue);
990 spin_lock_init(&dev->lock); 992 spin_lock_init(&dev->lock);
@@ -1049,8 +1051,8 @@ static int i2o_block_probe(struct device *dev)
1049 int rc; 1051 int rc;
1050 u64 size; 1052 u64 size;
1051 u32 blocksize; 1053 u32 blocksize;
1052 u32 flags, status;
1053 u16 body_size = 4; 1054 u16 body_size = 4;
1055 u16 power;
1054 unsigned short max_sectors; 1056 unsigned short max_sectors;
1055 1057
1056#ifdef CONFIG_I2O_EXT_ADAPTEC 1058#ifdef CONFIG_I2O_EXT_ADAPTEC
@@ -1108,22 +1110,20 @@ static int i2o_block_probe(struct device *dev)
1108 * Ask for the current media data. If that isn't supported 1110 * Ask for the current media data. If that isn't supported
1109 * then we ask for the device capacity data 1111 * then we ask for the device capacity data
1110 */ 1112 */
1111 if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) || 1113 if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
1112 i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) { 1114 !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
1113 blk_queue_hardsect_size(queue, blocksize); 1115 blk_queue_hardsect_size(queue, le32_to_cpu(blocksize));
1114 } else 1116 } else
1115 osm_warn("unable to get blocksize of %s\n", gd->disk_name); 1117 osm_warn("unable to get blocksize of %s\n", gd->disk_name);
1116 1118
1117 if (i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) || 1119 if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) ||
1118 i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) { 1120 !i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
1119 set_capacity(gd, size >> KERNEL_SECTOR_SHIFT); 1121 set_capacity(gd, le64_to_cpu(size) >> KERNEL_SECTOR_SHIFT);
1120 } else 1122 } else
1121 osm_warn("could not get size of %s\n", gd->disk_name); 1123 osm_warn("could not get size of %s\n", gd->disk_name);
1122 1124
1123 if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &i2o_blk_dev->power, 2)) 1125 if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
1124 i2o_blk_dev->power = 0; 1126 i2o_blk_dev->power = power;
1125 i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4);
1126 i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4);
1127 1127
1128 i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff); 1128 i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);
1129 1129
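
The probe changes above follow the new i2o_parm_field_get() convention from device.c: 0 now means success and the result buffer stays little-endian, so callers negate the return value and convert with le32_to_cpu()/le64_to_cpu(). A sketch of the blocksize/capacity query as the patched probe does it (group and field numbers are the ones from the hunk, error reporting is left out):

#include <linux/i2o.h>
#include <linux/blkdev.h>
#include "i2o_block.h"

static void i2o_block_geometry_sketch(struct i2o_device *i2o_dev,
				      struct request_queue *queue,
				      struct gendisk *gd)
{
	u32 blocksize;
	u64 size;

	if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4))
		blk_queue_hardsect_size(queue, le32_to_cpu(blocksize));

	if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8))
		set_capacity(gd, le64_to_cpu(size) >> KERNEL_SECTOR_SHIFT);
}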
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 3c3a7abebb1b..89daf67b764d 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -36,12 +36,12 @@
36 36
37#include <asm/uaccess.h> 37#include <asm/uaccess.h>
38 38
39#include "core.h"
40
41#define SG_TABLESIZE 30 39#define SG_TABLESIZE 30
42 40
43static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, 41extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int);
44 unsigned long arg); 42
43static int i2o_cfg_ioctl(struct inode *, struct file *, unsigned int,
44 unsigned long);
45 45
46static spinlock_t i2o_config_lock; 46static spinlock_t i2o_config_lock;
47 47
@@ -230,8 +230,7 @@ static int i2o_cfg_swdl(unsigned long arg)
230 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; 230 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
231 unsigned char maxfrag = 0, curfrag = 1; 231 unsigned char maxfrag = 0, curfrag = 1;
232 struct i2o_dma buffer; 232 struct i2o_dma buffer;
233 struct i2o_message __iomem *msg; 233 struct i2o_message *msg;
234 u32 m;
235 unsigned int status = 0, swlen = 0, fragsize = 8192; 234 unsigned int status = 0, swlen = 0, fragsize = 8192;
236 struct i2o_controller *c; 235 struct i2o_controller *c;
237 236
@@ -257,31 +256,34 @@ static int i2o_cfg_swdl(unsigned long arg)
257 if (!c) 256 if (!c)
258 return -ENXIO; 257 return -ENXIO;
259 258
260 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 259 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
261 if (m == I2O_QUEUE_EMPTY) 260 if (IS_ERR(msg))
262 return -EBUSY; 261 return PTR_ERR(msg);
263 262
264 if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) { 263 if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
265 i2o_msg_nop(c, m); 264 i2o_msg_nop(c, msg);
266 return -ENOMEM; 265 return -ENOMEM;
267 } 266 }
268 267
269 __copy_from_user(buffer.virt, kxfer.buf, fragsize); 268 __copy_from_user(buffer.virt, kxfer.buf, fragsize);
270 269
271 writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]); 270 msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
272 writel(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 | ADAPTER_TID, 271 msg->u.head[1] =
273 &msg->u.head[1]); 272 cpu_to_le32(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 |
274 writel(i2o_config_driver.context, &msg->u.head[2]); 273 ADAPTER_TID);
275 writel(0, &msg->u.head[3]); 274 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
276 writel((((u32) kxfer.flags) << 24) | (((u32) kxfer.sw_type) << 16) | 275 msg->u.head[3] = cpu_to_le32(0);
277 (((u32) maxfrag) << 8) | (((u32) curfrag)), &msg->body[0]); 276 msg->body[0] =
278 writel(swlen, &msg->body[1]); 277 cpu_to_le32((((u32) kxfer.flags) << 24) | (((u32) kxfer.
279 writel(kxfer.sw_id, &msg->body[2]); 278 sw_type) << 16) |
280 writel(0xD0000000 | fragsize, &msg->body[3]); 279 (((u32) maxfrag) << 8) | (((u32) curfrag)));
281 writel(buffer.phys, &msg->body[4]); 280 msg->body[1] = cpu_to_le32(swlen);
281 msg->body[2] = cpu_to_le32(kxfer.sw_id);
282 msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
283 msg->body[4] = cpu_to_le32(buffer.phys);
282 284
283 osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); 285 osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
284 status = i2o_msg_post_wait_mem(c, m, 60, &buffer); 286 status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);
285 287
286 if (status != -ETIMEDOUT) 288 if (status != -ETIMEDOUT)
287 i2o_dma_free(&c->pdev->dev, &buffer); 289 i2o_dma_free(&c->pdev->dev, &buffer);
@@ -302,8 +304,7 @@ static int i2o_cfg_swul(unsigned long arg)
302 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; 304 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
303 unsigned char maxfrag = 0, curfrag = 1; 305 unsigned char maxfrag = 0, curfrag = 1;
304 struct i2o_dma buffer; 306 struct i2o_dma buffer;
305 struct i2o_message __iomem *msg; 307 struct i2o_message *msg;
306 u32 m;
307 unsigned int status = 0, swlen = 0, fragsize = 8192; 308 unsigned int status = 0, swlen = 0, fragsize = 8192;
308 struct i2o_controller *c; 309 struct i2o_controller *c;
309 int ret = 0; 310 int ret = 0;
@@ -330,30 +331,30 @@ static int i2o_cfg_swul(unsigned long arg)
330 if (!c) 331 if (!c)
331 return -ENXIO; 332 return -ENXIO;
332 333
333 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 334 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
334 if (m == I2O_QUEUE_EMPTY) 335 if (IS_ERR(msg))
335 return -EBUSY; 336 return PTR_ERR(msg);
336 337
337 if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) { 338 if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
338 i2o_msg_nop(c, m); 339 i2o_msg_nop(c, msg);
339 return -ENOMEM; 340 return -ENOMEM;
340 } 341 }
341 342
342 writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]); 343 msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
343 writel(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID, 344 msg->u.head[1] =
344 &msg->u.head[1]); 345 cpu_to_le32(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID);
345 writel(i2o_config_driver.context, &msg->u.head[2]); 346 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
346 writel(0, &msg->u.head[3]); 347 msg->u.head[3] = cpu_to_le32(0);
347 writel((u32) kxfer.flags << 24 | (u32) kxfer. 348 msg->body[0] =
348 sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag, 349 cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.
349 &msg->body[0]); 350 sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag);
350 writel(swlen, &msg->body[1]); 351 msg->body[1] = cpu_to_le32(swlen);
351 writel(kxfer.sw_id, &msg->body[2]); 352 msg->body[2] = cpu_to_le32(kxfer.sw_id);
352 writel(0xD0000000 | fragsize, &msg->body[3]); 353 msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
353 writel(buffer.phys, &msg->body[4]); 354 msg->body[4] = cpu_to_le32(buffer.phys);
354 355
355 osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); 356 osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
356 status = i2o_msg_post_wait_mem(c, m, 60, &buffer); 357 status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);
357 358
358 if (status != I2O_POST_WAIT_OK) { 359 if (status != I2O_POST_WAIT_OK) {
359 if (status != -ETIMEDOUT) 360 if (status != -ETIMEDOUT)
@@ -380,8 +381,7 @@ static int i2o_cfg_swdel(unsigned long arg)
380 struct i2o_controller *c; 381 struct i2o_controller *c;
381 struct i2o_sw_xfer kxfer; 382 struct i2o_sw_xfer kxfer;
382 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; 383 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
383 struct i2o_message __iomem *msg; 384 struct i2o_message *msg;
384 u32 m;
385 unsigned int swlen; 385 unsigned int swlen;
386 int token; 386 int token;
387 387
@@ -395,21 +395,21 @@ static int i2o_cfg_swdel(unsigned long arg)
395 if (!c) 395 if (!c)
396 return -ENXIO; 396 return -ENXIO;
397 397
398 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 398 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
399 if (m == I2O_QUEUE_EMPTY) 399 if (IS_ERR(msg))
400 return -EBUSY; 400 return PTR_ERR(msg);
401 401
402 writel(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 402 msg->u.head[0] = cpu_to_le32(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0);
403 writel(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID, 403 msg->u.head[1] =
404 &msg->u.head[1]); 404 cpu_to_le32(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID);
405 writel(i2o_config_driver.context, &msg->u.head[2]); 405 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
406 writel(0, &msg->u.head[3]); 406 msg->u.head[3] = cpu_to_le32(0);
407 writel((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16, 407 msg->body[0] =
408 &msg->body[0]); 408 cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16);
409 writel(swlen, &msg->body[1]); 409 msg->body[1] = cpu_to_le32(swlen);
410 writel(kxfer.sw_id, &msg->body[2]); 410 msg->body[2] = cpu_to_le32(kxfer.sw_id);
411 411
412 token = i2o_msg_post_wait(c, m, 10); 412 token = i2o_msg_post_wait(c, msg, 10);
413 413
414 if (token != I2O_POST_WAIT_OK) { 414 if (token != I2O_POST_WAIT_OK) {
415 osm_info("swdel failed, DetailedStatus = %d\n", token); 415 osm_info("swdel failed, DetailedStatus = %d\n", token);
@@ -423,25 +423,24 @@ static int i2o_cfg_validate(unsigned long arg)
423{ 423{
424 int token; 424 int token;
425 int iop = (int)arg; 425 int iop = (int)arg;
426 struct i2o_message __iomem *msg; 426 struct i2o_message *msg;
427 u32 m;
428 struct i2o_controller *c; 427 struct i2o_controller *c;
429 428
430 c = i2o_find_iop(iop); 429 c = i2o_find_iop(iop);
431 if (!c) 430 if (!c)
432 return -ENXIO; 431 return -ENXIO;
433 432
434 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 433 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
435 if (m == I2O_QUEUE_EMPTY) 434 if (IS_ERR(msg))
436 return -EBUSY; 435 return PTR_ERR(msg);
437 436
438 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 437 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
439 writel(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop, 438 msg->u.head[1] =
440 &msg->u.head[1]); 439 cpu_to_le32(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop);
441 writel(i2o_config_driver.context, &msg->u.head[2]); 440 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
442 writel(0, &msg->u.head[3]); 441 msg->u.head[3] = cpu_to_le32(0);
443 442
444 token = i2o_msg_post_wait(c, m, 10); 443 token = i2o_msg_post_wait(c, msg, 10);
445 444
446 if (token != I2O_POST_WAIT_OK) { 445 if (token != I2O_POST_WAIT_OK) {
447 osm_info("Can't validate configuration, ErrorStatus = %d\n", 446 osm_info("Can't validate configuration, ErrorStatus = %d\n",
@@ -454,8 +453,7 @@ static int i2o_cfg_validate(unsigned long arg)
454 453
455static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp) 454static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
456{ 455{
457 struct i2o_message __iomem *msg; 456 struct i2o_message *msg;
458 u32 m;
459 struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg; 457 struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg;
460 struct i2o_evt_id kdesc; 458 struct i2o_evt_id kdesc;
461 struct i2o_controller *c; 459 struct i2o_controller *c;
@@ -474,18 +472,19 @@ static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
474 if (!d) 472 if (!d)
475 return -ENODEV; 473 return -ENODEV;
476 474
477 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 475 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
478 if (m == I2O_QUEUE_EMPTY) 476 if (IS_ERR(msg))
479 return -EBUSY; 477 return PTR_ERR(msg);
480 478
481 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 479 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
482 writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | kdesc.tid, 480 msg->u.head[1] =
483 &msg->u.head[1]); 481 cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 |
484 writel(i2o_config_driver.context, &msg->u.head[2]); 482 kdesc.tid);
485 writel(i2o_cntxt_list_add(c, fp->private_data), &msg->u.head[3]); 483 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
486 writel(kdesc.evt_mask, &msg->body[0]); 484 msg->u.head[3] = cpu_to_le32(i2o_cntxt_list_add(c, fp->private_data));
485 msg->body[0] = cpu_to_le32(kdesc.evt_mask);
487 486
488 i2o_msg_post(c, m); 487 i2o_msg_post(c, msg);
489 488
490 return 0; 489 return 0;
491} 490}
@@ -537,7 +536,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
537 u32 sg_index = 0; 536 u32 sg_index = 0;
538 i2o_status_block *sb; 537 i2o_status_block *sb;
539 struct i2o_message *msg; 538 struct i2o_message *msg;
540 u32 m;
541 unsigned int iop; 539 unsigned int iop;
542 540
543 cmd = (struct i2o_cmd_passthru32 __user *)arg; 541 cmd = (struct i2o_cmd_passthru32 __user *)arg;
@@ -553,7 +551,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
553 return -ENXIO; 551 return -ENXIO;
554 } 552 }
555 553
556 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 554 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
557 555
558 sb = c->status_block.virt; 556 sb = c->status_block.virt;
559 557
@@ -585,19 +583,15 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
585 reply_size >>= 16; 583 reply_size >>= 16;
586 reply_size <<= 2; 584 reply_size <<= 2;
587 585
588 reply = kmalloc(reply_size, GFP_KERNEL); 586 reply = kzalloc(reply_size, GFP_KERNEL);
589 if (!reply) { 587 if (!reply) {
590 printk(KERN_WARNING "%s: Could not allocate reply buffer\n", 588 printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
591 c->name); 589 c->name);
592 return -ENOMEM; 590 return -ENOMEM;
593 } 591 }
594 memset(reply, 0, reply_size);
595 592
596 sg_offset = (msg->u.head[0] >> 4) & 0x0f; 593 sg_offset = (msg->u.head[0] >> 4) & 0x0f;
597 594
598 writel(i2o_config_driver.context, &msg->u.s.icntxt);
599 writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt);
600
601 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); 595 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
602 if (sg_offset) { 596 if (sg_offset) {
603 struct sg_simple_element *sg; 597 struct sg_simple_element *sg;
@@ -631,7 +625,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
631 goto cleanup; 625 goto cleanup;
632 } 626 }
633 sg_size = sg[i].flag_count & 0xffffff; 627 sg_size = sg[i].flag_count & 0xffffff;
634 p = &(sg_list[sg_index++]); 628 p = &(sg_list[sg_index]);
635 /* Allocate memory for the transfer */ 629 /* Allocate memory for the transfer */
636 if (i2o_dma_alloc 630 if (i2o_dma_alloc
637 (&c->pdev->dev, p, sg_size, 631 (&c->pdev->dev, p, sg_size,
@@ -642,6 +636,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
642 rcode = -ENOMEM; 636 rcode = -ENOMEM;
643 goto sg_list_cleanup; 637 goto sg_list_cleanup;
644 } 638 }
639 sg_index++;
645 /* Copy in the user's SG buffer if necessary */ 640 /* Copy in the user's SG buffer if necessary */
646 if (sg[i]. 641 if (sg[i].
647 flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) { 642 flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) {
@@ -662,9 +657,11 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
662 } 657 }
663 } 658 }
664 659
665 rcode = i2o_msg_post_wait(c, m, 60); 660 rcode = i2o_msg_post_wait(c, msg, 60);
666 if (rcode) 661 if (rcode) {
662 reply[4] = ((u32) rcode) << 24;
667 goto sg_list_cleanup; 663 goto sg_list_cleanup;
664 }
668 665
669 if (sg_offset) { 666 if (sg_offset) {
670 u32 msg[I2O_OUTBOUND_MSG_FRAME_SIZE]; 667 u32 msg[I2O_OUTBOUND_MSG_FRAME_SIZE];
@@ -714,6 +711,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
714 } 711 }
715 } 712 }
716 713
714 sg_list_cleanup:
717 /* Copy back the reply to user space */ 715 /* Copy back the reply to user space */
718 if (reply_size) { 716 if (reply_size) {
719 // we wrote our own values for context - now restore the user supplied ones 717 // we wrote our own values for context - now restore the user supplied ones
@@ -731,7 +729,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
731 } 729 }
732 } 730 }
733 731
734 sg_list_cleanup:
735 for (i = 0; i < sg_index; i++) 732 for (i = 0; i < sg_index; i++)
736 i2o_dma_free(&c->pdev->dev, &sg_list[i]); 733 i2o_dma_free(&c->pdev->dev, &sg_list[i]);
737 734
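The passthru32 hunks change two things at once: a failed i2o_msg_post_wait() now records its status in word 4 of the reply frame, and the sg_list_cleanup label moves ahead of the copy-back so that status still reaches user space before the DMA buffers are freed. A sketch of the resulting shape; the helper name and its parameter list are invented for illustration, and the success-path buffer handling is reduced to a comment.

static int example_finish_passthru32(struct i2o_controller *c,
				     struct i2o_message *msg,
				     u32 *reply, u32 reply_size,
				     void __user *user_reply,
				     struct i2o_dma *sg_list, u32 sg_index)
{
	int rcode;
	u32 i;

	rcode = i2o_msg_post_wait(c, msg, 60);
	if (rcode) {
		reply[4] = ((u32) rcode) << 24;	/* status for the caller */
		goto sg_list_cleanup;
	}

	/* success: outbound SG buffers would be copied back to user space here */

 sg_list_cleanup:
	/* the copy-back now runs on failure too, before the buffers vanish */
	if (reply_size && copy_to_user(user_reply, reply, reply_size))
		rcode = -EFAULT;

	for (i = 0; i < sg_index; i++)
		i2o_dma_free(&c->pdev->dev, &sg_list[i]);

	kfree(reply);
	return rcode;
}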
@@ -780,8 +777,7 @@ static int i2o_cfg_passthru(unsigned long arg)
780 u32 i = 0; 777 u32 i = 0;
781 void *p = NULL; 778 void *p = NULL;
782 i2o_status_block *sb; 779 i2o_status_block *sb;
783 struct i2o_message __iomem *msg; 780 struct i2o_message *msg;
784 u32 m;
785 unsigned int iop; 781 unsigned int iop;
786 782
787 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg)) 783 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
@@ -793,7 +789,7 @@ static int i2o_cfg_passthru(unsigned long arg)
793 return -ENXIO; 789 return -ENXIO;
794 } 790 }
795 791
796 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 792 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
797 793
798 sb = c->status_block.virt; 794 sb = c->status_block.virt;
799 795
@@ -820,19 +816,15 @@ static int i2o_cfg_passthru(unsigned long arg)
820 reply_size >>= 16; 816 reply_size >>= 16;
821 reply_size <<= 2; 817 reply_size <<= 2;
822 818
823 reply = kmalloc(reply_size, GFP_KERNEL); 819 reply = kzalloc(reply_size, GFP_KERNEL);
824 if (!reply) { 820 if (!reply) {
825 printk(KERN_WARNING "%s: Could not allocate reply buffer\n", 821 printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
826 c->name); 822 c->name);
827 return -ENOMEM; 823 return -ENOMEM;
828 } 824 }
829 memset(reply, 0, reply_size);
830 825
831 sg_offset = (msg->u.head[0] >> 4) & 0x0f; 826 sg_offset = (msg->u.head[0] >> 4) & 0x0f;
832 827
833 writel(i2o_config_driver.context, &msg->u.s.icntxt);
834 writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt);
835
836 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); 828 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
837 if (sg_offset) { 829 if (sg_offset) {
838 struct sg_simple_element *sg; 830 struct sg_simple_element *sg;
@@ -894,9 +886,11 @@ static int i2o_cfg_passthru(unsigned long arg)
894 } 886 }
895 } 887 }
896 888
897 rcode = i2o_msg_post_wait(c, m, 60); 889 rcode = i2o_msg_post_wait(c, msg, 60);
898 if (rcode) 890 if (rcode) {
891 reply[4] = ((u32) rcode) << 24;
899 goto sg_list_cleanup; 892 goto sg_list_cleanup;
893 }
900 894
901 if (sg_offset) { 895 if (sg_offset) {
902 u32 msg[128]; 896 u32 msg[128];
@@ -946,6 +940,7 @@ static int i2o_cfg_passthru(unsigned long arg)
946 } 940 }
947 } 941 }
948 942
943 sg_list_cleanup:
949 /* Copy back the reply to user space */ 944 /* Copy back the reply to user space */
950 if (reply_size) { 945 if (reply_size) {
951 // we wrote our own values for context - now restore the user supplied ones 946 // we wrote our own values for context - now restore the user supplied ones
@@ -962,7 +957,6 @@ static int i2o_cfg_passthru(unsigned long arg)
962 } 957 }
963 } 958 }
964 959
965 sg_list_cleanup:
966 for (i = 0; i < sg_index; i++) 960 for (i = 0; i < sg_index; i++)
967 kfree(sg_list[i]); 961 kfree(sg_list[i]);
968 962
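Every converted ioctl in this file now follows the same message discipline: fetch a frame with i2o_msg_get_wait(), test it with IS_ERR()/PTR_ERR() rather than comparing a queue cookie, fill it with cpu_to_le32() stores instead of writel(), and hand the message pointer itself to i2o_msg_post_wait(). A condensed sketch modelled on the SW_REMOVE path; the function name and its -EIO fallback are illustrative, the calls and constants are the ones in the hunks above.

static int example_sw_remove(struct i2o_controller *c, u32 flags,
			     u32 sw_type, u32 sw_id, u32 swlen)
{
	struct i2o_message *msg;
	int token;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);	/* typically -ETIMEDOUT */

	msg->u.head[0] = cpu_to_le32(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID);
	msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
	msg->u.head[3] = cpu_to_le32(0);
	msg->body[0] = cpu_to_le32(flags << 24 | sw_type << 16);
	msg->body[1] = cpu_to_le32(swlen);
	msg->body[2] = cpu_to_le32(sw_id);

	token = i2o_msg_post_wait(c, msg, 10);
	if (token != I2O_POST_WAIT_OK) {
		osm_info("swdel failed, DetailedStatus = %d\n", token);
		return -EIO;	/* illustrative; the driver reports its own code */
	}

	return 0;
}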
diff --git a/drivers/message/i2o/i2o_lan.h b/drivers/message/i2o/i2o_lan.h
index 561d63304d7e..6502b817df58 100644
--- a/drivers/message/i2o/i2o_lan.h
+++ b/drivers/message/i2o/i2o_lan.h
@@ -103,14 +103,14 @@
103#define I2O_LAN_DSC_SUSPENDED 0x11 103#define I2O_LAN_DSC_SUSPENDED 0x11
104 104
105struct i2o_packet_info { 105struct i2o_packet_info {
106 u32 offset : 24; 106 u32 offset:24;
107 u32 flags : 8; 107 u32 flags:8;
108 u32 len : 24; 108 u32 len:24;
109 u32 status : 8; 109 u32 status:8;
110}; 110};
111 111
112struct i2o_bucket_descriptor { 112struct i2o_bucket_descriptor {
113 u32 context; /* FIXME: 64bit support */ 113 u32 context; /* FIXME: 64bit support */
114 struct i2o_packet_info packet_info[1]; 114 struct i2o_packet_info packet_info[1];
115}; 115};
116 116
@@ -127,14 +127,14 @@ struct i2o_lan_local {
127 u8 unit; 127 u8 unit;
128 struct i2o_device *i2o_dev; 128 struct i2o_device *i2o_dev;
129 129
130 struct fddi_statistics stats; /* see also struct net_device_stats */ 130 struct fddi_statistics stats; /* see also struct net_device_stats */
131 unsigned short (*type_trans)(struct sk_buff *, struct net_device *); 131 unsigned short (*type_trans) (struct sk_buff *, struct net_device *);
132 atomic_t buckets_out; /* nbr of unused buckets on DDM */ 132 atomic_t buckets_out; /* nbr of unused buckets on DDM */
133 atomic_t tx_out; /* outstanding TXes */ 133 atomic_t tx_out; /* outstanding TXes */
134 u8 tx_count; /* packets in one TX message frame */ 134 u8 tx_count; /* packets in one TX message frame */
135 u16 tx_max_out; /* DDM's Tx queue len */ 135 u16 tx_max_out; /* DDM's Tx queue len */
136 u8 sgl_max; /* max SGLs in one message frame */ 136 u8 sgl_max; /* max SGLs in one message frame */
137 u32 m; /* IOP address of the batch msg frame */ 137 u32 m; /* IOP address of the batch msg frame */
138 138
139 struct work_struct i2o_batch_send_task; 139 struct work_struct i2o_batch_send_task;
140 int send_active; 140 int send_active;
@@ -144,16 +144,16 @@ struct i2o_lan_local {
144 144
145 spinlock_t tx_lock; 145 spinlock_t tx_lock;
146 146
147 u32 max_size_mc_table; /* max number of multicast addresses */ 147 u32 max_size_mc_table; /* max number of multicast addresses */
148 148
149 /* LAN OSM configurable parameters are here: */ 149 /* LAN OSM configurable parameters are here: */
150 150
151 u16 max_buckets_out; /* max nbr of buckets to send to DDM */ 151 u16 max_buckets_out; /* max nbr of buckets to send to DDM */
152 u16 bucket_thresh; /* send more when this many used */ 152 u16 bucket_thresh; /* send more when this many used */
153 u16 rx_copybreak; 153 u16 rx_copybreak;
154 154
155 u8 tx_batch_mode; /* Set when using batch mode sends */ 155 u8 tx_batch_mode; /* Set when using batch mode sends */
156 u32 i2o_event_mask; /* To turn on interesting event flags */ 156 u32 i2o_event_mask; /* To turn on interesting event flags */
157}; 157};
158 158
159#endif /* _I2O_LAN_H */ 159#endif /* _I2O_LAN_H */
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index d559a1758363..2a0c42b8cda5 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -28,7 +28,7 @@
28 */ 28 */
29 29
30#define OSM_NAME "proc-osm" 30#define OSM_NAME "proc-osm"
31#define OSM_VERSION "1.145" 31#define OSM_VERSION "1.316"
32#define OSM_DESCRIPTION "I2O ProcFS OSM" 32#define OSM_DESCRIPTION "I2O ProcFS OSM"
33 33
34#define I2O_MAX_MODULES 4 34#define I2O_MAX_MODULES 4
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index 9f1744c3933b..f9e5a23697a1 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -70,7 +70,7 @@
70#include <scsi/sg_request.h> 70#include <scsi/sg_request.h>
71 71
72#define OSM_NAME "scsi-osm" 72#define OSM_NAME "scsi-osm"
73#define OSM_VERSION "1.282" 73#define OSM_VERSION "1.316"
74#define OSM_DESCRIPTION "I2O SCSI Peripheral OSM" 74#define OSM_DESCRIPTION "I2O SCSI Peripheral OSM"
75 75
76static struct i2o_driver i2o_scsi_driver; 76static struct i2o_driver i2o_scsi_driver;
@@ -113,7 +113,7 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c)
113 113
114 list_for_each_entry(i2o_dev, &c->devices, list) 114 list_for_each_entry(i2o_dev, &c->devices, list)
115 if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) { 115 if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) {
116 if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) 116 if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1)
117 && (type == 0x01)) /* SCSI bus */ 117 && (type == 0x01)) /* SCSI bus */
118 max_channel++; 118 max_channel++;
119 } 119 }
@@ -146,7 +146,7 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c)
146 i = 0; 146 i = 0;
147 list_for_each_entry(i2o_dev, &c->devices, list) 147 list_for_each_entry(i2o_dev, &c->devices, list)
148 if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) { 148 if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) {
149 if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) 149 if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1)
150 && (type == 0x01)) /* only SCSI bus */ 150 && (type == 0x01)) /* only SCSI bus */
151 i2o_shost->channel[i++] = i2o_dev; 151 i2o_shost->channel[i++] = i2o_dev;
152 152
@@ -238,13 +238,15 @@ static int i2o_scsi_probe(struct device *dev)
238 u8 type; 238 u8 type;
239 struct i2o_device *d = i2o_shost->channel[0]; 239 struct i2o_device *d = i2o_shost->channel[0];
240 240
241 if (i2o_parm_field_get(d, 0x0000, 0, &type, 1) 241 if (!i2o_parm_field_get(d, 0x0000, 0, &type, 1)
242 && (type == 0x01)) /* SCSI bus */ 242 && (type == 0x01)) /* SCSI bus */
243 if (i2o_parm_field_get(d, 0x0200, 4, &id, 4)) { 243 if (!i2o_parm_field_get(d, 0x0200, 4, &id, 4)) {
244 channel = 0; 244 channel = 0;
245 if (i2o_dev->lct_data.class_id == 245 if (i2o_dev->lct_data.class_id ==
246 I2O_CLASS_RANDOM_BLOCK_STORAGE) 246 I2O_CLASS_RANDOM_BLOCK_STORAGE)
247 lun = i2o_shost->lun++; 247 lun =
248 cpu_to_le64(i2o_shost->
249 lun++);
248 else 250 else
249 lun = 0; 251 lun = 0;
250 } 252 }
@@ -253,10 +255,10 @@ static int i2o_scsi_probe(struct device *dev)
253 break; 255 break;
254 256
255 case I2O_CLASS_SCSI_PERIPHERAL: 257 case I2O_CLASS_SCSI_PERIPHERAL:
256 if (i2o_parm_field_get(i2o_dev, 0x0000, 3, &id, 4) < 0) 258 if (i2o_parm_field_get(i2o_dev, 0x0000, 3, &id, 4))
257 return -EFAULT; 259 return -EFAULT;
258 260
259 if (i2o_parm_field_get(i2o_dev, 0x0000, 4, &lun, 8) < 0) 261 if (i2o_parm_field_get(i2o_dev, 0x0000, 4, &lun, 8))
260 return -EFAULT; 262 return -EFAULT;
261 263
262 parent = i2o_iop_find_device(c, i2o_dev->lct_data.parent_tid); 264 parent = i2o_iop_find_device(c, i2o_dev->lct_data.parent_tid);
@@ -281,20 +283,22 @@ static int i2o_scsi_probe(struct device *dev)
281 return -EFAULT; 283 return -EFAULT;
282 } 284 }
283 285
284 if (id >= scsi_host->max_id) { 286 if (le32_to_cpu(id) >= scsi_host->max_id) {
285 osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)", id, 287 osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)",
286 scsi_host->max_id); 288 le32_to_cpu(id), scsi_host->max_id);
287 return -EFAULT; 289 return -EFAULT;
288 } 290 }
289 291
290 if (lun >= scsi_host->max_lun) { 292 if (le64_to_cpu(lun) >= scsi_host->max_lun) {
291 osm_warn("SCSI device id (%d) >= max_lun of I2O host (%d)", 293 osm_warn("SCSI device lun (%lu) >= max_lun of I2O host (%d)",
292 (unsigned int)lun, scsi_host->max_lun); 294 (long unsigned int)le64_to_cpu(lun),
295 scsi_host->max_lun);
293 return -EFAULT; 296 return -EFAULT;
294 } 297 }
295 298
296 scsi_dev = 299 scsi_dev =
297 __scsi_add_device(i2o_shost->scsi_host, channel, id, lun, i2o_dev); 300 __scsi_add_device(i2o_shost->scsi_host, channel, le32_to_cpu(id),
301 le64_to_cpu(lun), i2o_dev);
298 302
299 if (IS_ERR(scsi_dev)) { 303 if (IS_ERR(scsi_dev)) {
300 osm_warn("can not add SCSI device %03x\n", 304 osm_warn("can not add SCSI device %03x\n",
@@ -305,8 +309,9 @@ static int i2o_scsi_probe(struct device *dev)
305 sysfs_create_link(&i2o_dev->device.kobj, &scsi_dev->sdev_gendev.kobj, 309 sysfs_create_link(&i2o_dev->device.kobj, &scsi_dev->sdev_gendev.kobj,
306 "scsi"); 310 "scsi");
307 311
308 osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %d\n", 312 osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %ld\n",
309 i2o_dev->lct_data.tid, channel, id, (unsigned int)lun); 313 i2o_dev->lct_data.tid, channel, le32_to_cpu(id),
314 (long unsigned int)le64_to_cpu(lun));
310 315
311 return 0; 316 return 0;
312}; 317};
@@ -510,8 +515,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
510 struct i2o_controller *c; 515 struct i2o_controller *c;
511 struct i2o_device *i2o_dev; 516 struct i2o_device *i2o_dev;
512 int tid; 517 int tid;
513 struct i2o_message __iomem *msg; 518 struct i2o_message *msg;
514 u32 m;
515 /* 519 /*
516 * ENABLE_DISCONNECT 520 * ENABLE_DISCONNECT
517 * SIMPLE_TAG 521 * SIMPLE_TAG
@@ -519,7 +523,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
519 */ 523 */
520 u32 scsi_flags = 0x20a00000; 524 u32 scsi_flags = 0x20a00000;
521 u32 sgl_offset; 525 u32 sgl_offset;
522 u32 __iomem *mptr; 526 u32 *mptr;
523 u32 cmd = I2O_CMD_SCSI_EXEC << 24; 527 u32 cmd = I2O_CMD_SCSI_EXEC << 24;
524 int rc = 0; 528 int rc = 0;
525 529
@@ -576,8 +580,8 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
576 * throw it back to the scsi layer 580 * throw it back to the scsi layer
577 */ 581 */
578 582
579 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 583 msg = i2o_msg_get(c);
580 if (m == I2O_QUEUE_EMPTY) { 584 if (IS_ERR(msg)) {
581 rc = SCSI_MLQUEUE_HOST_BUSY; 585 rc = SCSI_MLQUEUE_HOST_BUSY;
582 goto exit; 586 goto exit;
583 } 587 }
@@ -617,16 +621,16 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
617 if (sgl_offset == SGL_OFFSET_10) 621 if (sgl_offset == SGL_OFFSET_10)
618 sgl_offset = SGL_OFFSET_12; 622 sgl_offset = SGL_OFFSET_12;
619 cmd = I2O_CMD_PRIVATE << 24; 623 cmd = I2O_CMD_PRIVATE << 24;
620 writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++); 624 *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
621 writel(adpt_flags | tid, mptr++); 625 *mptr++ = cpu_to_le32(adpt_flags | tid);
622 } 626 }
623#endif 627#endif
624 628
625 writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]); 629 msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
626 writel(i2o_scsi_driver.context, &msg->u.s.icntxt); 630 msg->u.s.icntxt = cpu_to_le32(i2o_scsi_driver.context);
627 631
628 /* We want the SCSI control block back */ 632 /* We want the SCSI control block back */
629 writel(i2o_cntxt_list_add(c, SCpnt), &msg->u.s.tcntxt); 633 msg->u.s.tcntxt = cpu_to_le32(i2o_cntxt_list_add(c, SCpnt));
630 634
631 /* LSI_920_PCI_QUIRK 635 /* LSI_920_PCI_QUIRK
632 * 636 *
@@ -649,15 +653,15 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
649 } 653 }
650 */ 654 */
651 655
652 writel(scsi_flags | SCpnt->cmd_len, mptr++); 656 *mptr++ = cpu_to_le32(scsi_flags | SCpnt->cmd_len);
653 657
654 /* Write SCSI command into the message - always 16 byte block */ 658 /* Write SCSI command into the message - always 16 byte block */
655 memcpy_toio(mptr, SCpnt->cmnd, 16); 659 memcpy(mptr, SCpnt->cmnd, 16);
656 mptr += 4; 660 mptr += 4;
657 661
658 if (sgl_offset != SGL_OFFSET_0) { 662 if (sgl_offset != SGL_OFFSET_0) {
659 /* write size of data addressed by SGL */ 663 /* write size of data addressed by SGL */
660 writel(SCpnt->request_bufflen, mptr++); 664 *mptr++ = cpu_to_le32(SCpnt->request_bufflen);
661 665
662 /* Now fill in the SGList and command */ 666 /* Now fill in the SGList and command */
663 if (SCpnt->use_sg) { 667 if (SCpnt->use_sg) {
@@ -676,11 +680,11 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
676 } 680 }
677 681
678 /* Stick the headers on */ 682 /* Stick the headers on */
679 writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset, 683 msg->u.head[0] =
680 &msg->u.head[0]); 684 cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);
681 685
682 /* Queue the message */ 686 /* Queue the message */
683 i2o_msg_post(c, m); 687 i2o_msg_post(c, msg);
684 688
685 osm_debug("Issued %ld\n", SCpnt->serial_number); 689 osm_debug("Issued %ld\n", SCpnt->serial_number);
686 690
@@ -688,7 +692,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
688 692
689 nomem: 693 nomem:
690 rc = -ENOMEM; 694 rc = -ENOMEM;
691 i2o_msg_nop(c, m); 695 i2o_msg_nop(c, msg);
692 696
693 exit: 697 exit:
694 return rc; 698 return rc;
@@ -709,8 +713,7 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
709{ 713{
710 struct i2o_device *i2o_dev; 714 struct i2o_device *i2o_dev;
711 struct i2o_controller *c; 715 struct i2o_controller *c;
712 struct i2o_message __iomem *msg; 716 struct i2o_message *msg;
713 u32 m;
714 int tid; 717 int tid;
715 int status = FAILED; 718 int status = FAILED;
716 719
@@ -720,16 +723,16 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
720 c = i2o_dev->iop; 723 c = i2o_dev->iop;
721 tid = i2o_dev->lct_data.tid; 724 tid = i2o_dev->lct_data.tid;
722 725
723 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 726 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
724 if (m == I2O_QUEUE_EMPTY) 727 if (IS_ERR(msg))
725 return SCSI_MLQUEUE_HOST_BUSY; 728 return SCSI_MLQUEUE_HOST_BUSY;
726 729
727 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 730 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
728 writel(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid, 731 msg->u.head[1] =
729 &msg->u.head[1]); 732 cpu_to_le32(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid);
730 writel(i2o_cntxt_list_get_ptr(c, SCpnt), &msg->body[0]); 733 msg->body[0] = cpu_to_le32(i2o_cntxt_list_get_ptr(c, SCpnt));
731 734
732 if (i2o_msg_post_wait(c, m, I2O_TIMEOUT_SCSI_SCB_ABORT)) 735 if (i2o_msg_post_wait(c, msg, I2O_TIMEOUT_SCSI_SCB_ABORT))
733 status = SUCCESS; 736 status = SUCCESS;
734 737
735 return status; 738 return status;
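Two details of the queuecommand conversion are worth isolating: message frames now live in ordinary memory, so the __iomem cursor and memcpy_toio() give way to a plain u32 pointer and memcpy(), and the first header word is derived from how far that cursor advanced. A sketch of the tail end of frame construction; the helper and its parameter list are invented, while the calls and macros are those in the hunk.

static void example_finish_scsi_frame(struct i2o_controller *c,
				      struct i2o_message *msg, u32 *mptr,
				      struct scsi_cmnd *SCpnt,
				      u32 scsi_flags, u32 sgl_offset)
{
	*mptr++ = cpu_to_le32(scsi_flags | SCpnt->cmd_len);

	/* the CDB always occupies a fixed 16-byte block in the frame */
	memcpy(mptr, SCpnt->cmnd, 16);
	mptr += 4;

	/* head[0] encodes the final message size from the cursor distance */
	msg->u.head[0] =
	    cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);

	i2o_msg_post(c, msg);
}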
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index 4eb53258842e..492167446936 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -32,7 +32,7 @@
32#include "core.h" 32#include "core.h"
33 33
34#define OSM_NAME "i2o" 34#define OSM_NAME "i2o"
35#define OSM_VERSION "1.288" 35#define OSM_VERSION "1.325"
36#define OSM_DESCRIPTION "I2O subsystem" 36#define OSM_DESCRIPTION "I2O subsystem"
37 37
38/* global I2O controller list */ 38/* global I2O controller list */
@@ -47,27 +47,6 @@ static struct i2o_dma i2o_systab;
47static int i2o_hrt_get(struct i2o_controller *c); 47static int i2o_hrt_get(struct i2o_controller *c);
48 48
49/** 49/**
50 * i2o_msg_nop - Returns a message which is not used
51 * @c: I2O controller from which the message was created
52 * @m: message which should be returned
53 *
54 * If you fetch a message via i2o_msg_get, and can't use it, you must
55 * return the message with this function. Otherwise the message frame
56 * is lost.
57 */
58void i2o_msg_nop(struct i2o_controller *c, u32 m)
59{
60 struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m);
61
62 writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
63 writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID,
64 &msg->u.head[1]);
65 writel(0, &msg->u.head[2]);
66 writel(0, &msg->u.head[3]);
67 i2o_msg_post(c, m);
68};
69
70/**
71 * i2o_msg_get_wait - obtain an I2O message from the IOP 50 * i2o_msg_get_wait - obtain an I2O message from the IOP
72 * @c: I2O controller 51 * @c: I2O controller
73 * @msg: pointer to a I2O message pointer 52 * @msg: pointer to a I2O message pointer
@@ -81,22 +60,21 @@ void i2o_msg_nop(struct i2o_controller *c, u32 m)
81 * address from the read port (see the i2o spec). If no message is 60 * address from the read port (see the i2o spec). If no message is
82 * available returns I2O_QUEUE_EMPTY and msg is leaved untouched. 61 * available returns I2O_QUEUE_EMPTY and msg is leaved untouched.
83 */ 62 */
84u32 i2o_msg_get_wait(struct i2o_controller *c, 63struct i2o_message *i2o_msg_get_wait(struct i2o_controller *c, int wait)
85 struct i2o_message __iomem ** msg, int wait)
86{ 64{
87 unsigned long timeout = jiffies + wait * HZ; 65 unsigned long timeout = jiffies + wait * HZ;
88 u32 m; 66 struct i2o_message *msg;
89 67
90 while ((m = i2o_msg_get(c, msg)) == I2O_QUEUE_EMPTY) { 68 while (IS_ERR(msg = i2o_msg_get(c))) {
91 if (time_after(jiffies, timeout)) { 69 if (time_after(jiffies, timeout)) {
92 osm_debug("%s: Timeout waiting for message frame.\n", 70 osm_debug("%s: Timeout waiting for message frame.\n",
93 c->name); 71 c->name);
94 return I2O_QUEUE_EMPTY; 72 return ERR_PTR(-ETIMEDOUT);
95 } 73 }
96 schedule_timeout_uninterruptible(1); 74 schedule_timeout_uninterruptible(1);
97 } 75 }
98 76
99 return m; 77 return msg;
100}; 78};
101 79
102#if BITS_PER_LONG == 64 80#if BITS_PER_LONG == 64
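The rewritten i2o_msg_get_wait() relies on the kernel's ERR_PTR convention from <linux/err.h>: a negative errno is folded into an otherwise invalid pointer value, so a single return slot carries either a frame or an error. Callers test it as below, and the rule from the removed i2o_msg_nop() kerneldoc still applies: a frame that was obtained but cannot be used must be handed back or it is lost. The function here is a made-up caller, not driver code.

#include <linux/err.h>

static int example_caller(struct i2o_controller *c)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);	/* -ETIMEDOUT propagated unchanged */

	/* fill the frame with cpu_to_le32() stores and post it, or give
	 * it back unused so the message frame is not leaked: */
	i2o_msg_nop(c, msg);

	return 0;
}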
@@ -301,8 +279,7 @@ struct i2o_device *i2o_iop_find_device(struct i2o_controller *c, u16 tid)
301 */ 279 */
302static int i2o_iop_quiesce(struct i2o_controller *c) 280static int i2o_iop_quiesce(struct i2o_controller *c)
303{ 281{
304 struct i2o_message __iomem *msg; 282 struct i2o_message *msg;
305 u32 m;
306 i2o_status_block *sb = c->status_block.virt; 283 i2o_status_block *sb = c->status_block.virt;
307 int rc; 284 int rc;
308 285
@@ -313,16 +290,17 @@ static int i2o_iop_quiesce(struct i2o_controller *c)
313 (sb->iop_state != ADAPTER_STATE_OPERATIONAL)) 290 (sb->iop_state != ADAPTER_STATE_OPERATIONAL))
314 return 0; 291 return 0;
315 292
316 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 293 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
317 if (m == I2O_QUEUE_EMPTY) 294 if (IS_ERR(msg))
318 return -ETIMEDOUT; 295 return PTR_ERR(msg);
319 296
320 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 297 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
321 writel(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | ADAPTER_TID, 298 msg->u.head[1] =
322 &msg->u.head[1]); 299 cpu_to_le32(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 |
300 ADAPTER_TID);
323 301
324 /* Long timeout needed for quiesce if lots of devices */ 302 /* Long timeout needed for quiesce if lots of devices */
325 if ((rc = i2o_msg_post_wait(c, m, 240))) 303 if ((rc = i2o_msg_post_wait(c, msg, 240)))
326 osm_info("%s: Unable to quiesce (status=%#x).\n", c->name, -rc); 304 osm_info("%s: Unable to quiesce (status=%#x).\n", c->name, -rc);
327 else 305 else
328 osm_debug("%s: Quiesced.\n", c->name); 306 osm_debug("%s: Quiesced.\n", c->name);
@@ -342,8 +320,7 @@ static int i2o_iop_quiesce(struct i2o_controller *c)
342 */ 320 */
343static int i2o_iop_enable(struct i2o_controller *c) 321static int i2o_iop_enable(struct i2o_controller *c)
344{ 322{
345 struct i2o_message __iomem *msg; 323 struct i2o_message *msg;
346 u32 m;
347 i2o_status_block *sb = c->status_block.virt; 324 i2o_status_block *sb = c->status_block.virt;
348 int rc; 325 int rc;
349 326
@@ -353,16 +330,17 @@ static int i2o_iop_enable(struct i2o_controller *c)
353 if (sb->iop_state != ADAPTER_STATE_READY) 330 if (sb->iop_state != ADAPTER_STATE_READY)
354 return -EINVAL; 331 return -EINVAL;
355 332
356 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 333 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
357 if (m == I2O_QUEUE_EMPTY) 334 if (IS_ERR(msg))
358 return -ETIMEDOUT; 335 return PTR_ERR(msg);
359 336
360 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 337 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
361 writel(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | ADAPTER_TID, 338 msg->u.head[1] =
362 &msg->u.head[1]); 339 cpu_to_le32(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 |
340 ADAPTER_TID);
363 341
364 /* How long of a timeout do we need? */ 342 /* How long of a timeout do we need? */
365 if ((rc = i2o_msg_post_wait(c, m, 240))) 343 if ((rc = i2o_msg_post_wait(c, msg, 240)))
366 osm_err("%s: Could not enable (status=%#x).\n", c->name, -rc); 344 osm_err("%s: Could not enable (status=%#x).\n", c->name, -rc);
367 else 345 else
368 osm_debug("%s: Enabled.\n", c->name); 346 osm_debug("%s: Enabled.\n", c->name);
@@ -413,22 +391,22 @@ static inline void i2o_iop_enable_all(void)
413 */ 391 */
414static int i2o_iop_clear(struct i2o_controller *c) 392static int i2o_iop_clear(struct i2o_controller *c)
415{ 393{
416 struct i2o_message __iomem *msg; 394 struct i2o_message *msg;
417 u32 m;
418 int rc; 395 int rc;
419 396
420 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 397 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
421 if (m == I2O_QUEUE_EMPTY) 398 if (IS_ERR(msg))
422 return -ETIMEDOUT; 399 return PTR_ERR(msg);
423 400
424 /* Quiesce all IOPs first */ 401 /* Quiesce all IOPs first */
425 i2o_iop_quiesce_all(); 402 i2o_iop_quiesce_all();
426 403
427 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 404 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
428 writel(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 | ADAPTER_TID, 405 msg->u.head[1] =
429 &msg->u.head[1]); 406 cpu_to_le32(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 |
407 ADAPTER_TID);
430 408
431 if ((rc = i2o_msg_post_wait(c, m, 30))) 409 if ((rc = i2o_msg_post_wait(c, msg, 30)))
432 osm_info("%s: Unable to clear (status=%#x).\n", c->name, -rc); 410 osm_info("%s: Unable to clear (status=%#x).\n", c->name, -rc);
433 else 411 else
434 osm_debug("%s: Cleared.\n", c->name); 412 osm_debug("%s: Cleared.\n", c->name);
@@ -446,13 +424,13 @@ static int i2o_iop_clear(struct i2o_controller *c)
446 * Clear and (re)initialize IOP's outbound queue and post the message 424 * Clear and (re)initialize IOP's outbound queue and post the message
447 * frames to the IOP. 425 * frames to the IOP.
448 * 426 *
449 * Returns 0 on success or a negative errno code on failure. 427 * Returns 0 on success or negative error code on failure.
450 */ 428 */
451static int i2o_iop_init_outbound_queue(struct i2o_controller *c) 429static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
452{ 430{
453 volatile u8 *status = c->status.virt;
454 u32 m; 431 u32 m;
455 struct i2o_message __iomem *msg; 432 volatile u8 *status = c->status.virt;
433 struct i2o_message *msg;
456 ulong timeout; 434 ulong timeout;
457 int i; 435 int i;
458 436
@@ -460,23 +438,24 @@ static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
460 438
461 memset(c->status.virt, 0, 4); 439 memset(c->status.virt, 0, 4);
462 440
463 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 441 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
464 if (m == I2O_QUEUE_EMPTY) 442 if (IS_ERR(msg))
465 return -ETIMEDOUT; 443 return PTR_ERR(msg);
466 444
467 writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); 445 msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
468 writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID, 446 msg->u.head[1] =
469 &msg->u.head[1]); 447 cpu_to_le32(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 |
470 writel(i2o_exec_driver.context, &msg->u.s.icntxt); 448 ADAPTER_TID);
471 writel(0x00000000, &msg->u.s.tcntxt); 449 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
472 writel(PAGE_SIZE, &msg->body[0]); 450 msg->u.s.tcntxt = cpu_to_le32(0x00000000);
451 msg->body[0] = cpu_to_le32(PAGE_SIZE);
473 /* Outbound msg frame size in words and Initcode */ 452 /* Outbound msg frame size in words and Initcode */
474 writel(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]); 453 msg->body[1] = cpu_to_le32(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80);
475 writel(0xd0000004, &msg->body[2]); 454 msg->body[2] = cpu_to_le32(0xd0000004);
476 writel(i2o_dma_low(c->status.phys), &msg->body[3]); 455 msg->body[3] = cpu_to_le32(i2o_dma_low(c->status.phys));
477 writel(i2o_dma_high(c->status.phys), &msg->body[4]); 456 msg->body[4] = cpu_to_le32(i2o_dma_high(c->status.phys));
478 457
479 i2o_msg_post(c, m); 458 i2o_msg_post(c, msg);
480 459
481 timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ; 460 timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ;
482 while (*status <= I2O_CMD_IN_PROGRESS) { 461 while (*status <= I2O_CMD_IN_PROGRESS) {
@@ -511,34 +490,34 @@ static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
511static int i2o_iop_reset(struct i2o_controller *c) 490static int i2o_iop_reset(struct i2o_controller *c)
512{ 491{
513 volatile u8 *status = c->status.virt; 492 volatile u8 *status = c->status.virt;
514 struct i2o_message __iomem *msg; 493 struct i2o_message *msg;
515 u32 m;
516 unsigned long timeout; 494 unsigned long timeout;
517 i2o_status_block *sb = c->status_block.virt; 495 i2o_status_block *sb = c->status_block.virt;
518 int rc = 0; 496 int rc = 0;
519 497
520 osm_debug("%s: Resetting controller\n", c->name); 498 osm_debug("%s: Resetting controller\n", c->name);
521 499
522 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 500 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
523 if (m == I2O_QUEUE_EMPTY) 501 if (IS_ERR(msg))
524 return -ETIMEDOUT; 502 return PTR_ERR(msg);
525 503
526 memset(c->status_block.virt, 0, 8); 504 memset(c->status_block.virt, 0, 8);
527 505
528 /* Quiesce all IOPs first */ 506 /* Quiesce all IOPs first */
529 i2o_iop_quiesce_all(); 507 i2o_iop_quiesce_all();
530 508
531 writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 509 msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0);
532 writel(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 | ADAPTER_TID, 510 msg->u.head[1] =
533 &msg->u.head[1]); 511 cpu_to_le32(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 |
534 writel(i2o_exec_driver.context, &msg->u.s.icntxt); 512 ADAPTER_TID);
535 writel(0, &msg->u.s.tcntxt); //FIXME: use reasonable transaction context 513 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
536 writel(0, &msg->body[0]); 514 msg->u.s.tcntxt = cpu_to_le32(0x00000000);
537 writel(0, &msg->body[1]); 515 msg->body[0] = cpu_to_le32(0x00000000);
538 writel(i2o_dma_low(c->status.phys), &msg->body[2]); 516 msg->body[1] = cpu_to_le32(0x00000000);
539 writel(i2o_dma_high(c->status.phys), &msg->body[3]); 517 msg->body[2] = cpu_to_le32(i2o_dma_low(c->status.phys));
518 msg->body[3] = cpu_to_le32(i2o_dma_high(c->status.phys));
540 519
541 i2o_msg_post(c, m); 520 i2o_msg_post(c, msg);
542 521
543 /* Wait for a reply */ 522 /* Wait for a reply */
544 timeout = jiffies + I2O_TIMEOUT_RESET * HZ; 523 timeout = jiffies + I2O_TIMEOUT_RESET * HZ;
@@ -567,18 +546,15 @@ static int i2o_iop_reset(struct i2o_controller *c)
567 osm_debug("%s: Reset in progress, waiting for reboot...\n", 546 osm_debug("%s: Reset in progress, waiting for reboot...\n",
568 c->name); 547 c->name);
569 548
570 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET); 549 while (IS_ERR(msg = i2o_msg_get_wait(c, I2O_TIMEOUT_RESET))) {
571 while (m == I2O_QUEUE_EMPTY) {
572 if (time_after(jiffies, timeout)) { 550 if (time_after(jiffies, timeout)) {
573 osm_err("%s: IOP reset timeout.\n", c->name); 551 osm_err("%s: IOP reset timeout.\n", c->name);
574 rc = -ETIMEDOUT; 552 rc = PTR_ERR(msg);
575 goto exit; 553 goto exit;
576 } 554 }
577 schedule_timeout_uninterruptible(1); 555 schedule_timeout_uninterruptible(1);
578
579 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET);
580 } 556 }
581 i2o_msg_nop(c, m); 557 i2o_msg_nop(c, msg);
582 558
583 /* from here all quiesce commands are safe */ 559 /* from here all quiesce commands are safe */
584 c->no_quiesce = 0; 560 c->no_quiesce = 0;
@@ -686,8 +662,7 @@ static int i2o_iop_activate(struct i2o_controller *c)
686 */ 662 */
687static int i2o_iop_systab_set(struct i2o_controller *c) 663static int i2o_iop_systab_set(struct i2o_controller *c)
688{ 664{
689 struct i2o_message __iomem *msg; 665 struct i2o_message *msg;
690 u32 m;
691 i2o_status_block *sb = c->status_block.virt; 666 i2o_status_block *sb = c->status_block.virt;
692 struct device *dev = &c->pdev->dev; 667 struct device *dev = &c->pdev->dev;
693 struct resource *root; 668 struct resource *root;
@@ -735,41 +710,38 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
735 } 710 }
736 } 711 }
737 712
738 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 713 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
739 if (m == I2O_QUEUE_EMPTY) 714 if (IS_ERR(msg))
740 return -ETIMEDOUT; 715 return PTR_ERR(msg);
741 716
742 i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len, 717 i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len,
743 PCI_DMA_TODEVICE); 718 PCI_DMA_TODEVICE);
744 if (!i2o_systab.phys) { 719 if (!i2o_systab.phys) {
745 i2o_msg_nop(c, m); 720 i2o_msg_nop(c, msg);
746 return -ENOMEM; 721 return -ENOMEM;
747 } 722 }
748 723
749 writel(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6, &msg->u.head[0]); 724 msg->u.head[0] = cpu_to_le32(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6);
750 writel(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | ADAPTER_TID, 725 msg->u.head[1] =
751 &msg->u.head[1]); 726 cpu_to_le32(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 |
727 ADAPTER_TID);
752 728
753 /* 729 /*
754 * Provide three SGL-elements: 730 * Provide three SGL-elements:
755 * System table (SysTab), Private memory space declaration and 731 * System table (SysTab), Private memory space declaration and
756 * Private i/o space declaration 732 * Private i/o space declaration
757 *
758 * FIXME: is this still true?
759 * Nasty one here. We can't use dma_alloc_coherent to send the
760 * same table to everyone. We have to go remap it for them all
761 */ 733 */
762 734
763 writel(c->unit + 2, &msg->body[0]); 735 msg->body[0] = cpu_to_le32(c->unit + 2);
764 writel(0, &msg->body[1]); 736 msg->body[1] = cpu_to_le32(0x00000000);
765 writel(0x54000000 | i2o_systab.len, &msg->body[2]); 737 msg->body[2] = cpu_to_le32(0x54000000 | i2o_systab.len);
766 writel(i2o_systab.phys, &msg->body[3]); 738 msg->body[3] = cpu_to_le32(i2o_systab.phys);
767 writel(0x54000000 | sb->current_mem_size, &msg->body[4]); 739 msg->body[4] = cpu_to_le32(0x54000000 | sb->current_mem_size);
768 writel(sb->current_mem_base, &msg->body[5]); 740 msg->body[5] = cpu_to_le32(sb->current_mem_base);
769 writel(0xd4000000 | sb->current_io_size, &msg->body[6]); 741 msg->body[6] = cpu_to_le32(0xd4000000 | sb->current_io_size);
770 writel(sb->current_io_base, &msg->body[6]); 742 msg->body[6] = cpu_to_le32(sb->current_io_base);
771 743
772 rc = i2o_msg_post_wait(c, m, 120); 744 rc = i2o_msg_post_wait(c, msg, 120);
773 745
774 dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len, 746 dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len,
775 PCI_DMA_TODEVICE); 747 PCI_DMA_TODEVICE);
@@ -780,8 +752,6 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
780 else 752 else
781 osm_debug("%s: SysTab set.\n", c->name); 753 osm_debug("%s: SysTab set.\n", c->name);
782 754
783 i2o_status_get(c); // Entered READY state
784
785 return rc; 755 return rc;
786} 756}
787 757
@@ -791,7 +761,7 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
791 * 761 *
792 * Send the system table and enable the I2O controller. 762 * Send the system table and enable the I2O controller.
793 * 763 *
794 * Returns 0 on success or negativer error code on failure. 764 * Returns 0 on success or negative error code on failure.
795 */ 765 */
796static int i2o_iop_online(struct i2o_controller *c) 766static int i2o_iop_online(struct i2o_controller *c)
797{ 767{
@@ -830,7 +800,6 @@ void i2o_iop_remove(struct i2o_controller *c)
830 list_for_each_entry_safe(dev, tmp, &c->devices, list) 800 list_for_each_entry_safe(dev, tmp, &c->devices, list)
831 i2o_device_remove(dev); 801 i2o_device_remove(dev);
832 802
833 class_device_unregister(c->classdev);
834 device_del(&c->device); 803 device_del(&c->device);
835 804
836 /* Ask the IOP to switch to RESET state */ 805 /* Ask the IOP to switch to RESET state */
@@ -869,12 +838,11 @@ static int i2o_systab_build(void)
869 i2o_systab.len = sizeof(struct i2o_sys_tbl) + num_controllers * 838 i2o_systab.len = sizeof(struct i2o_sys_tbl) + num_controllers *
870 sizeof(struct i2o_sys_tbl_entry); 839 sizeof(struct i2o_sys_tbl_entry);
871 840
872 systab = i2o_systab.virt = kmalloc(i2o_systab.len, GFP_KERNEL); 841 systab = i2o_systab.virt = kzalloc(i2o_systab.len, GFP_KERNEL);
873 if (!systab) { 842 if (!systab) {
874 osm_err("unable to allocate memory for System Table\n"); 843 osm_err("unable to allocate memory for System Table\n");
875 return -ENOMEM; 844 return -ENOMEM;
876 } 845 }
877 memset(systab, 0, i2o_systab.len);
878 846
879 systab->version = I2OVERSION; 847 systab->version = I2OVERSION;
880 systab->change_ind = change_ind + 1; 848 systab->change_ind = change_ind + 1;
@@ -952,30 +920,30 @@ static int i2o_parse_hrt(struct i2o_controller *c)
952 */ 920 */
953int i2o_status_get(struct i2o_controller *c) 921int i2o_status_get(struct i2o_controller *c)
954{ 922{
955 struct i2o_message __iomem *msg; 923 struct i2o_message *msg;
956 u32 m;
957 volatile u8 *status_block; 924 volatile u8 *status_block;
958 unsigned long timeout; 925 unsigned long timeout;
959 926
960 status_block = (u8 *) c->status_block.virt; 927 status_block = (u8 *) c->status_block.virt;
961 memset(c->status_block.virt, 0, sizeof(i2o_status_block)); 928 memset(c->status_block.virt, 0, sizeof(i2o_status_block));
962 929
963 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 930 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
964 if (m == I2O_QUEUE_EMPTY) 931 if (IS_ERR(msg))
965 return -ETIMEDOUT; 932 return PTR_ERR(msg);
966 933
967 writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 934 msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_0);
968 writel(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | ADAPTER_TID, 935 msg->u.head[1] =
969 &msg->u.head[1]); 936 cpu_to_le32(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 |
970 writel(i2o_exec_driver.context, &msg->u.s.icntxt); 937 ADAPTER_TID);
971 writel(0, &msg->u.s.tcntxt); // FIXME: use resonable transaction context 938 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
972 writel(0, &msg->body[0]); 939 msg->u.s.tcntxt = cpu_to_le32(0x00000000);
973 writel(0, &msg->body[1]); 940 msg->body[0] = cpu_to_le32(0x00000000);
974 writel(i2o_dma_low(c->status_block.phys), &msg->body[2]); 941 msg->body[1] = cpu_to_le32(0x00000000);
975 writel(i2o_dma_high(c->status_block.phys), &msg->body[3]); 942 msg->body[2] = cpu_to_le32(i2o_dma_low(c->status_block.phys));
976 writel(sizeof(i2o_status_block), &msg->body[4]); /* always 88 bytes */ 943 msg->body[3] = cpu_to_le32(i2o_dma_high(c->status_block.phys));
944 msg->body[4] = cpu_to_le32(sizeof(i2o_status_block)); /* always 88 bytes */
977 945
978 i2o_msg_post(c, m); 946 i2o_msg_post(c, msg);
979 947
980 /* Wait for a reply */ 948 /* Wait for a reply */
981 timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ; 949 timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ;
@@ -1002,7 +970,7 @@ int i2o_status_get(struct i2o_controller *c)
1002 * The HRT contains information about possible hidden devices but is 970 * The HRT contains information about possible hidden devices but is
1003 * mostly useless to us. 971 * mostly useless to us.
1004 * 972 *
1005 * Returns 0 on success or negativer error code on failure. 973 * Returns 0 on success or negative error code on failure.
1006 */ 974 */
1007static int i2o_hrt_get(struct i2o_controller *c) 975static int i2o_hrt_get(struct i2o_controller *c)
1008{ 976{
@@ -1013,20 +981,20 @@ static int i2o_hrt_get(struct i2o_controller *c)
1013 struct device *dev = &c->pdev->dev; 981 struct device *dev = &c->pdev->dev;
1014 982
1015 for (i = 0; i < I2O_HRT_GET_TRIES; i++) { 983 for (i = 0; i < I2O_HRT_GET_TRIES; i++) {
1016 struct i2o_message __iomem *msg; 984 struct i2o_message *msg;
1017 u32 m;
1018 985
1019 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 986 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
1020 if (m == I2O_QUEUE_EMPTY) 987 if (IS_ERR(msg))
1021 return -ETIMEDOUT; 988 return PTR_ERR(msg);
1022 989
1023 writel(SIX_WORD_MSG_SIZE | SGL_OFFSET_4, &msg->u.head[0]); 990 msg->u.head[0] = cpu_to_le32(SIX_WORD_MSG_SIZE | SGL_OFFSET_4);
1024 writel(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | ADAPTER_TID, 991 msg->u.head[1] =
1025 &msg->u.head[1]); 992 cpu_to_le32(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 |
1026 writel(0xd0000000 | c->hrt.len, &msg->body[0]); 993 ADAPTER_TID);
1027 writel(c->hrt.phys, &msg->body[1]); 994 msg->body[0] = cpu_to_le32(0xd0000000 | c->hrt.len);
995 msg->body[1] = cpu_to_le32(c->hrt.phys);
1028 996
1029 rc = i2o_msg_post_wait_mem(c, m, 20, &c->hrt); 997 rc = i2o_msg_post_wait_mem(c, msg, 20, &c->hrt);
1030 998
1031 if (rc < 0) { 999 if (rc < 0) {
1032 osm_err("%s: Unable to get HRT (status=%#x)\n", c->name, 1000 osm_err("%s: Unable to get HRT (status=%#x)\n", c->name,
@@ -1051,15 +1019,6 @@ static int i2o_hrt_get(struct i2o_controller *c)
1051} 1019}
1052 1020
1053/** 1021/**
1054 * i2o_iop_free - Free the i2o_controller struct
1055 * @c: I2O controller to free
1056 */
1057void i2o_iop_free(struct i2o_controller *c)
1058{
1059 kfree(c);
1060};
1061
1062/**
1063 * i2o_iop_release - release the memory for a I2O controller 1022 * i2o_iop_release - release the memory for a I2O controller
1064 * @dev: I2O controller which should be released 1023 * @dev: I2O controller which should be released
1065 * 1024 *
@@ -1073,14 +1032,11 @@ static void i2o_iop_release(struct device *dev)
1073 i2o_iop_free(c); 1032 i2o_iop_free(c);
1074}; 1033};
1075 1034
1076/* I2O controller class */
1077static struct class *i2o_controller_class;
1078
1079/** 1035/**
1080 * i2o_iop_alloc - Allocate and initialize a i2o_controller struct 1036 * i2o_iop_alloc - Allocate and initialize a i2o_controller struct
1081 * 1037 *
1082 * Allocate the necessary memory for a i2o_controller struct and 1038 * Allocate the necessary memory for a i2o_controller struct and
1083 * initialize the lists. 1039 * initialize the lists and message mempool.
1084 * 1040 *
1085 * Returns a pointer to the I2O controller or a negative error code on 1041 * Returns a pointer to the I2O controller or a negative error code on
1086 * failure. 1042 * failure.
@@ -1089,20 +1045,29 @@ struct i2o_controller *i2o_iop_alloc(void)
1089{ 1045{
1090 static int unit = 0; /* 0 and 1 are NULL IOP and Local Host */ 1046 static int unit = 0; /* 0 and 1 are NULL IOP and Local Host */
1091 struct i2o_controller *c; 1047 struct i2o_controller *c;
1048 char poolname[32];
1092 1049
1093 c = kmalloc(sizeof(*c), GFP_KERNEL); 1050 c = kzalloc(sizeof(*c), GFP_KERNEL);
1094 if (!c) { 1051 if (!c) {
1095 osm_err("i2o: Insufficient memory to allocate a I2O controller." 1052 osm_err("i2o: Insufficient memory to allocate a I2O controller."
1096 "\n"); 1053 "\n");
1097 return ERR_PTR(-ENOMEM); 1054 return ERR_PTR(-ENOMEM);
1098 } 1055 }
1099 memset(c, 0, sizeof(*c)); 1056
1057 c->unit = unit++;
1058 sprintf(c->name, "iop%d", c->unit);
1059
1060 snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name);
1061 if (i2o_pool_alloc
1062 (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4,
1063 I2O_MSG_INPOOL_MIN)) {
1064 kfree(c);
1065 return ERR_PTR(-ENOMEM);
1066 };
1100 1067
1101 INIT_LIST_HEAD(&c->devices); 1068 INIT_LIST_HEAD(&c->devices);
1102 spin_lock_init(&c->lock); 1069 spin_lock_init(&c->lock);
1103 init_MUTEX(&c->lct_lock); 1070 init_MUTEX(&c->lct_lock);
1104 c->unit = unit++;
1105 sprintf(c->name, "iop%d", c->unit);
1106 1071
1107 device_initialize(&c->device); 1072 device_initialize(&c->device);
1108 1073
@@ -1137,36 +1102,29 @@ int i2o_iop_add(struct i2o_controller *c)
1137 goto iop_reset; 1102 goto iop_reset;
1138 } 1103 }
1139 1104
1140 c->classdev = class_device_create(i2o_controller_class, NULL, MKDEV(0,0),
1141 &c->device, "iop%d", c->unit);
1142 if (IS_ERR(c->classdev)) {
1143 osm_err("%s: could not add controller class\n", c->name);
1144 goto device_del;
1145 }
1146
1147 osm_info("%s: Activating I2O controller...\n", c->name); 1105 osm_info("%s: Activating I2O controller...\n", c->name);
1148 osm_info("%s: This may take a few minutes if there are many devices\n", 1106 osm_info("%s: This may take a few minutes if there are many devices\n",
1149 c->name); 1107 c->name);
1150 1108
1151 if ((rc = i2o_iop_activate(c))) { 1109 if ((rc = i2o_iop_activate(c))) {
1152 osm_err("%s: could not activate controller\n", c->name); 1110 osm_err("%s: could not activate controller\n", c->name);
1153 goto class_del; 1111 goto device_del;
1154 } 1112 }
1155 1113
1156 osm_debug("%s: building sys table...\n", c->name); 1114 osm_debug("%s: building sys table...\n", c->name);
1157 1115
1158 if ((rc = i2o_systab_build())) 1116 if ((rc = i2o_systab_build()))
1159 goto class_del; 1117 goto device_del;
1160 1118
1161 osm_debug("%s: online controller...\n", c->name); 1119 osm_debug("%s: online controller...\n", c->name);
1162 1120
1163 if ((rc = i2o_iop_online(c))) 1121 if ((rc = i2o_iop_online(c)))
1164 goto class_del; 1122 goto device_del;
1165 1123
1166 osm_debug("%s: getting LCT...\n", c->name); 1124 osm_debug("%s: getting LCT...\n", c->name);
1167 1125
1168 if ((rc = i2o_exec_lct_get(c))) 1126 if ((rc = i2o_exec_lct_get(c)))
1169 goto class_del; 1127 goto device_del;
1170 1128
1171 list_add(&c->list, &i2o_controllers); 1129 list_add(&c->list, &i2o_controllers);
1172 1130
@@ -1176,9 +1134,6 @@ int i2o_iop_add(struct i2o_controller *c)
1176 1134
1177 return 0; 1135 return 0;
1178 1136
1179 class_del:
1180 class_device_unregister(c->classdev);
1181
1182 device_del: 1137 device_del:
1183 device_del(&c->device); 1138 device_del(&c->device);
1184 1139
@@ -1199,28 +1154,27 @@ int i2o_iop_add(struct i2o_controller *c)
1199 * is waited for, or expected. If you do not want further notifications, 1154 * is waited for, or expected. If you do not want further notifications,
1200 * call the i2o_event_register again with a evt_mask of 0. 1155 * call the i2o_event_register again with a evt_mask of 0.
1201 * 1156 *
1202 * Returns 0 on success or -ETIMEDOUT if no message could be fetched for 1157 * Returns 0 on success or negative error code on failure.
1203 * sending the request.
1204 */ 1158 */
1205int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv, 1159int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv,
1206 int tcntxt, u32 evt_mask) 1160 int tcntxt, u32 evt_mask)
1207{ 1161{
1208 struct i2o_controller *c = dev->iop; 1162 struct i2o_controller *c = dev->iop;
1209 struct i2o_message __iomem *msg; 1163 struct i2o_message *msg;
1210 u32 m;
1211 1164
1212 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 1165 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
1213 if (m == I2O_QUEUE_EMPTY) 1166 if (IS_ERR(msg))
1214 return -ETIMEDOUT; 1167 return PTR_ERR(msg);
1215 1168
1216 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 1169 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
1217 writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->lct_data. 1170 msg->u.head[1] =
1218 tid, &msg->u.head[1]); 1171 cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->
1219 writel(drv->context, &msg->u.s.icntxt); 1172 lct_data.tid);
1220 writel(tcntxt, &msg->u.s.tcntxt); 1173 msg->u.s.icntxt = cpu_to_le32(drv->context);
1221 writel(evt_mask, &msg->body[0]); 1174 msg->u.s.tcntxt = cpu_to_le32(tcntxt);
1175 msg->body[0] = cpu_to_le32(evt_mask);
1222 1176
1223 i2o_msg_post(c, m); 1177 i2o_msg_post(c, msg);
1224 1178
1225 return 0; 1179 return 0;
1226}; 1180};
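i2o_event_register() above also shows the message conventions used throughout this series: i2o_msg_get_wait() now returns an ERR_PTR-encoded message frame instead of an __iomem pointer plus frame index, the header words are filled in with cpu_to_le32() stores, and the frame is handed back with i2o_msg_post() (or i2o_msg_post_wait() when a reply is awaited). A hedged usage sketch from the point of view of an OSM (my_drv and my_tcntxt are made-up names):

	/* subscribe to all events on a device ... */
	rc = i2o_event_register(i2o_dev, &my_drv, my_tcntxt, 0xffffffff);
	if (rc)
		osm_err("event registration failed: %d\n", rc);

	/* ... and later unsubscribe again, as the comment above describes */
	i2o_event_register(i2o_dev, &my_drv, my_tcntxt, 0);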
@@ -1239,14 +1193,8 @@ static int __init i2o_iop_init(void)
1239 1193
1240 printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); 1194 printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
1241 1195
1242 i2o_controller_class = class_create(THIS_MODULE, "i2o_controller");
1243 if (IS_ERR(i2o_controller_class)) {
1244 osm_err("can't register class i2o_controller\n");
1245 goto exit;
1246 }
1247
1248 if ((rc = i2o_driver_init())) 1196 if ((rc = i2o_driver_init()))
1249 goto class_exit; 1197 goto exit;
1250 1198
1251 if ((rc = i2o_exec_init())) 1199 if ((rc = i2o_exec_init()))
1252 goto driver_exit; 1200 goto driver_exit;
@@ -1262,9 +1210,6 @@ static int __init i2o_iop_init(void)
1262 driver_exit: 1210 driver_exit:
1263 i2o_driver_exit(); 1211 i2o_driver_exit();
1264 1212
1265 class_exit:
1266 class_destroy(i2o_controller_class);
1267
1268 exit: 1213 exit:
1269 return rc; 1214 return rc;
1270} 1215}
@@ -1279,7 +1224,6 @@ static void __exit i2o_iop_exit(void)
1279 i2o_pci_exit(); 1224 i2o_pci_exit();
1280 i2o_exec_exit(); 1225 i2o_exec_exit();
1281 i2o_driver_exit(); 1226 i2o_driver_exit();
1282 class_destroy(i2o_controller_class);
1283}; 1227};
1284 1228
1285module_init(i2o_iop_init); 1229module_init(i2o_iop_init);
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
index ee7075fa1ec3..c5b656cdea7c 100644
--- a/drivers/message/i2o/pci.c
+++ b/drivers/message/i2o/pci.c
@@ -339,7 +339,7 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
339 pci_name(pdev)); 339 pci_name(pdev));
340 340
341 c->pdev = pdev; 341 c->pdev = pdev;
342 c->device.parent = get_device(&pdev->dev); 342 c->device.parent = &pdev->dev;
343 343
344 /* Cards that fall apart if you hit them with large I/O loads... */ 344 /* Cards that fall apart if you hit them with large I/O loads... */
345 if (pdev->vendor == PCI_VENDOR_ID_NCR && pdev->device == 0x0630) { 345 if (pdev->vendor == PCI_VENDOR_ID_NCR && pdev->device == 0x0630) {
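Dropping the explicit reference on the PCI parent works because the driver core takes its own reference on dev->parent when the controller's device is added and releases it again when the device is deleted, so the probe, error and remove paths below no longer need a private get_device()/put_device() pair on &pdev->dev. Roughly (a sketch of the pairing being removed, not of new code):

	/* old scheme: pci.c pins the parent for the controller's lifetime */
	c->device.parent = get_device(&pdev->dev);	/* probe */
	/* ... controller lifetime ... */
	put_device(c->device.parent);			/* remove / error path */

	/* new scheme: just point at the parent; the device core keeps it
	 * alive between device_add() and device_del() on c->device */
	c->device.parent = &pdev->dev;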
@@ -410,8 +410,6 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
410 if ((rc = i2o_iop_add(c))) 410 if ((rc = i2o_iop_add(c)))
411 goto uninstall; 411 goto uninstall;
412 412
413 get_device(&c->device);
414
415 if (i960) 413 if (i960)
416 pci_write_config_word(i960, 0x42, 0x03ff); 414 pci_write_config_word(i960, 0x42, 0x03ff);
417 415
@@ -424,7 +422,6 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
424 i2o_pci_free(c); 422 i2o_pci_free(c);
425 423
426 free_controller: 424 free_controller:
427 put_device(c->device.parent);
428 i2o_iop_free(c); 425 i2o_iop_free(c);
429 426
430 disable: 427 disable:
@@ -454,7 +451,6 @@ static void __devexit i2o_pci_remove(struct pci_dev *pdev)
454 451
455 printk(KERN_INFO "%s: Controller removed.\n", c->name); 452 printk(KERN_INFO "%s: Controller removed.\n", c->name);
456 453
457 put_device(c->device.parent);
458 put_device(&c->device); 454 put_device(&c->device);
459}; 455};
460 456
@@ -483,4 +479,5 @@ void __exit i2o_pci_exit(void)
483{ 479{
484 pci_unregister_driver(&i2o_pci_driver); 480 pci_unregister_driver(&i2o_pci_driver);
485}; 481};
482
486MODULE_DEVICE_TABLE(pci, i2o_pci_ids); 483MODULE_DEVICE_TABLE(pci, i2o_pci_ids);
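MODULE_DEVICE_TABLE(pci, i2o_pci_ids) exports the driver's PCI ID table to userspace so hotplug/udev can autoload the module when a matching controller appears. A minimal sketch of how such a class-based table is typically declared (the entries are illustrative, not the driver's exact list):

	static struct pci_device_id example_pci_ids[] = {
		/* match any device advertising the I2O PCI class code */
		{ PCI_DEVICE_CLASS(PCI_CLASS_INTELLIGENT_I2O << 8, ~0) },
		{ 0 }
	};
	MODULE_DEVICE_TABLE(pci, example_pci_ids);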