about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorMarkus Lidel <Markus.Lidel@shadowconnect.com>2006-01-06 03:19:29 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2006-01-06 11:33:53 -0500
commita1a5ea70a6e9db6332b27fe2d96666e17aa1436b (patch)
tree7edfe920aa40af94464ab05539d449dbe5689254 /drivers
parent347a8dc3b815f0c0fa62a1df075184ffe4cbdcf1 (diff)
[PATCH] I2O: changed I2O API to create I2O messages in kernel memory
Changed the I2O API to create I2O messages first in kernel memory and then transfer it at once over the PCI bus instead of sending each quad-word over the PCI bus.

Signed-off-by: Markus Lidel <Markus.Lidel@shadowconnect.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/message/i2o/bus-osm.c21
-rw-r--r--drivers/message/i2o/device.c51
-rw-r--r--drivers/message/i2o/exec-osm.c93
-rw-r--r--drivers/message/i2o/i2o_block.c157
-rw-r--r--drivers/message/i2o/i2o_config.c169
-rw-r--r--drivers/message/i2o/i2o_scsi.c50
-rw-r--r--drivers/message/i2o/iop.c296
-rw-r--r--drivers/message/i2o/pci.c1
8 files changed, 412 insertions(+), 426 deletions(-)
diff --git a/drivers/message/i2o/bus-osm.c b/drivers/message/i2o/bus-osm.c
index 151b228e1cb3..ce039d322fd0 100644
--- a/drivers/message/i2o/bus-osm.c
+++ b/drivers/message/i2o/bus-osm.c
@@ -39,18 +39,18 @@ static struct i2o_class_id i2o_bus_class_id[] = {
39 */ 39 */
40static int i2o_bus_scan(struct i2o_device *dev) 40static int i2o_bus_scan(struct i2o_device *dev)
41{ 41{
42 struct i2o_message __iomem *msg; 42 struct i2o_message *msg;
43 u32 m;
44 43
45 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 44 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
46 if (m == I2O_QUEUE_EMPTY) 45 if (IS_ERR(msg))
47 return -ETIMEDOUT; 46 return -ETIMEDOUT;
48 47
49 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 48 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
50 writel(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data.tid, 49 msg->u.head[1] =
51 &msg->u.head[1]); 50 cpu_to_le32(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data.
51 tid);
52 52
53 return i2o_msg_post_wait(dev->iop, m, 60); 53 return i2o_msg_post_wait(dev->iop, msg, 60);
54}; 54};
55 55
56/** 56/**
@@ -59,8 +59,9 @@ static int i2o_bus_scan(struct i2o_device *dev)
59 * 59 *
60 * Returns count. 60 * Returns count.
61 */ 61 */
62static ssize_t i2o_bus_store_scan(struct device *d, struct device_attribute *attr, const char *buf, 62static ssize_t i2o_bus_store_scan(struct device *d,
63 size_t count) 63 struct device_attribute *attr,
64 const char *buf, size_t count)
64{ 65{
65 struct i2o_device *i2o_dev = to_i2o_device(d); 66 struct i2o_device *i2o_dev = to_i2o_device(d);
66 int rc; 67 int rc;
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
index 8eb50cdb8ae1..002ae0ed8966 100644
--- a/drivers/message/i2o/device.c
+++ b/drivers/message/i2o/device.c
@@ -35,18 +35,18 @@
35static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd, 35static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd,
36 u32 type) 36 u32 type)
37{ 37{
38 struct i2o_message __iomem *msg; 38 struct i2o_message *msg;
39 u32 m;
40 39
41 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 40 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
42 if (m == I2O_QUEUE_EMPTY) 41 if (IS_ERR(msg))
43 return -ETIMEDOUT; 42 return PTR_ERR(msg);
44 43
45 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 44 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
46 writel(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid, &msg->u.head[1]); 45 msg->u.head[1] =
47 writel(type, &msg->body[0]); 46 cpu_to_le32(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid);
47 msg->body[0] = cpu_to_le32(type);
48 48
49 return i2o_msg_post_wait(dev->iop, m, 60); 49 return i2o_msg_post_wait(dev->iop, msg, 60);
50} 50}
51 51
52/** 52/**
@@ -419,10 +419,9 @@ int i2o_device_parse_lct(struct i2o_controller *c)
419 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize. 419 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
420 */ 420 */
421int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist, 421int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
422 int oplen, void *reslist, int reslen) 422 int oplen, void *reslist, int reslen)
423{ 423{
424 struct i2o_message __iomem *msg; 424 struct i2o_message *msg;
425 u32 m;
426 u32 *res32 = (u32 *) reslist; 425 u32 *res32 = (u32 *) reslist;
427 u32 *restmp = (u32 *) reslist; 426 u32 *restmp = (u32 *) reslist;
428 int len = 0; 427 int len = 0;
@@ -437,26 +436,28 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
437 if (i2o_dma_alloc(dev, &res, reslen, GFP_KERNEL)) 436 if (i2o_dma_alloc(dev, &res, reslen, GFP_KERNEL))
438 return -ENOMEM; 437 return -ENOMEM;
439 438
440 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 439 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
441 if (m == I2O_QUEUE_EMPTY) { 440 if (IS_ERR(msg)) {
442 i2o_dma_free(dev, &res); 441 i2o_dma_free(dev, &res);
443 return -ETIMEDOUT; 442 return PTR_ERR(msg);
444 } 443 }
445 444
446 i = 0; 445 i = 0;
447 writel(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid, 446 msg->u.head[1] =
448 &msg->u.head[1]); 447 cpu_to_le32(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid);
449 writel(0, &msg->body[i++]); 448 msg->body[i++] = cpu_to_le32(0x00000000);
450 writel(0x4C000000 | oplen, &msg->body[i++]); /* OperationList */ 449 msg->body[i++] = cpu_to_le32(0x4C000000 | oplen); /* OperationList */
451 memcpy_toio(&msg->body[i], oplist, oplen); 450 memcpy(&msg->body[i], oplist, oplen);
451
452 i += (oplen / 4 + (oplen % 4 ? 1 : 0)); 452 i += (oplen / 4 + (oplen % 4 ? 1 : 0));
453 writel(0xD0000000 | res.len, &msg->body[i++]); /* ResultList */ 453 msg->body[i++] = cpu_to_le32(0xD0000000 | res.len); /* ResultList */
454 writel(res.phys, &msg->body[i++]); 454 msg->body[i++] = cpu_to_le32(res.phys);
455 455
456 writel(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) | 456 msg->u.head[0] =
457 SGL_OFFSET_5, &msg->u.head[0]); 457 cpu_to_le32(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) |
458 SGL_OFFSET_5);
458 459
459 rc = i2o_msg_post_wait_mem(c, m, 10, &res); 460 rc = i2o_msg_post_wait_mem(c, msg, 10, &res);
460 461
461 /* This only looks like a memory leak - don't "fix" it. */ 462 /* This only looks like a memory leak - don't "fix" it. */
462 if (rc == -ETIMEDOUT) 463 if (rc == -ETIMEDOUT)
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index 9c339a2505b0..71a09332e7c0 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -114,13 +114,12 @@ static void i2o_exec_wait_free(struct i2o_exec_wait *wait)
114 * Returns 0 on success, negative error code on timeout or positive error 114 * Returns 0 on success, negative error code on timeout or positive error
115 * code from reply. 115 * code from reply.
116 */ 116 */
117int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long 117int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
118 timeout, struct i2o_dma *dma) 118 unsigned long timeout, struct i2o_dma *dma)
119{ 119{
120 DECLARE_WAIT_QUEUE_HEAD(wq); 120 DECLARE_WAIT_QUEUE_HEAD(wq);
121 struct i2o_exec_wait *wait; 121 struct i2o_exec_wait *wait;
122 static u32 tcntxt = 0x80000000; 122 static u32 tcntxt = 0x80000000;
123 struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m);
124 int rc = 0; 123 int rc = 0;
125 124
126 wait = i2o_exec_wait_alloc(); 125 wait = i2o_exec_wait_alloc();
@@ -138,15 +137,15 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long
138 * We will only use transaction contexts >= 0x80000000 for POST WAIT, 137 * We will only use transaction contexts >= 0x80000000 for POST WAIT,
139 * so we could find a POST WAIT reply easier in the reply handler. 138 * so we could find a POST WAIT reply easier in the reply handler.
140 */ 139 */
141 writel(i2o_exec_driver.context, &msg->u.s.icntxt); 140 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
142 wait->tcntxt = tcntxt++; 141 wait->tcntxt = tcntxt++;
143 writel(wait->tcntxt, &msg->u.s.tcntxt); 142 msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt);
144 143
145 /* 144 /*
146 * Post the message to the controller. At some point later it will 145 * Post the message to the controller. At some point later it will
147 * return. If we time out before it returns then complete will be zero. 146 * return. If we time out before it returns then complete will be zero.
148 */ 147 */
149 i2o_msg_post(c, m); 148 i2o_msg_post(c, msg);
150 149
151 if (!wait->complete) { 150 if (!wait->complete) {
152 wait->wq = &wq; 151 wait->wq = &wq;
@@ -266,7 +265,8 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
266 * 265 *
267 * Returns number of bytes printed into buffer. 266 * Returns number of bytes printed into buffer.
268 */ 267 */
269static ssize_t i2o_exec_show_vendor_id(struct device *d, struct device_attribute *attr, char *buf) 268static ssize_t i2o_exec_show_vendor_id(struct device *d,
269 struct device_attribute *attr, char *buf)
270{ 270{
271 struct i2o_device *dev = to_i2o_device(d); 271 struct i2o_device *dev = to_i2o_device(d);
272 u16 id; 272 u16 id;
@@ -286,7 +286,9 @@ static ssize_t i2o_exec_show_vendor_id(struct device *d, struct device_attribute
286 * 286 *
287 * Returns number of bytes printed into buffer. 287 * Returns number of bytes printed into buffer.
288 */ 288 */
289static ssize_t i2o_exec_show_product_id(struct device *d, struct device_attribute *attr, char *buf) 289static ssize_t i2o_exec_show_product_id(struct device *d,
290 struct device_attribute *attr,
291 char *buf)
290{ 292{
291 struct i2o_device *dev = to_i2o_device(d); 293 struct i2o_device *dev = to_i2o_device(d);
292 u16 id; 294 u16 id;
@@ -385,23 +387,22 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m,
385 u32 context; 387 u32 context;
386 388
387 if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) { 389 if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) {
390 struct i2o_message __iomem *pmsg;
391 u32 pm;
392
388 /* 393 /*
389 * If Fail bit is set we must take the transaction context of 394 * If Fail bit is set we must take the transaction context of
390 * the preserved message to find the right request again. 395 * the preserved message to find the right request again.
391 */ 396 */
392 struct i2o_message __iomem *pmsg;
393 u32 pm;
394 397
395 pm = le32_to_cpu(msg->body[3]); 398 pm = le32_to_cpu(msg->body[3]);
396
397 pmsg = i2o_msg_in_to_virt(c, pm); 399 pmsg = i2o_msg_in_to_virt(c, pm);
400 context = readl(&pmsg->u.s.tcntxt);
398 401
399 i2o_report_status(KERN_INFO, "i2o_core", msg); 402 i2o_report_status(KERN_INFO, "i2o_core", msg);
400 403
401 context = readl(&pmsg->u.s.tcntxt);
402
403 /* Release the preserved msg */ 404 /* Release the preserved msg */
404 i2o_msg_nop(c, pm); 405 i2o_msg_nop_mfa(c, pm);
405 } else 406 } else
406 context = le32_to_cpu(msg->u.s.tcntxt); 407 context = le32_to_cpu(msg->u.s.tcntxt);
407 408
@@ -462,25 +463,26 @@ static void i2o_exec_event(struct i2o_event *evt)
462 */ 463 */
463int i2o_exec_lct_get(struct i2o_controller *c) 464int i2o_exec_lct_get(struct i2o_controller *c)
464{ 465{
465 struct i2o_message __iomem *msg; 466 struct i2o_message *msg;
466 u32 m;
467 int i = 0; 467 int i = 0;
468 int rc = -EAGAIN; 468 int rc = -EAGAIN;
469 469
470 for (i = 1; i <= I2O_LCT_GET_TRIES; i++) { 470 for (i = 1; i <= I2O_LCT_GET_TRIES; i++) {
471 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 471 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
472 if (m == I2O_QUEUE_EMPTY) 472 if (IS_ERR(msg))
473 return -ETIMEDOUT; 473 return PTR_ERR(msg);
474 474
475 writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); 475 msg->u.head[0] =
476 writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID, 476 cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
477 &msg->u.head[1]); 477 msg->u.head[1] =
478 writel(0xffffffff, &msg->body[0]); 478 cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
479 writel(0x00000000, &msg->body[1]); 479 ADAPTER_TID);
480 writel(0xd0000000 | c->dlct.len, &msg->body[2]); 480 msg->body[0] = cpu_to_le32(0xffffffff);
481 writel(c->dlct.phys, &msg->body[3]); 481 msg->body[1] = cpu_to_le32(0x00000000);
482 482 msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
483 rc = i2o_msg_post_wait(c, m, I2O_TIMEOUT_LCT_GET); 483 msg->body[3] = cpu_to_le32(c->dlct.phys);
484
485 rc = i2o_msg_post_wait(c, msg, I2O_TIMEOUT_LCT_GET);
484 if (rc < 0) 486 if (rc < 0)
485 break; 487 break;
486 488
@@ -506,29 +508,28 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
506{ 508{
507 i2o_status_block *sb = c->status_block.virt; 509 i2o_status_block *sb = c->status_block.virt;
508 struct device *dev; 510 struct device *dev;
509 struct i2o_message __iomem *msg; 511 struct i2o_message *msg;
510 u32 m;
511 512
512 dev = &c->pdev->dev; 513 dev = &c->pdev->dev;
513 514
514 if (i2o_dma_realloc(dev, &c->dlct, sb->expected_lct_size, GFP_KERNEL)) 515 if (i2o_dma_realloc(dev, &c->dlct, sb->expected_lct_size, GFP_KERNEL))
515 return -ENOMEM; 516 return -ENOMEM;
516 517
517 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 518 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
518 if (m == I2O_QUEUE_EMPTY) 519 if (IS_ERR(msg))
519 return -ETIMEDOUT; 520 return PTR_ERR(msg);
520 521
521 writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); 522 msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
522 writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID, 523 msg->u.head[1] = cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
523 &msg->u.head[1]); 524 ADAPTER_TID);
524 writel(i2o_exec_driver.context, &msg->u.s.icntxt); 525 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
525 writel(0, &msg->u.s.tcntxt); /* FIXME */ 526 msg->u.s.tcntxt = cpu_to_le32(0x00000000);
526 writel(0xffffffff, &msg->body[0]); 527 msg->body[0] = cpu_to_le32(0xffffffff);
527 writel(change_ind, &msg->body[1]); 528 msg->body[1] = cpu_to_le32(change_ind);
528 writel(0xd0000000 | c->dlct.len, &msg->body[2]); 529 msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
529 writel(c->dlct.phys, &msg->body[3]); 530 msg->body[3] = cpu_to_le32(c->dlct.phys);
530 531
531 i2o_msg_post(c, m); 532 i2o_msg_post(c, msg);
532 533
533 return 0; 534 return 0;
534}; 535};
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index f283b5bafdd3..2bd15c70773b 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -130,20 +130,20 @@ static int i2o_block_remove(struct device *dev)
130 */ 130 */
131static int i2o_block_device_flush(struct i2o_device *dev) 131static int i2o_block_device_flush(struct i2o_device *dev)
132{ 132{
133 struct i2o_message __iomem *msg; 133 struct i2o_message *msg;
134 u32 m;
135 134
136 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 135 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
137 if (m == I2O_QUEUE_EMPTY) 136 if (IS_ERR(msg))
138 return -ETIMEDOUT; 137 return PTR_ERR(msg);
139 138
140 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 139 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
141 writel(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->lct_data.tid, 140 msg->u.head[1] =
142 &msg->u.head[1]); 141 cpu_to_le32(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->
143 writel(60 << 16, &msg->body[0]); 142 lct_data.tid);
143 msg->body[0] = cpu_to_le32(60 << 16);
144 osm_debug("Flushing...\n"); 144 osm_debug("Flushing...\n");
145 145
146 return i2o_msg_post_wait(dev->iop, m, 60); 146 return i2o_msg_post_wait(dev->iop, msg, 60);
147}; 147};
148 148
149/** 149/**
@@ -181,21 +181,21 @@ static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk,
181 */ 181 */
182static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id) 182static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
183{ 183{
184 struct i2o_message __iomem *msg; 184 struct i2o_message *msg;
185 u32 m; 185
186 186 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
187 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 187 if (IS_ERR(msg))
188 if (m == I2O_QUEUE_EMPTY) 188 return PTR_ERR(msg);
189 return -ETIMEDOUT; 189
190 190 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
191 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 191 msg->u.head[1] =
192 writel(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->lct_data.tid, 192 cpu_to_le32(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->
193 &msg->u.head[1]); 193 lct_data.tid);
194 writel(-1, &msg->body[0]); 194 msg->body[0] = cpu_to_le32(-1);
195 writel(0, &msg->body[1]); 195 msg->body[1] = cpu_to_le32(0x00000000);
196 osm_debug("Mounting...\n"); 196 osm_debug("Mounting...\n");
197 197
198 return i2o_msg_post_wait(dev->iop, m, 2); 198 return i2o_msg_post_wait(dev->iop, msg, 2);
199}; 199};
200 200
201/** 201/**
@@ -210,20 +210,20 @@ static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
210 */ 210 */
211static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id) 211static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
212{ 212{
213 struct i2o_message __iomem *msg; 213 struct i2o_message *msg;
214 u32 m;
215 214
216 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 215 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
217 if (m == I2O_QUEUE_EMPTY) 216 if (IS_ERR(msg) == I2O_QUEUE_EMPTY)
218 return -ETIMEDOUT; 217 return PTR_ERR(msg);
219 218
220 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 219 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
221 writel(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid, 220 msg->u.head[1] =
222 &msg->u.head[1]); 221 cpu_to_le32(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->
223 writel(-1, &msg->body[0]); 222 lct_data.tid);
223 msg->body[0] = cpu_to_le32(-1);
224 osm_debug("Locking...\n"); 224 osm_debug("Locking...\n");
225 225
226 return i2o_msg_post_wait(dev->iop, m, 2); 226 return i2o_msg_post_wait(dev->iop, msg, 2);
227}; 227};
228 228
229/** 229/**
@@ -238,20 +238,20 @@ static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
238 */ 238 */
239static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id) 239static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
240{ 240{
241 struct i2o_message __iomem *msg; 241 struct i2o_message *msg;
242 u32 m;
243 242
244 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 243 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
245 if (m == I2O_QUEUE_EMPTY) 244 if (IS_ERR(msg))
246 return -ETIMEDOUT; 245 return PTR_ERR(msg);
247 246
248 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 247 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
249 writel(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid, 248 msg->u.head[1] =
250 &msg->u.head[1]); 249 cpu_to_le32(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->
251 writel(media_id, &msg->body[0]); 250 lct_data.tid);
251 msg->body[0] = cpu_to_le32(media_id);
252 osm_debug("Unlocking...\n"); 252 osm_debug("Unlocking...\n");
253 253
254 return i2o_msg_post_wait(dev->iop, m, 2); 254 return i2o_msg_post_wait(dev->iop, msg, 2);
255}; 255};
256 256
257/** 257/**
@@ -267,21 +267,21 @@ static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
267{ 267{
268 struct i2o_device *i2o_dev = dev->i2o_dev; 268 struct i2o_device *i2o_dev = dev->i2o_dev;
269 struct i2o_controller *c = i2o_dev->iop; 269 struct i2o_controller *c = i2o_dev->iop;
270 struct i2o_message __iomem *msg; 270 struct i2o_message *msg;
271 u32 m;
272 int rc; 271 int rc;
273 272
274 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 273 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
275 if (m == I2O_QUEUE_EMPTY) 274 if (IS_ERR(msg))
276 return -ETIMEDOUT; 275 return PTR_ERR(msg);
277 276
278 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 277 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
279 writel(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->lct_data. 278 msg->u.head[1] =
280 tid, &msg->u.head[1]); 279 cpu_to_le32(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->
281 writel(op << 24, &msg->body[0]); 280 lct_data.tid);
281 msg->body[0] = cpu_to_le32(op << 24);
282 osm_debug("Power...\n"); 282 osm_debug("Power...\n");
283 283
284 rc = i2o_msg_post_wait(c, m, 60); 284 rc = i2o_msg_post_wait(c, msg, 60);
285 if (!rc) 285 if (!rc)
286 dev->power = op; 286 dev->power = op;
287 287
@@ -331,7 +331,7 @@ static inline void i2o_block_request_free(struct i2o_block_request *ireq)
331 */ 331 */
332static inline int i2o_block_sglist_alloc(struct i2o_controller *c, 332static inline int i2o_block_sglist_alloc(struct i2o_controller *c,
333 struct i2o_block_request *ireq, 333 struct i2o_block_request *ireq,
334 u32 __iomem ** mptr) 334 u32 ** mptr)
335{ 335{
336 int nents; 336 int nents;
337 enum dma_data_direction direction; 337 enum dma_data_direction direction;
@@ -745,10 +745,9 @@ static int i2o_block_transfer(struct request *req)
745 struct i2o_block_device *dev = req->rq_disk->private_data; 745 struct i2o_block_device *dev = req->rq_disk->private_data;
746 struct i2o_controller *c; 746 struct i2o_controller *c;
747 int tid = dev->i2o_dev->lct_data.tid; 747 int tid = dev->i2o_dev->lct_data.tid;
748 struct i2o_message __iomem *msg; 748 struct i2o_message *msg;
749 u32 __iomem *mptr; 749 u32 *mptr;
750 struct i2o_block_request *ireq = req->special; 750 struct i2o_block_request *ireq = req->special;
751 u32 m;
752 u32 tcntxt; 751 u32 tcntxt;
753 u32 sgl_offset = SGL_OFFSET_8; 752 u32 sgl_offset = SGL_OFFSET_8;
754 u32 ctl_flags = 0x00000000; 753 u32 ctl_flags = 0x00000000;
@@ -763,9 +762,9 @@ static int i2o_block_transfer(struct request *req)
763 762
764 c = dev->i2o_dev->iop; 763 c = dev->i2o_dev->iop;
765 764
766 m = i2o_msg_get(c, &msg); 765 msg = i2o_msg_get(c);
767 if (m == I2O_QUEUE_EMPTY) { 766 if (IS_ERR(msg)) {
768 rc = -EBUSY; 767 rc = PTR_ERR(msg);
769 goto exit; 768 goto exit;
770 } 769 }
771 770
@@ -775,8 +774,8 @@ static int i2o_block_transfer(struct request *req)
775 goto nop_msg; 774 goto nop_msg;
776 } 775 }
777 776
778 writel(i2o_block_driver.context, &msg->u.s.icntxt); 777 msg->u.s.icntxt = cpu_to_le32(i2o_block_driver.context);
779 writel(tcntxt, &msg->u.s.tcntxt); 778 msg->u.s.tcntxt = cpu_to_le32(tcntxt);
780 779
781 mptr = &msg->body[0]; 780 mptr = &msg->body[0];
782 781
@@ -834,11 +833,11 @@ static int i2o_block_transfer(struct request *req)
834 833
835 sgl_offset = SGL_OFFSET_12; 834 sgl_offset = SGL_OFFSET_12;
836 835
837 writel(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid, 836 msg->u.head[1] =
838 &msg->u.head[1]); 837 cpu_to_le32(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid);
839 838
840 writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++); 839 *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
841 writel(tid, mptr++); 840 *mptr++ = cpu_to_le32(tid);
842 841
843 /* 842 /*
844 * ENABLE_DISCONNECT 843 * ENABLE_DISCONNECT
@@ -853,22 +852,24 @@ static int i2o_block_transfer(struct request *req)
853 scsi_flags = 0xa0a0000a; 852 scsi_flags = 0xa0a0000a;
854 } 853 }
855 854
856 writel(scsi_flags, mptr++); 855 *mptr++ = cpu_to_le32(scsi_flags);
857 856
858 *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec); 857 *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec);
859 *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec); 858 *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec);
860 859
861 memcpy_toio(mptr, cmd, 10); 860 memcpy(mptr, cmd, 10);
862 mptr += 4; 861 mptr += 4;
863 writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++); 862 *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
864 } else 863 } else
865#endif 864#endif
866 { 865 {
867 writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]); 866 msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
868 writel(ctl_flags, mptr++); 867 *mptr++ = cpu_to_le32(ctl_flags);
869 writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++); 868 *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
870 writel((u32) (req->sector << KERNEL_SECTOR_SHIFT), mptr++); 869 *mptr++ =
871 writel(req->sector >> (32 - KERNEL_SECTOR_SHIFT), mptr++); 870 cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT));
871 *mptr++ =
872 cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT));
872 } 873 }
873 874
874 if (!i2o_block_sglist_alloc(c, ireq, &mptr)) { 875 if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
@@ -876,13 +877,13 @@ static int i2o_block_transfer(struct request *req)
876 goto context_remove; 877 goto context_remove;
877 } 878 }
878 879
879 writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | 880 msg->u.head[0] =
880 sgl_offset, &msg->u.head[0]); 881 cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);
881 882
882 list_add_tail(&ireq->queue, &dev->open_queue); 883 list_add_tail(&ireq->queue, &dev->open_queue);
883 dev->open_queue_depth++; 884 dev->open_queue_depth++;
884 885
885 i2o_msg_post(c, m); 886 i2o_msg_post(c, msg);
886 887
887 return 0; 888 return 0;
888 889
@@ -890,7 +891,7 @@ static int i2o_block_transfer(struct request *req)
890 i2o_cntxt_list_remove(c, req); 891 i2o_cntxt_list_remove(c, req);
891 892
892 nop_msg: 893 nop_msg:
893 i2o_msg_nop(c, m); 894 i2o_msg_nop(c, msg);
894 895
895 exit: 896 exit:
896 return rc; 897 return rc;
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 3c3a7abebb1b..4fe73d628c5b 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -230,8 +230,7 @@ static int i2o_cfg_swdl(unsigned long arg)
230 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; 230 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
231 unsigned char maxfrag = 0, curfrag = 1; 231 unsigned char maxfrag = 0, curfrag = 1;
232 struct i2o_dma buffer; 232 struct i2o_dma buffer;
233 struct i2o_message __iomem *msg; 233 struct i2o_message *msg;
234 u32 m;
235 unsigned int status = 0, swlen = 0, fragsize = 8192; 234 unsigned int status = 0, swlen = 0, fragsize = 8192;
236 struct i2o_controller *c; 235 struct i2o_controller *c;
237 236
@@ -257,31 +256,34 @@ static int i2o_cfg_swdl(unsigned long arg)
257 if (!c) 256 if (!c)
258 return -ENXIO; 257 return -ENXIO;
259 258
260 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 259 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
261 if (m == I2O_QUEUE_EMPTY) 260 if (IS_ERR(msg))
262 return -EBUSY; 261 return PTR_ERR(msg);
263 262
264 if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) { 263 if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
265 i2o_msg_nop(c, m); 264 i2o_msg_nop(c, msg);
266 return -ENOMEM; 265 return -ENOMEM;
267 } 266 }
268 267
269 __copy_from_user(buffer.virt, kxfer.buf, fragsize); 268 __copy_from_user(buffer.virt, kxfer.buf, fragsize);
270 269
271 writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]); 270 msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
272 writel(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 | ADAPTER_TID, 271 msg->u.head[1] =
273 &msg->u.head[1]); 272 cpu_to_le32(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 |
274 writel(i2o_config_driver.context, &msg->u.head[2]); 273 ADAPTER_TID);
275 writel(0, &msg->u.head[3]); 274 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
276 writel((((u32) kxfer.flags) << 24) | (((u32) kxfer.sw_type) << 16) | 275 msg->u.head[3] = cpu_to_le32(0);
277 (((u32) maxfrag) << 8) | (((u32) curfrag)), &msg->body[0]); 276 msg->body[0] =
278 writel(swlen, &msg->body[1]); 277 cpu_to_le32((((u32) kxfer.flags) << 24) | (((u32) kxfer.
279 writel(kxfer.sw_id, &msg->body[2]); 278 sw_type) << 16) |
280 writel(0xD0000000 | fragsize, &msg->body[3]); 279 (((u32) maxfrag) << 8) | (((u32) curfrag)));
281 writel(buffer.phys, &msg->body[4]); 280 msg->body[1] = cpu_to_le32(swlen);
281 msg->body[2] = cpu_to_le32(kxfer.sw_id);
282 msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
283 msg->body[4] = cpu_to_le32(buffer.phys);
282 284
283 osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); 285 osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
284 status = i2o_msg_post_wait_mem(c, m, 60, &buffer); 286 status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);
285 287
286 if (status != -ETIMEDOUT) 288 if (status != -ETIMEDOUT)
287 i2o_dma_free(&c->pdev->dev, &buffer); 289 i2o_dma_free(&c->pdev->dev, &buffer);
@@ -302,8 +304,7 @@ static int i2o_cfg_swul(unsigned long arg)
302 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; 304 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
303 unsigned char maxfrag = 0, curfrag = 1; 305 unsigned char maxfrag = 0, curfrag = 1;
304 struct i2o_dma buffer; 306 struct i2o_dma buffer;
305 struct i2o_message __iomem *msg; 307 struct i2o_message *msg;
306 u32 m;
307 unsigned int status = 0, swlen = 0, fragsize = 8192; 308 unsigned int status = 0, swlen = 0, fragsize = 8192;
308 struct i2o_controller *c; 309 struct i2o_controller *c;
309 int ret = 0; 310 int ret = 0;
@@ -330,30 +331,30 @@ static int i2o_cfg_swul(unsigned long arg)
330 if (!c) 331 if (!c)
331 return -ENXIO; 332 return -ENXIO;
332 333
333 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 334 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
334 if (m == I2O_QUEUE_EMPTY) 335 if (IS_ERR(msg))
335 return -EBUSY; 336 return PTR_ERR(msg);
336 337
337 if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) { 338 if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
338 i2o_msg_nop(c, m); 339 i2o_msg_nop(c, msg);
339 return -ENOMEM; 340 return -ENOMEM;
340 } 341 }
341 342
342 writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]); 343 msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
343 writel(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID, 344 msg->u.head[1] =
344 &msg->u.head[1]); 345 cpu_to_le32(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID);
345 writel(i2o_config_driver.context, &msg->u.head[2]); 346 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
346 writel(0, &msg->u.head[3]); 347 msg->u.head[3] = cpu_to_le32(0);
347 writel((u32) kxfer.flags << 24 | (u32) kxfer. 348 msg->body[0] =
348 sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag, 349 cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.
349 &msg->body[0]); 350 sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag);
350 writel(swlen, &msg->body[1]); 351 msg->body[1] = cpu_to_le32(swlen);
351 writel(kxfer.sw_id, &msg->body[2]); 352 msg->body[2] = cpu_to_le32(kxfer.sw_id);
352 writel(0xD0000000 | fragsize, &msg->body[3]); 353 msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
353 writel(buffer.phys, &msg->body[4]); 354 msg->body[4] = cpu_to_le32(buffer.phys);
354 355
355 osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); 356 osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
356 status = i2o_msg_post_wait_mem(c, m, 60, &buffer); 357 status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);
357 358
358 if (status != I2O_POST_WAIT_OK) { 359 if (status != I2O_POST_WAIT_OK) {
359 if (status != -ETIMEDOUT) 360 if (status != -ETIMEDOUT)
@@ -380,8 +381,7 @@ static int i2o_cfg_swdel(unsigned long arg)
380 struct i2o_controller *c; 381 struct i2o_controller *c;
381 struct i2o_sw_xfer kxfer; 382 struct i2o_sw_xfer kxfer;
382 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; 383 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
383 struct i2o_message __iomem *msg; 384 struct i2o_message *msg;
384 u32 m;
385 unsigned int swlen; 385 unsigned int swlen;
386 int token; 386 int token;
387 387
@@ -395,21 +395,21 @@ static int i2o_cfg_swdel(unsigned long arg)
395 if (!c) 395 if (!c)
396 return -ENXIO; 396 return -ENXIO;
397 397
398 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 398 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
399 if (m == I2O_QUEUE_EMPTY) 399 if (IS_ERR(msg))
400 return -EBUSY; 400 return PTR_ERR(msg);
401 401
402 writel(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 402 msg->u.head[0] = cpu_to_le32(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0);
403 writel(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID, 403 msg->u.head[1] =
404 &msg->u.head[1]); 404 cpu_to_le32(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID);
405 writel(i2o_config_driver.context, &msg->u.head[2]); 405 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
406 writel(0, &msg->u.head[3]); 406 msg->u.head[3] = cpu_to_le32(0);
407 writel((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16, 407 msg->body[0] =
408 &msg->body[0]); 408 cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16);
409 writel(swlen, &msg->body[1]); 409 msg->body[1] = cpu_to_le32(swlen);
410 writel(kxfer.sw_id, &msg->body[2]); 410 msg->body[2] = cpu_to_le32(kxfer.sw_id);
411 411
412 token = i2o_msg_post_wait(c, m, 10); 412 token = i2o_msg_post_wait(c, msg, 10);
413 413
414 if (token != I2O_POST_WAIT_OK) { 414 if (token != I2O_POST_WAIT_OK) {
415 osm_info("swdel failed, DetailedStatus = %d\n", token); 415 osm_info("swdel failed, DetailedStatus = %d\n", token);
@@ -423,25 +423,24 @@ static int i2o_cfg_validate(unsigned long arg)
423{ 423{
424 int token; 424 int token;
425 int iop = (int)arg; 425 int iop = (int)arg;
426 struct i2o_message __iomem *msg; 426 struct i2o_message *msg;
427 u32 m;
428 struct i2o_controller *c; 427 struct i2o_controller *c;
429 428
430 c = i2o_find_iop(iop); 429 c = i2o_find_iop(iop);
431 if (!c) 430 if (!c)
432 return -ENXIO; 431 return -ENXIO;
433 432
434 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 433 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
435 if (m == I2O_QUEUE_EMPTY) 434 if (IS_ERR(msg))
436 return -EBUSY; 435 return PTR_ERR(msg);
437 436
438 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 437 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
439 writel(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop, 438 msg->u.head[1] =
440 &msg->u.head[1]); 439 cpu_to_le32(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop);
441 writel(i2o_config_driver.context, &msg->u.head[2]); 440 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
442 writel(0, &msg->u.head[3]); 441 msg->u.head[3] = cpu_to_le32(0);
443 442
444 token = i2o_msg_post_wait(c, m, 10); 443 token = i2o_msg_post_wait(c, msg, 10);
445 444
446 if (token != I2O_POST_WAIT_OK) { 445 if (token != I2O_POST_WAIT_OK) {
447 osm_info("Can't validate configuration, ErrorStatus = %d\n", 446 osm_info("Can't validate configuration, ErrorStatus = %d\n",
@@ -454,8 +453,7 @@ static int i2o_cfg_validate(unsigned long arg)
454 453
455static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp) 454static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
456{ 455{
457 struct i2o_message __iomem *msg; 456 struct i2o_message *msg;
458 u32 m;
459 struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg; 457 struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg;
460 struct i2o_evt_id kdesc; 458 struct i2o_evt_id kdesc;
461 struct i2o_controller *c; 459 struct i2o_controller *c;
@@ -474,18 +472,19 @@ static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
474 if (!d) 472 if (!d)
475 return -ENODEV; 473 return -ENODEV;
476 474
477 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 475 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
478 if (m == I2O_QUEUE_EMPTY) 476 if (IS_ERR(msg))
479 return -EBUSY; 477 return PTR_ERR(msg);
480 478
481 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 479 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
482 writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | kdesc.tid, 480 msg->u.head[1] =
483 &msg->u.head[1]); 481 cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 |
484 writel(i2o_config_driver.context, &msg->u.head[2]); 482 kdesc.tid);
485 writel(i2o_cntxt_list_add(c, fp->private_data), &msg->u.head[3]); 483 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
486 writel(kdesc.evt_mask, &msg->body[0]); 484 msg->u.head[3] = cpu_to_le32(i2o_cntxt_list_add(c, fp->private_data));
485 msg->body[0] = cpu_to_le32(kdesc.evt_mask);
487 486
488 i2o_msg_post(c, m); 487 i2o_msg_post(c, msg);
489 488
490 return 0; 489 return 0;
491} 490}
@@ -537,7 +536,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
537 u32 sg_index = 0; 536 u32 sg_index = 0;
538 i2o_status_block *sb; 537 i2o_status_block *sb;
539 struct i2o_message *msg; 538 struct i2o_message *msg;
540 u32 m;
541 unsigned int iop; 539 unsigned int iop;
542 540
543 cmd = (struct i2o_cmd_passthru32 __user *)arg; 541 cmd = (struct i2o_cmd_passthru32 __user *)arg;
@@ -553,7 +551,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
553 return -ENXIO; 551 return -ENXIO;
554 } 552 }
555 553
556 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 554 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
557 555
558 sb = c->status_block.virt; 556 sb = c->status_block.virt;
559 557
@@ -595,8 +593,8 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
595 593
596 sg_offset = (msg->u.head[0] >> 4) & 0x0f; 594 sg_offset = (msg->u.head[0] >> 4) & 0x0f;
597 595
598 writel(i2o_config_driver.context, &msg->u.s.icntxt); 596 msg->u.s.icntxt = cpu_to_le32(i2o_config_driver.context);
599 writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt); 597 msg->u.s.tcntxt = cpu_to_le32(i2o_cntxt_list_add(c, reply));
600 598
601 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); 599 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
602 if (sg_offset) { 600 if (sg_offset) {
@@ -662,7 +660,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
662 } 660 }
663 } 661 }
664 662
665 rcode = i2o_msg_post_wait(c, m, 60); 663 rcode = i2o_msg_post_wait(c, msg, 60);
666 if (rcode) 664 if (rcode)
667 goto sg_list_cleanup; 665 goto sg_list_cleanup;
668 666
@@ -780,8 +778,7 @@ static int i2o_cfg_passthru(unsigned long arg)
780 u32 i = 0; 778 u32 i = 0;
781 void *p = NULL; 779 void *p = NULL;
782 i2o_status_block *sb; 780 i2o_status_block *sb;
783 struct i2o_message __iomem *msg; 781 struct i2o_message *msg;
784 u32 m;
785 unsigned int iop; 782 unsigned int iop;
786 783
787 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg)) 784 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
@@ -793,7 +790,7 @@ static int i2o_cfg_passthru(unsigned long arg)
793 return -ENXIO; 790 return -ENXIO;
794 } 791 }
795 792
796 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 793 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
797 794
798 sb = c->status_block.virt; 795 sb = c->status_block.virt;
799 796
@@ -830,8 +827,8 @@ static int i2o_cfg_passthru(unsigned long arg)
830 827
831 sg_offset = (msg->u.head[0] >> 4) & 0x0f; 828 sg_offset = (msg->u.head[0] >> 4) & 0x0f;
832 829
833 writel(i2o_config_driver.context, &msg->u.s.icntxt); 830 msg->u.s.icntxt = cpu_to_le32(i2o_config_driver.context);
834 writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt); 831 msg->u.s.tcntxt = cpu_to_le32(i2o_cntxt_list_add(c, reply));
835 832
836 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); 833 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
837 if (sg_offset) { 834 if (sg_offset) {
@@ -894,7 +891,7 @@ static int i2o_cfg_passthru(unsigned long arg)
894 } 891 }
895 } 892 }
896 893
897 rcode = i2o_msg_post_wait(c, m, 60); 894 rcode = i2o_msg_post_wait(c, msg, 60);
898 if (rcode) 895 if (rcode)
899 goto sg_list_cleanup; 896 goto sg_list_cleanup;
900 897
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index 9f1744c3933b..7a784fd60804 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -510,8 +510,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
510 struct i2o_controller *c; 510 struct i2o_controller *c;
511 struct i2o_device *i2o_dev; 511 struct i2o_device *i2o_dev;
512 int tid; 512 int tid;
513 struct i2o_message __iomem *msg; 513 struct i2o_message *msg;
514 u32 m;
515 /* 514 /*
516 * ENABLE_DISCONNECT 515 * ENABLE_DISCONNECT
517 * SIMPLE_TAG 516 * SIMPLE_TAG
@@ -519,7 +518,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
519 */ 518 */
520 u32 scsi_flags = 0x20a00000; 519 u32 scsi_flags = 0x20a00000;
521 u32 sgl_offset; 520 u32 sgl_offset;
522 u32 __iomem *mptr; 521 u32 *mptr;
523 u32 cmd = I2O_CMD_SCSI_EXEC << 24; 522 u32 cmd = I2O_CMD_SCSI_EXEC << 24;
524 int rc = 0; 523 int rc = 0;
525 524
@@ -576,8 +575,8 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
576 * throw it back to the scsi layer 575 * throw it back to the scsi layer
577 */ 576 */
578 577
579 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 578 msg = i2o_msg_get(c);
580 if (m == I2O_QUEUE_EMPTY) { 579 if (IS_ERR(msg)) {
581 rc = SCSI_MLQUEUE_HOST_BUSY; 580 rc = SCSI_MLQUEUE_HOST_BUSY;
582 goto exit; 581 goto exit;
583 } 582 }
@@ -617,16 +616,16 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
617 if (sgl_offset == SGL_OFFSET_10) 616 if (sgl_offset == SGL_OFFSET_10)
618 sgl_offset = SGL_OFFSET_12; 617 sgl_offset = SGL_OFFSET_12;
619 cmd = I2O_CMD_PRIVATE << 24; 618 cmd = I2O_CMD_PRIVATE << 24;
620 writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++); 619 *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
621 writel(adpt_flags | tid, mptr++); 620 *mptr++ = cpu_to_le32(adpt_flags | tid);
622 } 621 }
623#endif 622#endif
624 623
625 writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]); 624 msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
626 writel(i2o_scsi_driver.context, &msg->u.s.icntxt); 625 msg->u.s.icntxt = cpu_to_le32(i2o_scsi_driver.context);
627 626
628 /* We want the SCSI control block back */ 627 /* We want the SCSI control block back */
629 writel(i2o_cntxt_list_add(c, SCpnt), &msg->u.s.tcntxt); 628 msg->u.s.tcntxt = cpu_to_le32(i2o_cntxt_list_add(c, SCpnt));
630 629
631 /* LSI_920_PCI_QUIRK 630 /* LSI_920_PCI_QUIRK
632 * 631 *
@@ -649,15 +648,15 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
649 } 648 }
650 */ 649 */
651 650
652 writel(scsi_flags | SCpnt->cmd_len, mptr++); 651 *mptr++ = cpu_to_le32(scsi_flags | SCpnt->cmd_len);
653 652
654 /* Write SCSI command into the message - always 16 byte block */ 653 /* Write SCSI command into the message - always 16 byte block */
655 memcpy_toio(mptr, SCpnt->cmnd, 16); 654 memcpy(mptr, SCpnt->cmnd, 16);
656 mptr += 4; 655 mptr += 4;
657 656
658 if (sgl_offset != SGL_OFFSET_0) { 657 if (sgl_offset != SGL_OFFSET_0) {
659 /* write size of data addressed by SGL */ 658 /* write size of data addressed by SGL */
660 writel(SCpnt->request_bufflen, mptr++); 659 *mptr++ = cpu_to_le32(SCpnt->request_bufflen);
661 660
662 /* Now fill in the SGList and command */ 661 /* Now fill in the SGList and command */
663 if (SCpnt->use_sg) { 662 if (SCpnt->use_sg) {
@@ -676,11 +675,11 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
676 } 675 }
677 676
678 /* Stick the headers on */ 677 /* Stick the headers on */
679 writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset, 678 msg->u.head[0] =
680 &msg->u.head[0]); 679 cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);
681 680
682 /* Queue the message */ 681 /* Queue the message */
683 i2o_msg_post(c, m); 682 i2o_msg_post(c, msg);
684 683
685 osm_debug("Issued %ld\n", SCpnt->serial_number); 684 osm_debug("Issued %ld\n", SCpnt->serial_number);
686 685
@@ -688,7 +687,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
688 687
689 nomem: 688 nomem:
690 rc = -ENOMEM; 689 rc = -ENOMEM;
691 i2o_msg_nop(c, m); 690 i2o_msg_nop(c, msg);
692 691
693 exit: 692 exit:
694 return rc; 693 return rc;
@@ -709,8 +708,7 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
709{ 708{
710 struct i2o_device *i2o_dev; 709 struct i2o_device *i2o_dev;
711 struct i2o_controller *c; 710 struct i2o_controller *c;
712 struct i2o_message __iomem *msg; 711 struct i2o_message *msg;
713 u32 m;
714 int tid; 712 int tid;
715 int status = FAILED; 713 int status = FAILED;
716 714
@@ -720,16 +718,16 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
720 c = i2o_dev->iop; 718 c = i2o_dev->iop;
721 tid = i2o_dev->lct_data.tid; 719 tid = i2o_dev->lct_data.tid;
722 720
723 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 721 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
724 if (m == I2O_QUEUE_EMPTY) 722 if (IS_ERR(msg))
725 return SCSI_MLQUEUE_HOST_BUSY; 723 return SCSI_MLQUEUE_HOST_BUSY;
726 724
727 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 725 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
728 writel(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid, 726 msg->u.head[1] =
729 &msg->u.head[1]); 727 cpu_to_le32(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid);
730 writel(i2o_cntxt_list_get_ptr(c, SCpnt), &msg->body[0]); 728 msg->body[0] = cpu_to_le32(i2o_cntxt_list_get_ptr(c, SCpnt));
731 729
732 if (i2o_msg_post_wait(c, m, I2O_TIMEOUT_SCSI_SCB_ABORT)) 730 if (i2o_msg_post_wait(c, msg, I2O_TIMEOUT_SCSI_SCB_ABORT))
733 status = SUCCESS; 731 status = SUCCESS;
734 732
735 return status; 733 return status;
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index 4eb53258842e..f86abb42bf89 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -47,27 +47,6 @@ static struct i2o_dma i2o_systab;
47static int i2o_hrt_get(struct i2o_controller *c); 47static int i2o_hrt_get(struct i2o_controller *c);
48 48
49/** 49/**
50 * i2o_msg_nop - Returns a message which is not used
51 * @c: I2O controller from which the message was created
52 * @m: message which should be returned
53 *
54 * If you fetch a message via i2o_msg_get, and can't use it, you must
55 * return the message with this function. Otherwise the message frame
56 * is lost.
57 */
58void i2o_msg_nop(struct i2o_controller *c, u32 m)
59{
60 struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m);
61
62 writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
63 writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID,
64 &msg->u.head[1]);
65 writel(0, &msg->u.head[2]);
66 writel(0, &msg->u.head[3]);
67 i2o_msg_post(c, m);
68};
69
70/**
71 * i2o_msg_get_wait - obtain an I2O message from the IOP 50 * i2o_msg_get_wait - obtain an I2O message from the IOP
72 * @c: I2O controller 51 * @c: I2O controller
73 * @msg: pointer to a I2O message pointer 52 * @msg: pointer to a I2O message pointer
@@ -81,22 +60,21 @@ void i2o_msg_nop(struct i2o_controller *c, u32 m)
81 * address from the read port (see the i2o spec). If no message is 60 * address from the read port (see the i2o spec). If no message is
82 * available returns I2O_QUEUE_EMPTY and msg is leaved untouched. 61 * available returns I2O_QUEUE_EMPTY and msg is leaved untouched.
83 */ 62 */
84u32 i2o_msg_get_wait(struct i2o_controller *c, 63struct i2o_message *i2o_msg_get_wait(struct i2o_controller *c, int wait)
85 struct i2o_message __iomem ** msg, int wait)
86{ 64{
87 unsigned long timeout = jiffies + wait * HZ; 65 unsigned long timeout = jiffies + wait * HZ;
88 u32 m; 66 struct i2o_message *msg;
89 67
90 while ((m = i2o_msg_get(c, msg)) == I2O_QUEUE_EMPTY) { 68 while (IS_ERR(msg = i2o_msg_get(c))) {
91 if (time_after(jiffies, timeout)) { 69 if (time_after(jiffies, timeout)) {
92 osm_debug("%s: Timeout waiting for message frame.\n", 70 osm_debug("%s: Timeout waiting for message frame.\n",
93 c->name); 71 c->name);
94 return I2O_QUEUE_EMPTY; 72 return ERR_PTR(-ETIMEDOUT);
95 } 73 }
96 schedule_timeout_uninterruptible(1); 74 schedule_timeout_uninterruptible(1);
97 } 75 }
98 76
99 return m; 77 return msg;
100}; 78};
101 79
102#if BITS_PER_LONG == 64 80#if BITS_PER_LONG == 64
@@ -301,8 +279,7 @@ struct i2o_device *i2o_iop_find_device(struct i2o_controller *c, u16 tid)
301 */ 279 */
302static int i2o_iop_quiesce(struct i2o_controller *c) 280static int i2o_iop_quiesce(struct i2o_controller *c)
303{ 281{
304 struct i2o_message __iomem *msg; 282 struct i2o_message *msg;
305 u32 m;
306 i2o_status_block *sb = c->status_block.virt; 283 i2o_status_block *sb = c->status_block.virt;
307 int rc; 284 int rc;
308 285
@@ -313,16 +290,17 @@ static int i2o_iop_quiesce(struct i2o_controller *c)
313 (sb->iop_state != ADAPTER_STATE_OPERATIONAL)) 290 (sb->iop_state != ADAPTER_STATE_OPERATIONAL))
314 return 0; 291 return 0;
315 292
316 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 293 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
317 if (m == I2O_QUEUE_EMPTY) 294 if (IS_ERR(msg))
318 return -ETIMEDOUT; 295 return PTR_ERR(msg);
319 296
320 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 297 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
321 writel(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | ADAPTER_TID, 298 msg->u.head[1] =
322 &msg->u.head[1]); 299 cpu_to_le32(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 |
300 ADAPTER_TID);
323 301
324 /* Long timeout needed for quiesce if lots of devices */ 302 /* Long timeout needed for quiesce if lots of devices */
325 if ((rc = i2o_msg_post_wait(c, m, 240))) 303 if ((rc = i2o_msg_post_wait(c, msg, 240)))
326 osm_info("%s: Unable to quiesce (status=%#x).\n", c->name, -rc); 304 osm_info("%s: Unable to quiesce (status=%#x).\n", c->name, -rc);
327 else 305 else
328 osm_debug("%s: Quiesced.\n", c->name); 306 osm_debug("%s: Quiesced.\n", c->name);
@@ -342,8 +320,7 @@ static int i2o_iop_quiesce(struct i2o_controller *c)
342 */ 320 */
343static int i2o_iop_enable(struct i2o_controller *c) 321static int i2o_iop_enable(struct i2o_controller *c)
344{ 322{
345 struct i2o_message __iomem *msg; 323 struct i2o_message *msg;
346 u32 m;
347 i2o_status_block *sb = c->status_block.virt; 324 i2o_status_block *sb = c->status_block.virt;
348 int rc; 325 int rc;
349 326
@@ -353,16 +330,17 @@ static int i2o_iop_enable(struct i2o_controller *c)
353 if (sb->iop_state != ADAPTER_STATE_READY) 330 if (sb->iop_state != ADAPTER_STATE_READY)
354 return -EINVAL; 331 return -EINVAL;
355 332
356 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 333 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
357 if (m == I2O_QUEUE_EMPTY) 334 if (IS_ERR(msg))
358 return -ETIMEDOUT; 335 return PTR_ERR(msg);
359 336
360 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 337 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
361 writel(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | ADAPTER_TID, 338 msg->u.head[1] =
362 &msg->u.head[1]); 339 cpu_to_le32(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 |
340 ADAPTER_TID);
363 341
364 /* How long of a timeout do we need? */ 342 /* How long of a timeout do we need? */
365 if ((rc = i2o_msg_post_wait(c, m, 240))) 343 if ((rc = i2o_msg_post_wait(c, msg, 240)))
366 osm_err("%s: Could not enable (status=%#x).\n", c->name, -rc); 344 osm_err("%s: Could not enable (status=%#x).\n", c->name, -rc);
367 else 345 else
368 osm_debug("%s: Enabled.\n", c->name); 346 osm_debug("%s: Enabled.\n", c->name);
@@ -413,22 +391,22 @@ static inline void i2o_iop_enable_all(void)
413 */ 391 */
414static int i2o_iop_clear(struct i2o_controller *c) 392static int i2o_iop_clear(struct i2o_controller *c)
415{ 393{
416 struct i2o_message __iomem *msg; 394 struct i2o_message *msg;
417 u32 m;
418 int rc; 395 int rc;
419 396
420 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 397 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
421 if (m == I2O_QUEUE_EMPTY) 398 if (IS_ERR(msg))
422 return -ETIMEDOUT; 399 return PTR_ERR(msg);
423 400
424 /* Quiesce all IOPs first */ 401 /* Quiesce all IOPs first */
425 i2o_iop_quiesce_all(); 402 i2o_iop_quiesce_all();
426 403
427 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 404 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
428 writel(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 | ADAPTER_TID, 405 msg->u.head[1] =
429 &msg->u.head[1]); 406 cpu_to_le32(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 |
407 ADAPTER_TID);
430 408
431 if ((rc = i2o_msg_post_wait(c, m, 30))) 409 if ((rc = i2o_msg_post_wait(c, msg, 30)))
432 osm_info("%s: Unable to clear (status=%#x).\n", c->name, -rc); 410 osm_info("%s: Unable to clear (status=%#x).\n", c->name, -rc);
433 else 411 else
434 osm_debug("%s: Cleared.\n", c->name); 412 osm_debug("%s: Cleared.\n", c->name);
@@ -446,13 +424,13 @@ static int i2o_iop_clear(struct i2o_controller *c)
446 * Clear and (re)initialize IOP's outbound queue and post the message 424 * Clear and (re)initialize IOP's outbound queue and post the message
447 * frames to the IOP. 425 * frames to the IOP.
448 * 426 *
449 * Returns 0 on success or a negative errno code on failure. 427 * Returns 0 on success or negative error code on failure.
450 */ 428 */
451static int i2o_iop_init_outbound_queue(struct i2o_controller *c) 429static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
452{ 430{
453 volatile u8 *status = c->status.virt;
454 u32 m; 431 u32 m;
455 struct i2o_message __iomem *msg; 432 volatile u8 *status = c->status.virt;
433 struct i2o_message *msg;
456 ulong timeout; 434 ulong timeout;
457 int i; 435 int i;
458 436
@@ -460,23 +438,24 @@ static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
460 438
461 memset(c->status.virt, 0, 4); 439 memset(c->status.virt, 0, 4);
462 440
463 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 441 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
464 if (m == I2O_QUEUE_EMPTY) 442 if (IS_ERR(msg))
465 return -ETIMEDOUT; 443 return PTR_ERR(msg);
466 444
467 writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); 445 msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
468 writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID, 446 msg->u.head[1] =
469 &msg->u.head[1]); 447 cpu_to_le32(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 |
470 writel(i2o_exec_driver.context, &msg->u.s.icntxt); 448 ADAPTER_TID);
471 writel(0x00000000, &msg->u.s.tcntxt); 449 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
472 writel(PAGE_SIZE, &msg->body[0]); 450 msg->u.s.tcntxt = cpu_to_le32(0x00000000);
451 msg->body[0] = cpu_to_le32(PAGE_SIZE);
473 /* Outbound msg frame size in words and Initcode */ 452 /* Outbound msg frame size in words and Initcode */
474 writel(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]); 453 msg->body[1] = cpu_to_le32(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80);
475 writel(0xd0000004, &msg->body[2]); 454 msg->body[2] = cpu_to_le32(0xd0000004);
476 writel(i2o_dma_low(c->status.phys), &msg->body[3]); 455 msg->body[3] = cpu_to_le32(i2o_dma_low(c->status.phys));
477 writel(i2o_dma_high(c->status.phys), &msg->body[4]); 456 msg->body[4] = cpu_to_le32(i2o_dma_high(c->status.phys));
478 457
479 i2o_msg_post(c, m); 458 i2o_msg_post(c, msg);
480 459
481 timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ; 460 timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ;
482 while (*status <= I2O_CMD_IN_PROGRESS) { 461 while (*status <= I2O_CMD_IN_PROGRESS) {
@@ -511,34 +490,34 @@ static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
511static int i2o_iop_reset(struct i2o_controller *c) 490static int i2o_iop_reset(struct i2o_controller *c)
512{ 491{
513 volatile u8 *status = c->status.virt; 492 volatile u8 *status = c->status.virt;
514 struct i2o_message __iomem *msg; 493 struct i2o_message *msg;
515 u32 m;
516 unsigned long timeout; 494 unsigned long timeout;
517 i2o_status_block *sb = c->status_block.virt; 495 i2o_status_block *sb = c->status_block.virt;
518 int rc = 0; 496 int rc = 0;
519 497
520 osm_debug("%s: Resetting controller\n", c->name); 498 osm_debug("%s: Resetting controller\n", c->name);
521 499
522 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 500 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
523 if (m == I2O_QUEUE_EMPTY) 501 if (IS_ERR(msg))
524 return -ETIMEDOUT; 502 return PTR_ERR(msg);
525 503
526 memset(c->status_block.virt, 0, 8); 504 memset(c->status_block.virt, 0, 8);
527 505
528 /* Quiesce all IOPs first */ 506 /* Quiesce all IOPs first */
529 i2o_iop_quiesce_all(); 507 i2o_iop_quiesce_all();
530 508
531 writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 509 msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0);
532 writel(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 | ADAPTER_TID, 510 msg->u.head[1] =
533 &msg->u.head[1]); 511 cpu_to_le32(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 |
534 writel(i2o_exec_driver.context, &msg->u.s.icntxt); 512 ADAPTER_TID);
535 writel(0, &msg->u.s.tcntxt); //FIXME: use reasonable transaction context 513 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
536 writel(0, &msg->body[0]); 514 msg->u.s.tcntxt = cpu_to_le32(0x00000000);
537 writel(0, &msg->body[1]); 515 msg->body[0] = cpu_to_le32(0x00000000);
538 writel(i2o_dma_low(c->status.phys), &msg->body[2]); 516 msg->body[1] = cpu_to_le32(0x00000000);
539 writel(i2o_dma_high(c->status.phys), &msg->body[3]); 517 msg->body[2] = cpu_to_le32(i2o_dma_low(c->status.phys));
518 msg->body[3] = cpu_to_le32(i2o_dma_high(c->status.phys));
540 519
541 i2o_msg_post(c, m); 520 i2o_msg_post(c, msg);
542 521
543 /* Wait for a reply */ 522 /* Wait for a reply */
544 timeout = jiffies + I2O_TIMEOUT_RESET * HZ; 523 timeout = jiffies + I2O_TIMEOUT_RESET * HZ;
@@ -567,18 +546,15 @@ static int i2o_iop_reset(struct i2o_controller *c)
567 osm_debug("%s: Reset in progress, waiting for reboot...\n", 546 osm_debug("%s: Reset in progress, waiting for reboot...\n",
568 c->name); 547 c->name);
569 548
570 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET); 549 while (IS_ERR(msg = i2o_msg_get_wait(c, I2O_TIMEOUT_RESET))) {
571 while (m == I2O_QUEUE_EMPTY) {
572 if (time_after(jiffies, timeout)) { 550 if (time_after(jiffies, timeout)) {
573 osm_err("%s: IOP reset timeout.\n", c->name); 551 osm_err("%s: IOP reset timeout.\n", c->name);
574 rc = -ETIMEDOUT; 552 rc = PTR_ERR(msg);
575 goto exit; 553 goto exit;
576 } 554 }
577 schedule_timeout_uninterruptible(1); 555 schedule_timeout_uninterruptible(1);
578
579 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET);
580 } 556 }
581 i2o_msg_nop(c, m); 557 i2o_msg_nop(c, msg);
582 558
583 /* from here all quiesce commands are safe */ 559 /* from here all quiesce commands are safe */
584 c->no_quiesce = 0; 560 c->no_quiesce = 0;
@@ -686,8 +662,7 @@ static int i2o_iop_activate(struct i2o_controller *c)
686 */ 662 */
687static int i2o_iop_systab_set(struct i2o_controller *c) 663static int i2o_iop_systab_set(struct i2o_controller *c)
688{ 664{
689 struct i2o_message __iomem *msg; 665 struct i2o_message *msg;
690 u32 m;
691 i2o_status_block *sb = c->status_block.virt; 666 i2o_status_block *sb = c->status_block.virt;
692 struct device *dev = &c->pdev->dev; 667 struct device *dev = &c->pdev->dev;
693 struct resource *root; 668 struct resource *root;
@@ -735,20 +710,21 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
735 } 710 }
736 } 711 }
737 712
738 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 713 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
739 if (m == I2O_QUEUE_EMPTY) 714 if (IS_ERR(msg))
740 return -ETIMEDOUT; 715 return PTR_ERR(msg);
741 716
742 i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len, 717 i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len,
743 PCI_DMA_TODEVICE); 718 PCI_DMA_TODEVICE);
744 if (!i2o_systab.phys) { 719 if (!i2o_systab.phys) {
745 i2o_msg_nop(c, m); 720 i2o_msg_nop(c, msg);
746 return -ENOMEM; 721 return -ENOMEM;
747 } 722 }
748 723
749 writel(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6, &msg->u.head[0]); 724 msg->u.head[0] = cpu_to_le32(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6);
750 writel(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | ADAPTER_TID, 725 msg->u.head[1] =
751 &msg->u.head[1]); 726 cpu_to_le32(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 |
727 ADAPTER_TID);
752 728
753 /* 729 /*
754 * Provide three SGL-elements: 730 * Provide three SGL-elements:
@@ -760,16 +736,16 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
760 * same table to everyone. We have to go remap it for them all 736 * same table to everyone. We have to go remap it for them all
761 */ 737 */
762 738
763 writel(c->unit + 2, &msg->body[0]); 739 msg->body[0] = cpu_to_le32(c->unit + 2);
764 writel(0, &msg->body[1]); 740 msg->body[1] = cpu_to_le32(0x00000000);
765 writel(0x54000000 | i2o_systab.len, &msg->body[2]); 741 msg->body[2] = cpu_to_le32(0x54000000 | i2o_systab.len);
766 writel(i2o_systab.phys, &msg->body[3]); 742 msg->body[3] = cpu_to_le32(i2o_systab.phys);
767 writel(0x54000000 | sb->current_mem_size, &msg->body[4]); 743 msg->body[4] = cpu_to_le32(0x54000000 | sb->current_mem_size);
768 writel(sb->current_mem_base, &msg->body[5]); 744 msg->body[5] = cpu_to_le32(sb->current_mem_base);
769 writel(0xd4000000 | sb->current_io_size, &msg->body[6]); 745 msg->body[6] = cpu_to_le32(0xd4000000 | sb->current_io_size);
770 writel(sb->current_io_base, &msg->body[6]); 746 msg->body[6] = cpu_to_le32(sb->current_io_base);
771 747
772 rc = i2o_msg_post_wait(c, m, 120); 748 rc = i2o_msg_post_wait(c, msg, 120);
773 749
774 dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len, 750 dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len,
775 PCI_DMA_TODEVICE); 751 PCI_DMA_TODEVICE);
@@ -952,30 +928,30 @@ static int i2o_parse_hrt(struct i2o_controller *c)
952 */ 928 */
953int i2o_status_get(struct i2o_controller *c) 929int i2o_status_get(struct i2o_controller *c)
954{ 930{
955 struct i2o_message __iomem *msg; 931 struct i2o_message *msg;
956 u32 m;
957 volatile u8 *status_block; 932 volatile u8 *status_block;
958 unsigned long timeout; 933 unsigned long timeout;
959 934
960 status_block = (u8 *) c->status_block.virt; 935 status_block = (u8 *) c->status_block.virt;
961 memset(c->status_block.virt, 0, sizeof(i2o_status_block)); 936 memset(c->status_block.virt, 0, sizeof(i2o_status_block));
962 937
963 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 938 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
964 if (m == I2O_QUEUE_EMPTY) 939 if (IS_ERR(msg))
965 return -ETIMEDOUT; 940 return PTR_ERR(msg);
966 941
967 writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 942 msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_0);
968 writel(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | ADAPTER_TID, 943 msg->u.head[1] =
969 &msg->u.head[1]); 944 cpu_to_le32(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 |
970 writel(i2o_exec_driver.context, &msg->u.s.icntxt); 945 ADAPTER_TID);
971 writel(0, &msg->u.s.tcntxt); // FIXME: use resonable transaction context 946 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
972 writel(0, &msg->body[0]); 947 msg->u.s.tcntxt = cpu_to_le32(0x00000000);
973 writel(0, &msg->body[1]); 948 msg->body[0] = cpu_to_le32(0x00000000);
974 writel(i2o_dma_low(c->status_block.phys), &msg->body[2]); 949 msg->body[1] = cpu_to_le32(0x00000000);
975 writel(i2o_dma_high(c->status_block.phys), &msg->body[3]); 950 msg->body[2] = cpu_to_le32(i2o_dma_low(c->status_block.phys));
976 writel(sizeof(i2o_status_block), &msg->body[4]); /* always 88 bytes */ 951 msg->body[3] = cpu_to_le32(i2o_dma_high(c->status_block.phys));
952 msg->body[4] = cpu_to_le32(sizeof(i2o_status_block)); /* always 88 bytes */
977 953
978 i2o_msg_post(c, m); 954 i2o_msg_post(c, msg);
979 955
980 /* Wait for a reply */ 956 /* Wait for a reply */
981 timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ; 957 timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ;
@@ -1013,20 +989,20 @@ static int i2o_hrt_get(struct i2o_controller *c)
1013 struct device *dev = &c->pdev->dev; 989 struct device *dev = &c->pdev->dev;
1014 990
1015 for (i = 0; i < I2O_HRT_GET_TRIES; i++) { 991 for (i = 0; i < I2O_HRT_GET_TRIES; i++) {
1016 struct i2o_message __iomem *msg; 992 struct i2o_message *msg;
1017 u32 m;
1018 993
1019 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 994 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
1020 if (m == I2O_QUEUE_EMPTY) 995 if (IS_ERR(msg))
1021 return -ETIMEDOUT; 996 return PTR_ERR(msg);
1022 997
1023 writel(SIX_WORD_MSG_SIZE | SGL_OFFSET_4, &msg->u.head[0]); 998 msg->u.head[0] = cpu_to_le32(SIX_WORD_MSG_SIZE | SGL_OFFSET_4);
1024 writel(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | ADAPTER_TID, 999 msg->u.head[1] =
1025 &msg->u.head[1]); 1000 cpu_to_le32(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 |
1026 writel(0xd0000000 | c->hrt.len, &msg->body[0]); 1001 ADAPTER_TID);
1027 writel(c->hrt.phys, &msg->body[1]); 1002 msg->body[0] = cpu_to_le32(0xd0000000 | c->hrt.len);
1003 msg->body[1] = cpu_to_le32(c->hrt.phys);
1028 1004
1029 rc = i2o_msg_post_wait_mem(c, m, 20, &c->hrt); 1005 rc = i2o_msg_post_wait_mem(c, msg, 20, &c->hrt);
1030 1006
1031 if (rc < 0) { 1007 if (rc < 0) {
1032 osm_err("%s: Unable to get HRT (status=%#x)\n", c->name, 1008 osm_err("%s: Unable to get HRT (status=%#x)\n", c->name,
@@ -1056,6 +1032,7 @@ static int i2o_hrt_get(struct i2o_controller *c)
1056 */ 1032 */
1057void i2o_iop_free(struct i2o_controller *c) 1033void i2o_iop_free(struct i2o_controller *c)
1058{ 1034{
1035 i2o_pool_free(&c->in_msg);
1059 kfree(c); 1036 kfree(c);
1060}; 1037};
1061 1038
@@ -1080,7 +1057,7 @@ static struct class *i2o_controller_class;
1080 * i2o_iop_alloc - Allocate and initialize a i2o_controller struct 1057 * i2o_iop_alloc - Allocate and initialize a i2o_controller struct
1081 * 1058 *
1082 * Allocate the necessary memory for a i2o_controller struct and 1059 * Allocate the necessary memory for a i2o_controller struct and
1083 * initialize the lists. 1060 * initialize the lists and message mempool.
1084 * 1061 *
1085 * Returns a pointer to the I2O controller or a negative error code on 1062 * Returns a pointer to the I2O controller or a negative error code on
1086 * failure. 1063 * failure.
@@ -1089,6 +1066,7 @@ struct i2o_controller *i2o_iop_alloc(void)
1089{ 1066{
1090 static int unit = 0; /* 0 and 1 are NULL IOP and Local Host */ 1067 static int unit = 0; /* 0 and 1 are NULL IOP and Local Host */
1091 struct i2o_controller *c; 1068 struct i2o_controller *c;
1069 char poolname[32];
1092 1070
1093 c = kmalloc(sizeof(*c), GFP_KERNEL); 1071 c = kmalloc(sizeof(*c), GFP_KERNEL);
1094 if (!c) { 1072 if (!c) {
@@ -1098,11 +1076,20 @@ struct i2o_controller *i2o_iop_alloc(void)
1098 } 1076 }
1099 memset(c, 0, sizeof(*c)); 1077 memset(c, 0, sizeof(*c));
1100 1078
1079 c->unit = unit++;
1080 sprintf(c->name, "iop%d", c->unit);
1081
1082 snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name);
1083 if (i2o_pool_alloc
1084 (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4,
1085 I2O_MSG_INPOOL_MIN)) {
1086 kfree(c);
1087 return ERR_PTR(-ENOMEM);
1088 };
1089
1101 INIT_LIST_HEAD(&c->devices); 1090 INIT_LIST_HEAD(&c->devices);
1102 spin_lock_init(&c->lock); 1091 spin_lock_init(&c->lock);
1103 init_MUTEX(&c->lct_lock); 1092 init_MUTEX(&c->lct_lock);
1104 c->unit = unit++;
1105 sprintf(c->name, "iop%d", c->unit);
1106 1093
1107 device_initialize(&c->device); 1094 device_initialize(&c->device);
1108 1095
@@ -1199,28 +1186,27 @@ int i2o_iop_add(struct i2o_controller *c)
1199 * is waited for, or expected. If you do not want further notifications, 1186 * is waited for, or expected. If you do not want further notifications,
1200 * call the i2o_event_register again with a evt_mask of 0. 1187 * call the i2o_event_register again with a evt_mask of 0.
1201 * 1188 *
1202 * Returns 0 on success or -ETIMEDOUT if no message could be fetched for 1189 * Returns 0 on success or negative error code on failure.
1203 * sending the request.
1204 */ 1190 */
1205int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv, 1191int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv,
1206 int tcntxt, u32 evt_mask) 1192 int tcntxt, u32 evt_mask)
1207{ 1193{
1208 struct i2o_controller *c = dev->iop; 1194 struct i2o_controller *c = dev->iop;
1209 struct i2o_message __iomem *msg; 1195 struct i2o_message *msg;
1210 u32 m;
1211 1196
1212 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 1197 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
1213 if (m == I2O_QUEUE_EMPTY) 1198 if (IS_ERR(msg))
1214 return -ETIMEDOUT; 1199 return PTR_ERR(msg);
1215 1200
1216 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 1201 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
1217 writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->lct_data. 1202 msg->u.head[1] =
1218 tid, &msg->u.head[1]); 1203 cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->
1219 writel(drv->context, &msg->u.s.icntxt); 1204 lct_data.tid);
1220 writel(tcntxt, &msg->u.s.tcntxt); 1205 msg->u.s.icntxt = cpu_to_le32(drv->context);
1221 writel(evt_mask, &msg->body[0]); 1206 msg->u.s.tcntxt = cpu_to_le32(tcntxt);
1207 msg->body[0] = cpu_to_le32(evt_mask);
1222 1208
1223 i2o_msg_post(c, m); 1209 i2o_msg_post(c, msg);
1224 1210
1225 return 0; 1211 return 0;
1226}; 1212};
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
index ee7075fa1ec3..329d482eee81 100644
--- a/drivers/message/i2o/pci.c
+++ b/drivers/message/i2o/pci.c
@@ -483,4 +483,5 @@ void __exit i2o_pci_exit(void)
483{ 483{
484 pci_unregister_driver(&i2o_pci_driver); 484 pci_unregister_driver(&i2o_pci_driver);
485}; 485};
486
486MODULE_DEVICE_TABLE(pci, i2o_pci_ids); 487MODULE_DEVICE_TABLE(pci, i2o_pci_ids);