author | Markus Lidel <Markus.Lidel@shadowconnect.com> | 2006-01-06 03:19:29 -0500
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-01-06 11:33:53 -0500
commit | a1a5ea70a6e9db6332b27fe2d96666e17aa1436b (patch)
tree | 7edfe920aa40af94464ab05539d449dbe5689254 /drivers/message/i2o/iop.c
parent | 347a8dc3b815f0c0fa62a1df075184ffe4cbdcf1 (diff)
[PATCH] I2O: changed I2O API to create I2O messages in kernel memory
Changed the I2O API to create I2O messages first in kernel memory and then
transfer them at once over the PCI bus, instead of sending each quad-word
over the PCI bus individually.
Signed-off-by: Markus Lidel <Markus.Lidel@shadowconnect.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
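For context, a minimal before/after sketch of the calling convention this patch introduces, distilled from the i2o_iop_quiesce() hunk in the diff below. The wrapper names old_style_quiesce()/new_style_quiesce() are made up for illustration, and the sketch assumes the usual I2O definitions from the i2o headers (struct i2o_controller, struct i2o_message, the *_MSG_SIZE, SGL_OFFSET_* and TID constants); it is not a drop-in replacement for any function in the patch.

/*
 * Old convention (pre-patch, hypothetical wrapper for illustration):
 * the caller got back an MFA cookie 'm' plus an __iomem pointer and
 * built the frame directly in controller memory with writel(), one
 * quad-word PCI write at a time.
 */
static int old_style_quiesce(struct i2o_controller *c)
{
        struct i2o_message __iomem *msg;
        u32 m;

        m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
        if (m == I2O_QUEUE_EMPTY)
                return -ETIMEDOUT;

        writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
        writel(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | ADAPTER_TID,
               &msg->u.head[1]);

        return i2o_msg_post_wait(c, m, 240);
}

/*
 * New convention (this patch, hypothetical wrapper for illustration):
 * i2o_msg_get_wait() now returns a message frame in ordinary kernel
 * memory, allocated from the controller's newly added in_msg pool, or
 * an ERR_PTR() on failure. The frame is filled with plain cpu_to_le32()
 * stores and handed to the controller as a whole by i2o_msg_post() /
 * i2o_msg_post_wait().
 */
static int new_style_quiesce(struct i2o_controller *c)
{
        struct i2o_message *msg;

        msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
        if (IS_ERR(msg))
                return PTR_ERR(msg);

        msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
        msg->u.head[1] = cpu_to_le32(I2O_CMD_SYS_QUIESCE << 24 |
                                     HOST_TID << 12 | ADAPTER_TID);

        return i2o_msg_post_wait(c, msg, 240);
}

Note the matching change in error handling: where callers previously tested the returned cookie against I2O_QUEUE_EMPTY and hard-coded -ETIMEDOUT, they now test the returned pointer with IS_ERR() and propagate PTR_ERR(msg), as the hunks below show for each call site.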
Diffstat (limited to 'drivers/message/i2o/iop.c')
-rw-r--r-- | drivers/message/i2o/iop.c | 296 |
1 file changed, 141 insertions, 155 deletions
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c index 4eb53258842e..f86abb42bf89 100644 --- a/drivers/message/i2o/iop.c +++ b/drivers/message/i2o/iop.c | |||
@@ -47,27 +47,6 @@ static struct i2o_dma i2o_systab; | |||
47 | static int i2o_hrt_get(struct i2o_controller *c); | 47 | static int i2o_hrt_get(struct i2o_controller *c); |
48 | 48 | ||
49 | /** | 49 | /** |
50 | * i2o_msg_nop - Returns a message which is not used | ||
51 | * @c: I2O controller from which the message was created | ||
52 | * @m: message which should be returned | ||
53 | * | ||
54 | * If you fetch a message via i2o_msg_get, and can't use it, you must | ||
55 | * return the message with this function. Otherwise the message frame | ||
56 | * is lost. | ||
57 | */ | ||
58 | void i2o_msg_nop(struct i2o_controller *c, u32 m) | ||
59 | { | ||
60 | struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m); | ||
61 | |||
62 | writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | ||
63 | writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID, | ||
64 | &msg->u.head[1]); | ||
65 | writel(0, &msg->u.head[2]); | ||
66 | writel(0, &msg->u.head[3]); | ||
67 | i2o_msg_post(c, m); | ||
68 | }; | ||
69 | |||
70 | /** | ||
71 | * i2o_msg_get_wait - obtain an I2O message from the IOP | 50 | * i2o_msg_get_wait - obtain an I2O message from the IOP |
72 | * @c: I2O controller | 51 | * @c: I2O controller |
73 | * @msg: pointer to a I2O message pointer | 52 | * @msg: pointer to a I2O message pointer |
@@ -81,22 +60,21 @@ void i2o_msg_nop(struct i2o_controller *c, u32 m) | |||
81 | * address from the read port (see the i2o spec). If no message is | 60 | * address from the read port (see the i2o spec). If no message is |
82 | * available returns I2O_QUEUE_EMPTY and msg is leaved untouched. | 61 | * available returns I2O_QUEUE_EMPTY and msg is leaved untouched. |
83 | */ | 62 | */ |
84 | u32 i2o_msg_get_wait(struct i2o_controller *c, | 63 | struct i2o_message *i2o_msg_get_wait(struct i2o_controller *c, int wait) |
85 | struct i2o_message __iomem ** msg, int wait) | ||
86 | { | 64 | { |
87 | unsigned long timeout = jiffies + wait * HZ; | 65 | unsigned long timeout = jiffies + wait * HZ; |
88 | u32 m; | 66 | struct i2o_message *msg; |
89 | 67 | ||
90 | while ((m = i2o_msg_get(c, msg)) == I2O_QUEUE_EMPTY) { | 68 | while (IS_ERR(msg = i2o_msg_get(c))) { |
91 | if (time_after(jiffies, timeout)) { | 69 | if (time_after(jiffies, timeout)) { |
92 | osm_debug("%s: Timeout waiting for message frame.\n", | 70 | osm_debug("%s: Timeout waiting for message frame.\n", |
93 | c->name); | 71 | c->name); |
94 | return I2O_QUEUE_EMPTY; | 72 | return ERR_PTR(-ETIMEDOUT); |
95 | } | 73 | } |
96 | schedule_timeout_uninterruptible(1); | 74 | schedule_timeout_uninterruptible(1); |
97 | } | 75 | } |
98 | 76 | ||
99 | return m; | 77 | return msg; |
100 | }; | 78 | }; |
101 | 79 | ||
102 | #if BITS_PER_LONG == 64 | 80 | #if BITS_PER_LONG == 64 |
@@ -301,8 +279,7 @@ struct i2o_device *i2o_iop_find_device(struct i2o_controller *c, u16 tid) | |||
301 | */ | 279 | */ |
302 | static int i2o_iop_quiesce(struct i2o_controller *c) | 280 | static int i2o_iop_quiesce(struct i2o_controller *c) |
303 | { | 281 | { |
304 | struct i2o_message __iomem *msg; | 282 | struct i2o_message *msg; |
305 | u32 m; | ||
306 | i2o_status_block *sb = c->status_block.virt; | 283 | i2o_status_block *sb = c->status_block.virt; |
307 | int rc; | 284 | int rc; |
308 | 285 | ||
@@ -313,16 +290,17 @@ static int i2o_iop_quiesce(struct i2o_controller *c) | |||
313 | (sb->iop_state != ADAPTER_STATE_OPERATIONAL)) | 290 | (sb->iop_state != ADAPTER_STATE_OPERATIONAL)) |
314 | return 0; | 291 | return 0; |
315 | 292 | ||
316 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | 293 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); |
317 | if (m == I2O_QUEUE_EMPTY) | 294 | if (IS_ERR(msg)) |
318 | return -ETIMEDOUT; | 295 | return PTR_ERR(msg); |
319 | 296 | ||
320 | writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | 297 | msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); |
321 | writel(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | ADAPTER_TID, | 298 | msg->u.head[1] = |
322 | &msg->u.head[1]); | 299 | cpu_to_le32(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | |
300 | ADAPTER_TID); | ||
323 | 301 | ||
324 | /* Long timeout needed for quiesce if lots of devices */ | 302 | /* Long timeout needed for quiesce if lots of devices */ |
325 | if ((rc = i2o_msg_post_wait(c, m, 240))) | 303 | if ((rc = i2o_msg_post_wait(c, msg, 240))) |
326 | osm_info("%s: Unable to quiesce (status=%#x).\n", c->name, -rc); | 304 | osm_info("%s: Unable to quiesce (status=%#x).\n", c->name, -rc); |
327 | else | 305 | else |
328 | osm_debug("%s: Quiesced.\n", c->name); | 306 | osm_debug("%s: Quiesced.\n", c->name); |
@@ -342,8 +320,7 @@ static int i2o_iop_quiesce(struct i2o_controller *c) | |||
342 | */ | 320 | */ |
343 | static int i2o_iop_enable(struct i2o_controller *c) | 321 | static int i2o_iop_enable(struct i2o_controller *c) |
344 | { | 322 | { |
345 | struct i2o_message __iomem *msg; | 323 | struct i2o_message *msg; |
346 | u32 m; | ||
347 | i2o_status_block *sb = c->status_block.virt; | 324 | i2o_status_block *sb = c->status_block.virt; |
348 | int rc; | 325 | int rc; |
349 | 326 | ||
@@ -353,16 +330,17 @@ static int i2o_iop_enable(struct i2o_controller *c) | |||
353 | if (sb->iop_state != ADAPTER_STATE_READY) | 330 | if (sb->iop_state != ADAPTER_STATE_READY) |
354 | return -EINVAL; | 331 | return -EINVAL; |
355 | 332 | ||
356 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | 333 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); |
357 | if (m == I2O_QUEUE_EMPTY) | 334 | if (IS_ERR(msg)) |
358 | return -ETIMEDOUT; | 335 | return PTR_ERR(msg); |
359 | 336 | ||
360 | writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | 337 | msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); |
361 | writel(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | ADAPTER_TID, | 338 | msg->u.head[1] = |
362 | &msg->u.head[1]); | 339 | cpu_to_le32(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | |
340 | ADAPTER_TID); | ||
363 | 341 | ||
364 | /* How long of a timeout do we need? */ | 342 | /* How long of a timeout do we need? */ |
365 | if ((rc = i2o_msg_post_wait(c, m, 240))) | 343 | if ((rc = i2o_msg_post_wait(c, msg, 240))) |
366 | osm_err("%s: Could not enable (status=%#x).\n", c->name, -rc); | 344 | osm_err("%s: Could not enable (status=%#x).\n", c->name, -rc); |
367 | else | 345 | else |
368 | osm_debug("%s: Enabled.\n", c->name); | 346 | osm_debug("%s: Enabled.\n", c->name); |
@@ -413,22 +391,22 @@ static inline void i2o_iop_enable_all(void) | |||
413 | */ | 391 | */ |
414 | static int i2o_iop_clear(struct i2o_controller *c) | 392 | static int i2o_iop_clear(struct i2o_controller *c) |
415 | { | 393 | { |
416 | struct i2o_message __iomem *msg; | 394 | struct i2o_message *msg; |
417 | u32 m; | ||
418 | int rc; | 395 | int rc; |
419 | 396 | ||
420 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | 397 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); |
421 | if (m == I2O_QUEUE_EMPTY) | 398 | if (IS_ERR(msg)) |
422 | return -ETIMEDOUT; | 399 | return PTR_ERR(msg); |
423 | 400 | ||
424 | /* Quiesce all IOPs first */ | 401 | /* Quiesce all IOPs first */ |
425 | i2o_iop_quiesce_all(); | 402 | i2o_iop_quiesce_all(); |
426 | 403 | ||
427 | writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | 404 | msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); |
428 | writel(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 | ADAPTER_TID, | 405 | msg->u.head[1] = |
429 | &msg->u.head[1]); | 406 | cpu_to_le32(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 | |
407 | ADAPTER_TID); | ||
430 | 408 | ||
431 | if ((rc = i2o_msg_post_wait(c, m, 30))) | 409 | if ((rc = i2o_msg_post_wait(c, msg, 30))) |
432 | osm_info("%s: Unable to clear (status=%#x).\n", c->name, -rc); | 410 | osm_info("%s: Unable to clear (status=%#x).\n", c->name, -rc); |
433 | else | 411 | else |
434 | osm_debug("%s: Cleared.\n", c->name); | 412 | osm_debug("%s: Cleared.\n", c->name); |
@@ -446,13 +424,13 @@ static int i2o_iop_clear(struct i2o_controller *c) | |||
446 | * Clear and (re)initialize IOP's outbound queue and post the message | 424 | * Clear and (re)initialize IOP's outbound queue and post the message |
447 | * frames to the IOP. | 425 | * frames to the IOP. |
448 | * | 426 | * |
449 | * Returns 0 on success or a negative errno code on failure. | 427 | * Returns 0 on success or negative error code on failure. |
450 | */ | 428 | */ |
451 | static int i2o_iop_init_outbound_queue(struct i2o_controller *c) | 429 | static int i2o_iop_init_outbound_queue(struct i2o_controller *c) |
452 | { | 430 | { |
453 | volatile u8 *status = c->status.virt; | ||
454 | u32 m; | 431 | u32 m; |
455 | struct i2o_message __iomem *msg; | 432 | volatile u8 *status = c->status.virt; |
433 | struct i2o_message *msg; | ||
456 | ulong timeout; | 434 | ulong timeout; |
457 | int i; | 435 | int i; |
458 | 436 | ||
@@ -460,23 +438,24 @@ static int i2o_iop_init_outbound_queue(struct i2o_controller *c) | |||
460 | 438 | ||
461 | memset(c->status.virt, 0, 4); | 439 | memset(c->status.virt, 0, 4); |
462 | 440 | ||
463 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | 441 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); |
464 | if (m == I2O_QUEUE_EMPTY) | 442 | if (IS_ERR(msg)) |
465 | return -ETIMEDOUT; | 443 | return PTR_ERR(msg); |
466 | 444 | ||
467 | writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); | 445 | msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6); |
468 | writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID, | 446 | msg->u.head[1] = |
469 | &msg->u.head[1]); | 447 | cpu_to_le32(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | |
470 | writel(i2o_exec_driver.context, &msg->u.s.icntxt); | 448 | ADAPTER_TID); |
471 | writel(0x00000000, &msg->u.s.tcntxt); | 449 | msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); |
472 | writel(PAGE_SIZE, &msg->body[0]); | 450 | msg->u.s.tcntxt = cpu_to_le32(0x00000000); |
451 | msg->body[0] = cpu_to_le32(PAGE_SIZE); | ||
473 | /* Outbound msg frame size in words and Initcode */ | 452 | /* Outbound msg frame size in words and Initcode */ |
474 | writel(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]); | 453 | msg->body[1] = cpu_to_le32(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80); |
475 | writel(0xd0000004, &msg->body[2]); | 454 | msg->body[2] = cpu_to_le32(0xd0000004); |
476 | writel(i2o_dma_low(c->status.phys), &msg->body[3]); | 455 | msg->body[3] = cpu_to_le32(i2o_dma_low(c->status.phys)); |
477 | writel(i2o_dma_high(c->status.phys), &msg->body[4]); | 456 | msg->body[4] = cpu_to_le32(i2o_dma_high(c->status.phys)); |
478 | 457 | ||
479 | i2o_msg_post(c, m); | 458 | i2o_msg_post(c, msg); |
480 | 459 | ||
481 | timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ; | 460 | timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ; |
482 | while (*status <= I2O_CMD_IN_PROGRESS) { | 461 | while (*status <= I2O_CMD_IN_PROGRESS) { |
@@ -511,34 +490,34 @@ static int i2o_iop_init_outbound_queue(struct i2o_controller *c) | |||
511 | static int i2o_iop_reset(struct i2o_controller *c) | 490 | static int i2o_iop_reset(struct i2o_controller *c) |
512 | { | 491 | { |
513 | volatile u8 *status = c->status.virt; | 492 | volatile u8 *status = c->status.virt; |
514 | struct i2o_message __iomem *msg; | 493 | struct i2o_message *msg; |
515 | u32 m; | ||
516 | unsigned long timeout; | 494 | unsigned long timeout; |
517 | i2o_status_block *sb = c->status_block.virt; | 495 | i2o_status_block *sb = c->status_block.virt; |
518 | int rc = 0; | 496 | int rc = 0; |
519 | 497 | ||
520 | osm_debug("%s: Resetting controller\n", c->name); | 498 | osm_debug("%s: Resetting controller\n", c->name); |
521 | 499 | ||
522 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | 500 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); |
523 | if (m == I2O_QUEUE_EMPTY) | 501 | if (IS_ERR(msg)) |
524 | return -ETIMEDOUT; | 502 | return PTR_ERR(msg); |
525 | 503 | ||
526 | memset(c->status_block.virt, 0, 8); | 504 | memset(c->status_block.virt, 0, 8); |
527 | 505 | ||
528 | /* Quiesce all IOPs first */ | 506 | /* Quiesce all IOPs first */ |
529 | i2o_iop_quiesce_all(); | 507 | i2o_iop_quiesce_all(); |
530 | 508 | ||
531 | writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | 509 | msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0); |
532 | writel(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 | ADAPTER_TID, | 510 | msg->u.head[1] = |
533 | &msg->u.head[1]); | 511 | cpu_to_le32(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 | |
534 | writel(i2o_exec_driver.context, &msg->u.s.icntxt); | 512 | ADAPTER_TID); |
535 | writel(0, &msg->u.s.tcntxt); //FIXME: use reasonable transaction context | 513 | msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); |
536 | writel(0, &msg->body[0]); | 514 | msg->u.s.tcntxt = cpu_to_le32(0x00000000); |
537 | writel(0, &msg->body[1]); | 515 | msg->body[0] = cpu_to_le32(0x00000000); |
538 | writel(i2o_dma_low(c->status.phys), &msg->body[2]); | 516 | msg->body[1] = cpu_to_le32(0x00000000); |
539 | writel(i2o_dma_high(c->status.phys), &msg->body[3]); | 517 | msg->body[2] = cpu_to_le32(i2o_dma_low(c->status.phys)); |
518 | msg->body[3] = cpu_to_le32(i2o_dma_high(c->status.phys)); | ||
540 | 519 | ||
541 | i2o_msg_post(c, m); | 520 | i2o_msg_post(c, msg); |
542 | 521 | ||
543 | /* Wait for a reply */ | 522 | /* Wait for a reply */ |
544 | timeout = jiffies + I2O_TIMEOUT_RESET * HZ; | 523 | timeout = jiffies + I2O_TIMEOUT_RESET * HZ; |
@@ -567,18 +546,15 @@ static int i2o_iop_reset(struct i2o_controller *c) | |||
567 | osm_debug("%s: Reset in progress, waiting for reboot...\n", | 546 | osm_debug("%s: Reset in progress, waiting for reboot...\n", |
568 | c->name); | 547 | c->name); |
569 | 548 | ||
570 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET); | 549 | while (IS_ERR(msg = i2o_msg_get_wait(c, I2O_TIMEOUT_RESET))) { |
571 | while (m == I2O_QUEUE_EMPTY) { | ||
572 | if (time_after(jiffies, timeout)) { | 550 | if (time_after(jiffies, timeout)) { |
573 | osm_err("%s: IOP reset timeout.\n", c->name); | 551 | osm_err("%s: IOP reset timeout.\n", c->name); |
574 | rc = -ETIMEDOUT; | 552 | rc = PTR_ERR(msg); |
575 | goto exit; | 553 | goto exit; |
576 | } | 554 | } |
577 | schedule_timeout_uninterruptible(1); | 555 | schedule_timeout_uninterruptible(1); |
578 | |||
579 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET); | ||
580 | } | 556 | } |
581 | i2o_msg_nop(c, m); | 557 | i2o_msg_nop(c, msg); |
582 | 558 | ||
583 | /* from here all quiesce commands are safe */ | 559 | /* from here all quiesce commands are safe */ |
584 | c->no_quiesce = 0; | 560 | c->no_quiesce = 0; |
@@ -686,8 +662,7 @@ static int i2o_iop_activate(struct i2o_controller *c) | |||
686 | */ | 662 | */ |
687 | static int i2o_iop_systab_set(struct i2o_controller *c) | 663 | static int i2o_iop_systab_set(struct i2o_controller *c) |
688 | { | 664 | { |
689 | struct i2o_message __iomem *msg; | 665 | struct i2o_message *msg; |
690 | u32 m; | ||
691 | i2o_status_block *sb = c->status_block.virt; | 666 | i2o_status_block *sb = c->status_block.virt; |
692 | struct device *dev = &c->pdev->dev; | 667 | struct device *dev = &c->pdev->dev; |
693 | struct resource *root; | 668 | struct resource *root; |
@@ -735,20 +710,21 @@ static int i2o_iop_systab_set(struct i2o_controller *c) | |||
735 | } | 710 | } |
736 | } | 711 | } |
737 | 712 | ||
738 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | 713 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); |
739 | if (m == I2O_QUEUE_EMPTY) | 714 | if (IS_ERR(msg)) |
740 | return -ETIMEDOUT; | 715 | return PTR_ERR(msg); |
741 | 716 | ||
742 | i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len, | 717 | i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len, |
743 | PCI_DMA_TODEVICE); | 718 | PCI_DMA_TODEVICE); |
744 | if (!i2o_systab.phys) { | 719 | if (!i2o_systab.phys) { |
745 | i2o_msg_nop(c, m); | 720 | i2o_msg_nop(c, msg); |
746 | return -ENOMEM; | 721 | return -ENOMEM; |
747 | } | 722 | } |
748 | 723 | ||
749 | writel(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6, &msg->u.head[0]); | 724 | msg->u.head[0] = cpu_to_le32(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6); |
750 | writel(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | ADAPTER_TID, | 725 | msg->u.head[1] = |
751 | &msg->u.head[1]); | 726 | cpu_to_le32(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | |
727 | ADAPTER_TID); | ||
752 | 728 | ||
753 | /* | 729 | /* |
754 | * Provide three SGL-elements: | 730 | * Provide three SGL-elements: |
@@ -760,16 +736,16 @@ static int i2o_iop_systab_set(struct i2o_controller *c) | |||
760 | * same table to everyone. We have to go remap it for them all | 736 | * same table to everyone. We have to go remap it for them all |
761 | */ | 737 | */ |
762 | 738 | ||
763 | writel(c->unit + 2, &msg->body[0]); | 739 | msg->body[0] = cpu_to_le32(c->unit + 2); |
764 | writel(0, &msg->body[1]); | 740 | msg->body[1] = cpu_to_le32(0x00000000); |
765 | writel(0x54000000 | i2o_systab.len, &msg->body[2]); | 741 | msg->body[2] = cpu_to_le32(0x54000000 | i2o_systab.len); |
766 | writel(i2o_systab.phys, &msg->body[3]); | 742 | msg->body[3] = cpu_to_le32(i2o_systab.phys); |
767 | writel(0x54000000 | sb->current_mem_size, &msg->body[4]); | 743 | msg->body[4] = cpu_to_le32(0x54000000 | sb->current_mem_size); |
768 | writel(sb->current_mem_base, &msg->body[5]); | 744 | msg->body[5] = cpu_to_le32(sb->current_mem_base); |
769 | writel(0xd4000000 | sb->current_io_size, &msg->body[6]); | 745 | msg->body[6] = cpu_to_le32(0xd4000000 | sb->current_io_size); |
770 | writel(sb->current_io_base, &msg->body[6]); | 746 | msg->body[6] = cpu_to_le32(sb->current_io_base); |
771 | 747 | ||
772 | rc = i2o_msg_post_wait(c, m, 120); | 748 | rc = i2o_msg_post_wait(c, msg, 120); |
773 | 749 | ||
774 | dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len, | 750 | dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len, |
775 | PCI_DMA_TODEVICE); | 751 | PCI_DMA_TODEVICE); |
@@ -952,30 +928,30 @@ static int i2o_parse_hrt(struct i2o_controller *c) | |||
952 | */ | 928 | */ |
953 | int i2o_status_get(struct i2o_controller *c) | 929 | int i2o_status_get(struct i2o_controller *c) |
954 | { | 930 | { |
955 | struct i2o_message __iomem *msg; | 931 | struct i2o_message *msg; |
956 | u32 m; | ||
957 | volatile u8 *status_block; | 932 | volatile u8 *status_block; |
958 | unsigned long timeout; | 933 | unsigned long timeout; |
959 | 934 | ||
960 | status_block = (u8 *) c->status_block.virt; | 935 | status_block = (u8 *) c->status_block.virt; |
961 | memset(c->status_block.virt, 0, sizeof(i2o_status_block)); | 936 | memset(c->status_block.virt, 0, sizeof(i2o_status_block)); |
962 | 937 | ||
963 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | 938 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); |
964 | if (m == I2O_QUEUE_EMPTY) | 939 | if (IS_ERR(msg)) |
965 | return -ETIMEDOUT; | 940 | return PTR_ERR(msg); |
966 | 941 | ||
967 | writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | 942 | msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_0); |
968 | writel(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | ADAPTER_TID, | 943 | msg->u.head[1] = |
969 | &msg->u.head[1]); | 944 | cpu_to_le32(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | |
970 | writel(i2o_exec_driver.context, &msg->u.s.icntxt); | 945 | ADAPTER_TID); |
971 | writel(0, &msg->u.s.tcntxt); // FIXME: use resonable transaction context | 946 | msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); |
972 | writel(0, &msg->body[0]); | 947 | msg->u.s.tcntxt = cpu_to_le32(0x00000000); |
973 | writel(0, &msg->body[1]); | 948 | msg->body[0] = cpu_to_le32(0x00000000); |
974 | writel(i2o_dma_low(c->status_block.phys), &msg->body[2]); | 949 | msg->body[1] = cpu_to_le32(0x00000000); |
975 | writel(i2o_dma_high(c->status_block.phys), &msg->body[3]); | 950 | msg->body[2] = cpu_to_le32(i2o_dma_low(c->status_block.phys)); |
976 | writel(sizeof(i2o_status_block), &msg->body[4]); /* always 88 bytes */ | 951 | msg->body[3] = cpu_to_le32(i2o_dma_high(c->status_block.phys)); |
952 | msg->body[4] = cpu_to_le32(sizeof(i2o_status_block)); /* always 88 bytes */ | ||
977 | 953 | ||
978 | i2o_msg_post(c, m); | 954 | i2o_msg_post(c, msg); |
979 | 955 | ||
980 | /* Wait for a reply */ | 956 | /* Wait for a reply */ |
981 | timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ; | 957 | timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ; |
@@ -1013,20 +989,20 @@ static int i2o_hrt_get(struct i2o_controller *c) | |||
1013 | struct device *dev = &c->pdev->dev; | 989 | struct device *dev = &c->pdev->dev; |
1014 | 990 | ||
1015 | for (i = 0; i < I2O_HRT_GET_TRIES; i++) { | 991 | for (i = 0; i < I2O_HRT_GET_TRIES; i++) { |
1016 | struct i2o_message __iomem *msg; | 992 | struct i2o_message *msg; |
1017 | u32 m; | ||
1018 | 993 | ||
1019 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | 994 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); |
1020 | if (m == I2O_QUEUE_EMPTY) | 995 | if (IS_ERR(msg)) |
1021 | return -ETIMEDOUT; | 996 | return PTR_ERR(msg); |
1022 | 997 | ||
1023 | writel(SIX_WORD_MSG_SIZE | SGL_OFFSET_4, &msg->u.head[0]); | 998 | msg->u.head[0] = cpu_to_le32(SIX_WORD_MSG_SIZE | SGL_OFFSET_4); |
1024 | writel(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | ADAPTER_TID, | 999 | msg->u.head[1] = |
1025 | &msg->u.head[1]); | 1000 | cpu_to_le32(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | |
1026 | writel(0xd0000000 | c->hrt.len, &msg->body[0]); | 1001 | ADAPTER_TID); |
1027 | writel(c->hrt.phys, &msg->body[1]); | 1002 | msg->body[0] = cpu_to_le32(0xd0000000 | c->hrt.len); |
1003 | msg->body[1] = cpu_to_le32(c->hrt.phys); | ||
1028 | 1004 | ||
1029 | rc = i2o_msg_post_wait_mem(c, m, 20, &c->hrt); | 1005 | rc = i2o_msg_post_wait_mem(c, msg, 20, &c->hrt); |
1030 | 1006 | ||
1031 | if (rc < 0) { | 1007 | if (rc < 0) { |
1032 | osm_err("%s: Unable to get HRT (status=%#x)\n", c->name, | 1008 | osm_err("%s: Unable to get HRT (status=%#x)\n", c->name, |
@@ -1056,6 +1032,7 @@ static int i2o_hrt_get(struct i2o_controller *c) | |||
1056 | */ | 1032 | */ |
1057 | void i2o_iop_free(struct i2o_controller *c) | 1033 | void i2o_iop_free(struct i2o_controller *c) |
1058 | { | 1034 | { |
1035 | i2o_pool_free(&c->in_msg); | ||
1059 | kfree(c); | 1036 | kfree(c); |
1060 | }; | 1037 | }; |
1061 | 1038 | ||
@@ -1080,7 +1057,7 @@ static struct class *i2o_controller_class; | |||
1080 | * i2o_iop_alloc - Allocate and initialize a i2o_controller struct | 1057 | * i2o_iop_alloc - Allocate and initialize a i2o_controller struct |
1081 | * | 1058 | * |
1082 | * Allocate the necessary memory for a i2o_controller struct and | 1059 | * Allocate the necessary memory for a i2o_controller struct and |
1083 | * initialize the lists. | 1060 | * initialize the lists and message mempool. |
1084 | * | 1061 | * |
1085 | * Returns a pointer to the I2O controller or a negative error code on | 1062 | * Returns a pointer to the I2O controller or a negative error code on |
1086 | * failure. | 1063 | * failure. |
@@ -1089,6 +1066,7 @@ struct i2o_controller *i2o_iop_alloc(void) | |||
1089 | { | 1066 | { |
1090 | static int unit = 0; /* 0 and 1 are NULL IOP and Local Host */ | 1067 | static int unit = 0; /* 0 and 1 are NULL IOP and Local Host */ |
1091 | struct i2o_controller *c; | 1068 | struct i2o_controller *c; |
1069 | char poolname[32]; | ||
1092 | 1070 | ||
1093 | c = kmalloc(sizeof(*c), GFP_KERNEL); | 1071 | c = kmalloc(sizeof(*c), GFP_KERNEL); |
1094 | if (!c) { | 1072 | if (!c) { |
@@ -1098,11 +1076,20 @@ struct i2o_controller *i2o_iop_alloc(void) | |||
1098 | } | 1076 | } |
1099 | memset(c, 0, sizeof(*c)); | 1077 | memset(c, 0, sizeof(*c)); |
1100 | 1078 | ||
1079 | c->unit = unit++; | ||
1080 | sprintf(c->name, "iop%d", c->unit); | ||
1081 | |||
1082 | snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name); | ||
1083 | if (i2o_pool_alloc | ||
1084 | (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4, | ||
1085 | I2O_MSG_INPOOL_MIN)) { | ||
1086 | kfree(c); | ||
1087 | return ERR_PTR(-ENOMEM); | ||
1088 | }; | ||
1089 | |||
1101 | INIT_LIST_HEAD(&c->devices); | 1090 | INIT_LIST_HEAD(&c->devices); |
1102 | spin_lock_init(&c->lock); | 1091 | spin_lock_init(&c->lock); |
1103 | init_MUTEX(&c->lct_lock); | 1092 | init_MUTEX(&c->lct_lock); |
1104 | c->unit = unit++; | ||
1105 | sprintf(c->name, "iop%d", c->unit); | ||
1106 | 1093 | ||
1107 | device_initialize(&c->device); | 1094 | device_initialize(&c->device); |
1108 | 1095 | ||
@@ -1199,28 +1186,27 @@ int i2o_iop_add(struct i2o_controller *c) | |||
1199 | * is waited for, or expected. If you do not want further notifications, | 1186 | * is waited for, or expected. If you do not want further notifications, |
1200 | * call the i2o_event_register again with a evt_mask of 0. | 1187 | * call the i2o_event_register again with a evt_mask of 0. |
1201 | * | 1188 | * |
1202 | * Returns 0 on success or -ETIMEDOUT if no message could be fetched for | 1189 | * Returns 0 on success or negative error code on failure. |
1203 | * sending the request. | ||
1204 | */ | 1190 | */ |
1205 | int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv, | 1191 | int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv, |
1206 | int tcntxt, u32 evt_mask) | 1192 | int tcntxt, u32 evt_mask) |
1207 | { | 1193 | { |
1208 | struct i2o_controller *c = dev->iop; | 1194 | struct i2o_controller *c = dev->iop; |
1209 | struct i2o_message __iomem *msg; | 1195 | struct i2o_message *msg; |
1210 | u32 m; | ||
1211 | 1196 | ||
1212 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | 1197 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); |
1213 | if (m == I2O_QUEUE_EMPTY) | 1198 | if (IS_ERR(msg)) |
1214 | return -ETIMEDOUT; | 1199 | return PTR_ERR(msg); |
1215 | 1200 | ||
1216 | writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | 1201 | msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); |
1217 | writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->lct_data. | 1202 | msg->u.head[1] = |
1218 | tid, &msg->u.head[1]); | 1203 | cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev-> |
1219 | writel(drv->context, &msg->u.s.icntxt); | 1204 | lct_data.tid); |
1220 | writel(tcntxt, &msg->u.s.tcntxt); | 1205 | msg->u.s.icntxt = cpu_to_le32(drv->context); |
1221 | writel(evt_mask, &msg->body[0]); | 1206 | msg->u.s.tcntxt = cpu_to_le32(tcntxt); |
1207 | msg->body[0] = cpu_to_le32(evt_mask); | ||
1222 | 1208 | ||
1223 | i2o_msg_post(c, m); | 1209 | i2o_msg_post(c, msg); |
1224 | 1210 | ||
1225 | return 0; | 1211 | return 0; |
1226 | }; | 1212 | }; |