Diffstat (limited to 'drivers/message/i2o/i2o_block.c')
-rw-r--r--  drivers/message/i2o/i2o_block.c  211
1 file changed, 71 insertions(+), 140 deletions(-)
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 4830b7759061..e69421e36ac5 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -104,7 +104,8 @@ static int i2o_block_remove(struct device *dev)
         struct i2o_device *i2o_dev = to_i2o_device(dev);
         struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev);
 
-        osm_info("Device removed %s\n", i2o_blk_dev->gd->disk_name);
+        osm_info("device removed (TID: %03x): %s\n", i2o_dev->lct_data.tid,
+                 i2o_blk_dev->gd->disk_name);
 
         i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0);
 
@@ -400,71 +401,62 @@ static void i2o_block_delayed_request_fn(void *delayed_request)
 };
 
 /**
- * i2o_block_reply - Block OSM reply handler.
- * @c: I2O controller from which the message arrives
- * @m: message id of reply
- * qmsg: the actuall I2O message reply
+ * i2o_block_end_request - Post-processing of completed commands
+ * @req: request which should be completed
+ * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
+ * @nr_bytes: number of bytes to complete
  *
- * This function gets all the message replies.
+ * Mark the request as complete. The lock must not be held when entering.
  *
  */
-static int i2o_block_reply(struct i2o_controller *c, u32 m,
-                           struct i2o_message *msg)
+static void i2o_block_end_request(struct request *req, int uptodate,
+                                  int nr_bytes)
 {
-        struct i2o_block_request *ireq;
-        struct request *req;
-        struct i2o_block_device *dev;
-        struct request_queue *q;
-        u8 st;
+        struct i2o_block_request *ireq = req->special;
+        struct i2o_block_device *dev = ireq->i2o_blk_dev;
+        request_queue_t *q = dev->gd->queue;
         unsigned long flags;
 
-        /* FAILed message */
-        if (unlikely(le32_to_cpu(msg->u.head[0]) & (1 << 13))) {
-                struct i2o_message *pmsg;
-                u32 pm;
-
-                /*
-                 * FAILed message from controller
-                 * We increment the error count and abort it
-                 *
-                 * In theory this will never happen. The I2O block class
-                 * specification states that block devices never return
-                 * FAILs but instead use the REQ status field...but
-                 * better be on the safe side since no one really follows
-                 * the spec to the book :)
-                 */
-                pm = le32_to_cpu(msg->body[3]);
-                pmsg = i2o_msg_in_to_virt(c, pm);
+        if (end_that_request_chunk(req, uptodate, nr_bytes)) {
+                int leftover = (req->hard_nr_sectors << 9);
 
-                req = i2o_cntxt_list_get(c, le32_to_cpu(pmsg->u.s.tcntxt));
-                if (unlikely(!req)) {
-                        osm_err("NULL reply received!\n");
-                        return -1;
-                }
+                if (blk_pc_request(req))
+                        leftover = req->data_len;
 
-                ireq = req->special;
-                dev = ireq->i2o_blk_dev;
-                q = dev->gd->queue;
+                if (end_io_error(uptodate))
+                        end_that_request_chunk(req, 0, leftover);
+        }
 
-                req->errors++;
+        add_disk_randomness(req->rq_disk);
 
-                spin_lock_irqsave(q->queue_lock, flags);
+        spin_lock_irqsave(q->queue_lock, flags);
 
-                while (end_that_request_chunk(req, !req->errors,
-                                              le32_to_cpu(pmsg->body[1]))) ;
-                end_that_request_last(req);
+        end_that_request_last(req);
+        dev->open_queue_depth--;
+        list_del(&ireq->queue);
 
-                dev->open_queue_depth--;
-                list_del(&ireq->queue);
-                blk_start_queue(q);
+        blk_start_queue(q);
 
-                spin_unlock_irqrestore(q->queue_lock, flags);
+        spin_unlock_irqrestore(q->queue_lock, flags);
 
-                /* Now flush the message by making it a NOP */
-                i2o_msg_nop(c, pm);
+        i2o_block_sglist_free(ireq);
+        i2o_block_request_free(ireq);
+};
 
-                return -1;
-        }
+/**
+ * i2o_block_reply - Block OSM reply handler.
+ * @c: I2O controller from which the message arrives
+ * @m: message id of reply
+ * qmsg: the actuall I2O message reply
+ *
+ * This function gets all the message replies.
+ *
+ */
+static int i2o_block_reply(struct i2o_controller *c, u32 m,
+                           struct i2o_message *msg)
+{
+        struct request *req;
+        int uptodate = 1;
 
         req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
         if (unlikely(!req)) {
@@ -472,61 +464,13 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m,
                 return -1;
         }
 
-        ireq = req->special;
-        dev = ireq->i2o_blk_dev;
-        q = dev->gd->queue;
-
-        if (unlikely(!dev->i2o_dev)) {
-                /*
-                 * This is HACK, but Intel Integrated RAID allows user
-                 * to delete a volume that is claimed, locked, and in use
-                 * by the OS. We have to check for a reply from a
-                 * non-existent device and flag it as an error or the system
-                 * goes kaput...
-                 */
-                req->errors++;
-                osm_warn("Data transfer to deleted device!\n");
-                spin_lock_irqsave(q->queue_lock, flags);
-                while (end_that_request_chunk
-                       (req, !req->errors, le32_to_cpu(msg->body[1]))) ;
-                end_that_request_last(req);
-
-                dev->open_queue_depth--;
-                list_del(&ireq->queue);
-                blk_start_queue(q);
-
-                spin_unlock_irqrestore(q->queue_lock, flags);
-                return -1;
-        }
-
         /*
          * Lets see what is cooking. We stuffed the
          * request in the context.
          */
 
-        st = le32_to_cpu(msg->body[0]) >> 24;
-
-        if (st != 0) {
-                int err;
-                char *bsa_errors[] = {
-                        "Success",
-                        "Media Error",
-                        "Failure communicating to device",
-                        "Device Failure",
-                        "Device is not ready",
-                        "Media not present",
-                        "Media is locked by another user",
-                        "Media has failed",
-                        "Failure communicating to device",
-                        "Device bus failure",
-                        "Device is locked by another user",
-                        "Device is write protected",
-                        "Device has reset",
-                        "Volume has changed, waiting for acknowledgement"
-                };
-
-                err = le32_to_cpu(msg->body[0]) & 0xffff;
-
+        if ((le32_to_cpu(msg->body[0]) >> 24) != 0) {
+                u32 status = le32_to_cpu(msg->body[0]);
                 /*
                  * Device not ready means two things. One is that the
                  * the thing went offline (but not a removal media)
@@ -539,40 +483,23 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m,
                  * Don't stick a supertrak100 into cache aggressive modes
                  */
 
-                osm_err("block-osm: /dev/%s error: %s", dev->gd->disk_name,
-                        bsa_errors[le32_to_cpu(msg->body[0]) & 0xffff]);
-                if (le32_to_cpu(msg->body[0]) & 0x00ff0000)
-                        printk(KERN_ERR " - DDM attempted %d retries",
-                               (le32_to_cpu(msg->body[0]) >> 16) & 0x00ff);
-                printk(KERN_ERR ".\n");
-                req->errors++;
-        } else
-                req->errors = 0;
-
-        if (!end_that_request_chunk
-            (req, !req->errors, le32_to_cpu(msg->body[1]))) {
-                add_disk_randomness(req->rq_disk);
-                spin_lock_irqsave(q->queue_lock, flags);
+                osm_err("%03x error status: %02x, detailed status: %04x\n",
+                        (le32_to_cpu(msg->u.head[1]) >> 12 & 0xfff),
+                        status >> 24, status & 0xffff);
 
-                end_that_request_last(req);
+                req->errors++;
 
-                dev->open_queue_depth--;
-                list_del(&ireq->queue);
-                blk_start_queue(q);
+                uptodate = 0;
+        }
 
-                spin_unlock_irqrestore(q->queue_lock, flags);
-
-                i2o_block_sglist_free(ireq);
-                i2o_block_request_free(ireq);
-        } else
-                osm_err("still remaining chunks\n");
-
+        i2o_block_end_request(req, uptodate, le32_to_cpu(msg->body[1]));
 
         return 1;
 };
 
 static void i2o_block_event(struct i2o_event *evt)
 {
-        osm_info("block-osm: event received\n");
+        osm_info("event received\n");
         kfree(evt);
 };
 
@@ -875,9 +802,7 @@ static int i2o_block_transfer(struct request *req)
                 sg++;
         }
 
-        writel(I2O_MESSAGE_SIZE
-               (((unsigned long)mptr -
-                 (unsigned long)&msg->u.head[0]) >> 2) | SGL_OFFSET_8,
+        writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | SGL_OFFSET_8,
                &msg->u.head[0]);
 
         list_add_tail(&ireq->queue, &dev->open_queue);
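
[Note on the writel() hunk above: the message-size value written is unchanged; the new form simply relies on the fact that subtracting two 32-bit word pointers already yields a count of 32-bit words (assuming mptr and &msg->u.head[0] are both u32 pointers, as the one-line replacement implies), so the unsigned long casts and the ">> 2" become redundant. A minimal standalone sketch of that equivalence in plain C, with made-up buffer names rather than the driver's types:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

int main(void)
{
        uint32_t frame[32];            /* stand-in for the message frame   */
        uint32_t *head = &frame[0];    /* stand-in for &msg->u.head[0]     */
        uint32_t *mptr = &frame[12];   /* stand-in for the SGL write cursor */

        /* Old style: cast to byte addresses, subtract, divide by four. */
        unsigned long words_old =
                ((unsigned long)mptr - (unsigned long)head) >> 2;

        /* New style: uint32_t pointer subtraction already counts 32-bit words. */
        ptrdiff_t words_new = mptr - head;

        printf("old = %lu, new = %td\n", words_old, words_new);
        return 0;
}
]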
@@ -1048,7 +973,6 @@ static int i2o_block_probe(struct device *dev)
         int rc;
         u64 size;
         u32 blocksize;
-        u16 power;
         u32 flags, status;
         int segments;
 
@@ -1058,8 +982,6 @@ static int i2o_block_probe(struct device *dev)
                 return -ENODEV;
         }
 
-        osm_info("New device detected (TID: %03x)\n", i2o_dev->lct_data.tid);
-
         if (i2o_device_claim(i2o_dev)) {
                 osm_warn("Unable to claim device. Installation aborted\n");
                 rc = -EFAULT;
@@ -1111,15 +1033,21 @@ static int i2o_block_probe(struct device *dev)
          * Ask for the current media data. If that isn't supported
          * then we ask for the device capacity data
          */
-        if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) != 0
-            || i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) != 0) {
-                i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4);
-                i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8);
-        }
-        osm_debug("blocksize = %d\n", blocksize);
+        if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8))
+                if (!i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
+                        osm_warn("could not get size of %s\n", gd->disk_name);
+                        size = 0;
+                }
 
-        if (i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
-                power = 0;
+        if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4))
+                if (!i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
+                        osm_warn("unable to get blocksize of %s\n",
+                                 gd->disk_name);
+                        blocksize = 0;
+                }
+
+        if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &i2o_blk_dev->power, 2))
+                i2o_blk_dev->power = 0;
         i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4);
         i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4);
 
@@ -1131,6 +1059,9 @@ static int i2o_block_probe(struct device *dev)
 
         unit++;
 
+        osm_info("device added (TID: %03x): %s\n", i2o_dev->lct_data.tid,
+                 i2o_blk_dev->gd->disk_name);
+
         return 0;
 
 claim_release:
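
[Note on the reply-handler hunks above: the reworked i2o_block_reply() no longer translates the status through the removed bsa_errors[] string table; it prints the raw fields instead. Going by the new osm_err() call, the block-class status sits in the top byte of msg->body[0], the detailed status in its low 16 bits, and the reported TID in bits 12-23 of msg->u.head[1]. A standalone sketch of that decoding in plain C; the sample words are invented for illustration:

#include <stdio.h>

int main(void)
{
        /* Hypothetical reply words, laid out like the fields the driver reads. */
        unsigned int head1 = 0x00201000;   /* TID field in bits 12..23                  */
        unsigned int body0 = 0x04000007;   /* status byte on top, detail in low 16 bits */

        unsigned int tid      = (head1 >> 12) & 0xfff;
        unsigned int status   = body0 >> 24;      /* non-zero means the request failed */
        unsigned int detailed = body0 & 0xffff;

        printf("%03x error status: %02x, detailed status: %04x\n",
               tid, status, detailed);
        return 0;
}
]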