author		Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>	2009-07-28 02:56:34 -0400
committer	Jens Axboe <jens.axboe@oracle.com>		2009-07-28 02:56:34 -0400
commit		394c6cc63c1d6900ad7498a3221a1d48fc00c4fa (patch)
tree		56676e59075c4533b751a9c9c3296bed5958c792
parent		eb32baec15c38ae6f06cb898a9f791578c5f8c79 (diff)
mg_disk: fix issue with data integrity on error in mg_write()
We cannot acknowledge the sector write before checking its status
(which is done on the next loop iteration) and we also need to do
the final status register check after writing the last sector.
Fix mg_write() to match mg_write_intr() in this regard.
While at it:
- add mg_read_one() and mg_write_one() helpers
- always use MG_SECTOR_SIZE and remove MG_STORAGE_BUFFER_SIZE
[bart: thanks to Tejun for porting the patch over recent block changes]
Cc: unsik Kim <donari75@gmail.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r--	drivers/block/mg_disk.c	89
1 file changed, 47 insertions, 42 deletions
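Before the diff itself, a condensed sketch of the write flow the patch establishes may help when reading the mg_write() hunk. It mirrors the patched code in the hunk at -520/+535 below with the MG_DBG() call dropped; the function name mg_write_flow_sketch is invented here purely for illustration and is not part of the driver.

/*
 * Sketch only: the old loop acknowledged each sector with MG_CMD_WR_CONF and
 * completed it immediately; here a sector is completed only after the status
 * check performed by the DRQ wait at the top of the following iteration.
 */
static void mg_write_flow_sketch(struct mg_host *host, struct request *req)
{
	bool rem;

	if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
		   MG_CMD_WR, NULL) != MG_ERR_NONE) {
		mg_bad_rw_intr(host);
		return;
	}

	/* prime the first sector before entering the loop */
	if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
		mg_bad_rw_intr(host);
		return;
	}
	mg_write_one(host, req);
	outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);

	do {
		/* status check for the sector written on the previous pass */
		if (blk_rq_sectors(req) > 1 &&
		    mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return;
		}

		/* only now is that sector completed towards the block layer */
		rem = mg_end_request(host, 0, MG_SECTOR_SIZE);
		if (rem)
			mg_write_one(host, req);

		outb(MG_CMD_WR_CONF,
		     (unsigned long)host->dev_base + MG_REG_COMMAND);
	} while (rem);
}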
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 6440d5945414..19917d5481bd 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -36,7 +36,6 @@
 
 /* Register offsets */
 #define MG_BUFF_OFFSET 0x8000
-#define MG_STORAGE_BUFFER_SIZE 0x200
 #define MG_REG_OFFSET 0xC000
 #define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */
 #define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */
@@ -477,9 +476,18 @@ static unsigned int mg_out(struct mg_host *host,
 	return MG_ERR_NONE;
 }
 
+static void mg_read_one(struct mg_host *host, struct request *req)
+{
+	u16 *buff = (u16 *)req->buffer;
+	u32 i;
+
+	for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
+		*buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
+			      (i << 1));
+}
+
 static void mg_read(struct request *req)
 {
-	u32 j;
 	struct mg_host *host = req->rq_disk->private_data;
 
 	if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
@@ -490,26 +498,33 @@ static void mg_read(struct request *req)
 		blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
 
 	do {
-		u16 *buff = (u16 *)req->buffer;
-
 		if (mg_wait(host, ATA_DRQ,
 			    MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
 			mg_bad_rw_intr(host);
 			return;
 		}
-		for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
-			*buff++ = inw((unsigned long)host->dev_base +
-				      MG_BUFF_OFFSET + (j << 1));
+
+		mg_read_one(host, req);
 
 		outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
 		     MG_REG_COMMAND);
 	} while (mg_end_request(host, 0, MG_SECTOR_SIZE));
 }
 
+static void mg_write_one(struct mg_host *host, struct request *req)
+{
+	u16 *buff = (u16 *)req->buffer;
+	u32 i;
+
+	for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
+		outw(*buff++, (unsigned long)host->dev_base + MG_BUFF_OFFSET +
+		     (i << 1));
+}
+
 static void mg_write(struct request *req)
 {
-	u32 j;
 	struct mg_host *host = req->rq_disk->private_data;
+	bool rem;
 
 	if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
 		   MG_CMD_WR, NULL) != MG_ERR_NONE) {
@@ -520,27 +535,37 @@ static void mg_write(struct request *req)
 	MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
 	       blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
 
-	do {
-		u16 *buff = (u16 *)req->buffer;
+	if (mg_wait(host, ATA_DRQ,
+		    MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
+		mg_bad_rw_intr(host);
+		return;
+	}
+
+	mg_write_one(host, req);
+
+	outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
 
-		if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
+	do {
+		if (blk_rq_sectors(req) > 1 &&
+		    mg_wait(host, ATA_DRQ,
+			    MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
 			mg_bad_rw_intr(host);
 			return;
 		}
-		for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
-			outw(*buff++, (unsigned long)host->dev_base +
-			     MG_BUFF_OFFSET + (j << 1));
 
-		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
-		     MG_REG_COMMAND);
-	} while (mg_end_request(host, 0, MG_SECTOR_SIZE));
+		rem = mg_end_request(host, 0, MG_SECTOR_SIZE);
+		if (rem)
+			mg_write_one(host, req);
+
+		outb(MG_CMD_WR_CONF,
+		     (unsigned long)host->dev_base + MG_REG_COMMAND);
+	} while (rem);
 }
 
 static void mg_read_intr(struct mg_host *host)
 {
 	struct request *req = host->req;
 	u32 i;
-	u16 *buff;
 
 	/* check status */
 	do {
@@ -558,13 +583,7 @@ static void mg_read_intr(struct mg_host *host)
 		return;
 
 ok_to_read:
-	/* get current segment of request */
-	buff = (u16 *)req->buffer;
-
-	/* read 1 sector */
-	for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
-		*buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
-			      (i << 1));
+	mg_read_one(host, req);
 
 	MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
 	       blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer);
@@ -583,8 +602,7 @@ ok_to_read:
 static void mg_write_intr(struct mg_host *host)
 {
 	struct request *req = host->req;
-	u32 i, j;
-	u16 *buff;
+	u32 i;
 	bool rem;
 
 	/* check status */
@@ -605,12 +623,7 @@ static void mg_write_intr(struct mg_host *host)
 ok_to_write:
 	if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) {
 		/* write 1 sector and set handler if remains */
-		buff = (u16 *)req->buffer;
-		for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
-			outw(*buff, (unsigned long)host->dev_base +
-			     MG_BUFF_OFFSET + (j << 1));
-			buff++;
-		}
+		mg_write_one(host, req);
 		MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
 		       blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
 		host->mg_do_intr = mg_write_intr;
@@ -675,9 +688,6 @@ static unsigned int mg_issue_req(struct request *req,
 			       unsigned int sect_num,
 			       unsigned int sect_cnt)
 {
-	u16 *buff;
-	u32 i;
-
 	switch (rq_data_dir(req)) {
 	case READ:
 		if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
@@ -701,12 +711,7 @@ static unsigned int mg_issue_req(struct request *req,
 			mg_bad_rw_intr(host);
 			return host->error;
 		}
-		buff = (u16 *)req->buffer;
-		for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
-			outw(*buff, (unsigned long)host->dev_base +
-			     MG_BUFF_OFFSET + (i << 1));
-			buff++;
-		}
+		mg_write_one(host, req);
 		mod_timer(&host->timer, jiffies + 3 * HZ);
 		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
 		     MG_REG_COMMAND);