author    Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
committer Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
commit    8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree      a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /drivers/mtd/mtdconcat.c
parent    406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'drivers/mtd/mtdconcat.c')
-rw-r--r--  drivers/mtd/mtdconcat.c  159
1 file changed, 113 insertions(+), 46 deletions(-)
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index b9000563b9f..e601672a530 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -72,6 +72,8 @@ concat_read(struct mtd_info *mtd, loff_t from, size_t len,
 	int ret = 0, err;
 	int i;
 
+	*retlen = 0;
+
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
 		size_t size, retsize;
@@ -89,14 +91,14 @@ concat_read(struct mtd_info *mtd, loff_t from, size_t len,
 			/* Entire transaction goes into this subdev */
 			size = len;
 
-		err = mtd_read(subdev, from, size, &retsize, buf);
+		err = subdev->read(subdev, from, size, &retsize, buf);
 
 		/* Save information about bitflips! */
 		if (unlikely(err)) {
-			if (mtd_is_eccerr(err)) {
+			if (err == -EBADMSG) {
 				mtd->ecc_stats.failed++;
 				ret = err;
-			} else if (mtd_is_bitflip(err)) {
+			} else if (err == -EUCLEAN) {
 				mtd->ecc_stats.corrected++;
 				/* Do not overwrite -EBADMSG !! */
 				if (!ret)
@@ -124,6 +126,11 @@ concat_write(struct mtd_info *mtd, loff_t to, size_t len,
 	int err = -EINVAL;
 	int i;
 
+	if (!(mtd->flags & MTD_WRITEABLE))
+		return -EROFS;
+
+	*retlen = 0;
+
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
 		size_t size, retsize;
@@ -138,7 +145,11 @@ concat_write(struct mtd_info *mtd, loff_t to, size_t len,
 		else
 			size = len;
 
-		err = mtd_write(subdev, to, size, &retsize, buf);
+		if (!(subdev->flags & MTD_WRITEABLE))
+			err = -EROFS;
+		else
+			err = subdev->write(subdev, to, size, &retsize, buf);
+
 		if (err)
 			break;
 
@@ -165,10 +176,19 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
 	int i;
 	int err = -EINVAL;
 
+	if (!(mtd->flags & MTD_WRITEABLE))
+		return -EROFS;
+
+	*retlen = 0;
+
 	/* Calculate total length of data */
 	for (i = 0; i < count; i++)
 		total_len += vecs[i].iov_len;
 
+	/* Do not allow write past end of device */
+	if ((to + total_len) > mtd->size)
+		return -EINVAL;
+
 	/* Check alignment */
 	if (mtd->writesize > 1) {
 		uint64_t __to = to;
@@ -204,8 +224,11 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
 		old_iov_len = vecs_copy[entry_high].iov_len;
 		vecs_copy[entry_high].iov_len = size;
 
-		err = mtd_writev(subdev, &vecs_copy[entry_low],
-				 entry_high - entry_low + 1, to, &retsize);
+		if (!(subdev->flags & MTD_WRITEABLE))
+			err = -EROFS;
+		else
+			err = subdev->writev(subdev, &vecs_copy[entry_low],
+				entry_high - entry_low + 1, to, &retsize);
 
 		vecs_copy[entry_high].iov_len = old_iov_len - size;
 		vecs_copy[entry_high].iov_base += size;
@@ -250,16 +273,16 @@ concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
 		if (from + devops.len > subdev->size)
 			devops.len = subdev->size - from;
 
-		err = mtd_read_oob(subdev, from, &devops);
+		err = subdev->read_oob(subdev, from, &devops);
 		ops->retlen += devops.retlen;
 		ops->oobretlen += devops.oobretlen;
 
 		/* Save information about bitflips! */
 		if (unlikely(err)) {
-			if (mtd_is_eccerr(err)) {
+			if (err == -EBADMSG) {
 				mtd->ecc_stats.failed++;
 				ret = err;
-			} else if (mtd_is_bitflip(err)) {
+			} else if (err == -EUCLEAN) {
 				mtd->ecc_stats.corrected++;
 				/* Do not overwrite -EBADMSG !! */
 				if (!ret)
@@ -310,7 +333,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
 		if (to + devops.len > subdev->size)
 			devops.len = subdev->size - to;
 
-		err = mtd_write_oob(subdev, to, &devops);
+		err = subdev->write_oob(subdev, to, &devops);
 		ops->retlen += devops.oobretlen;
 		if (err)
 			return err;
@@ -356,7 +379,7 @@ static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
 	 * FIXME: Allow INTERRUPTIBLE. Which means
 	 * not having the wait_queue head on the stack.
 	 */
-	err = mtd_erase(mtd, erase);
+	err = mtd->erase(mtd, erase);
 	if (!err) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		add_wait_queue(&waitq, &wait);
@@ -379,6 +402,15 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
 	uint64_t length, offset = 0;
 	struct erase_info *erase;
 
+	if (!(mtd->flags & MTD_WRITEABLE))
+		return -EROFS;
+
+	if (instr->addr > concat->mtd.size)
+		return -EINVAL;
+
+	if (instr->len + instr->addr > concat->mtd.size)
+		return -EINVAL;
+
 	/*
 	 * Check for proper erase block alignment of the to-be-erased area.
 	 * It is easier to do this based on the super device's erase
@@ -426,6 +458,8 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
 			return -EINVAL;
 	}
 
+	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+
 	/* make a local copy of instr to avoid modifying the caller's struct */
 	erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
 
@@ -464,6 +498,10 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
 		else
 			erase->len = length;
 
+		if (!(subdev->flags & MTD_WRITEABLE)) {
+			err = -EROFS;
+			break;
+		}
 		length -= erase->len;
 		if ((err = concat_dev_erase(subdev, erase))) {
 			/* sanity check: should never happen since
@@ -499,6 +537,9 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 	struct mtd_concat *concat = CONCAT(mtd);
 	int i, err = -EINVAL;
 
+	if ((len + ofs) > mtd->size)
+		return -EINVAL;
+
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
 		uint64_t size;
@@ -513,9 +554,12 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 		else
 			size = len;
 
-		err = mtd_lock(subdev, ofs, size);
-		if (err)
-			break;
+		if (subdev->lock) {
+			err = subdev->lock(subdev, ofs, size);
+			if (err)
+				break;
+		} else
+			err = -EOPNOTSUPP;
 
 		len -= size;
 		if (len == 0)
@@ -533,6 +577,9 @@ static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 	struct mtd_concat *concat = CONCAT(mtd);
 	int i, err = 0;
 
+	if ((len + ofs) > mtd->size)
+		return -EINVAL;
+
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
 		uint64_t size;
@@ -547,9 +594,12 @@ static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 		else
 			size = len;
 
-		err = mtd_unlock(subdev, ofs, size);
-		if (err)
-			break;
+		if (subdev->unlock) {
+			err = subdev->unlock(subdev, ofs, size);
+			if (err)
+				break;
+		} else
+			err = -EOPNOTSUPP;
 
 		len -= size;
 		if (len == 0)
@@ -569,7 +619,7 @@ static void concat_sync(struct mtd_info *mtd)
 
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
-		mtd_sync(subdev);
+		subdev->sync(subdev);
 	}
 }
 
@@ -580,7 +630,7 @@ static int concat_suspend(struct mtd_info *mtd)
 
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
-		if ((rc = mtd_suspend(subdev)) < 0)
+		if ((rc = subdev->suspend(subdev)) < 0)
 			return rc;
 	}
 	return rc;
@@ -593,7 +643,7 @@ static void concat_resume(struct mtd_info *mtd)
 
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
-		mtd_resume(subdev);
+		subdev->resume(subdev);
 	}
 }
 
@@ -602,9 +652,12 @@ static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
 	struct mtd_concat *concat = CONCAT(mtd);
 	int i, res = 0;
 
-	if (!mtd_can_have_bb(concat->subdev[0]))
+	if (!concat->subdev[0]->block_isbad)
 		return res;
 
+	if (ofs > mtd->size)
+		return -EINVAL;
+
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
 
@@ -613,7 +666,7 @@ static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
 			continue;
 		}
 
-		res = mtd_block_isbad(subdev, ofs);
+		res = subdev->block_isbad(subdev, ofs);
 		break;
 	}
 
@@ -625,6 +678,12 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
 	struct mtd_concat *concat = CONCAT(mtd);
 	int i, err = -EINVAL;
 
+	if (!concat->subdev[0]->block_markbad)
+		return 0;
+
+	if (ofs > mtd->size)
+		return -EINVAL;
+
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
 
@@ -633,7 +692,7 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
 			continue;
 		}
 
-		err = mtd_block_markbad(subdev, ofs);
+		err = subdev->block_markbad(subdev, ofs);
 		if (!err)
 			mtd->ecc_stats.badblocks++;
 		break;
@@ -662,7 +721,15 @@ static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
 			continue;
 		}
 
-		return mtd_get_unmapped_area(subdev, len, offset, flags);
+		/* we've found the subdev over which the mapping will reside */
+		if (offset + len > subdev->size)
+			return (unsigned long) -EINVAL;
+
+		if (subdev->get_unmapped_area)
+			return subdev->get_unmapped_area(subdev, len, offset,
+							 flags);
+
+		break;
 	}
 
 	return (unsigned long) -ENOSYS;
@@ -703,7 +770,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
 
 	/*
 	 * Set up the new "super" device's MTD object structure, check for
-	 * incompatibilities between the subdevices.
+	 * incompatibilites between the subdevices.
 	 */
 	concat->mtd.type = subdev[0]->type;
 	concat->mtd.flags = subdev[0]->flags;
@@ -719,16 +786,16 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
 	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
 	concat->mtd.oobsize = subdev[0]->oobsize;
 	concat->mtd.oobavail = subdev[0]->oobavail;
-	if (subdev[0]->_writev)
-		concat->mtd._writev = concat_writev;
-	if (subdev[0]->_read_oob)
-		concat->mtd._read_oob = concat_read_oob;
-	if (subdev[0]->_write_oob)
-		concat->mtd._write_oob = concat_write_oob;
-	if (subdev[0]->_block_isbad)
-		concat->mtd._block_isbad = concat_block_isbad;
-	if (subdev[0]->_block_markbad)
-		concat->mtd._block_markbad = concat_block_markbad;
+	if (subdev[0]->writev)
+		concat->mtd.writev = concat_writev;
+	if (subdev[0]->read_oob)
+		concat->mtd.read_oob = concat_read_oob;
+	if (subdev[0]->write_oob)
+		concat->mtd.write_oob = concat_write_oob;
+	if (subdev[0]->block_isbad)
+		concat->mtd.block_isbad = concat_block_isbad;
+	if (subdev[0]->block_markbad)
+		concat->mtd.block_markbad = concat_block_markbad;
 
 	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
 
@@ -775,8 +842,8 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
 		if (concat->mtd.writesize != subdev[i]->writesize ||
 		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
 		    concat->mtd.oobsize != subdev[i]->oobsize ||
-		    !concat->mtd._read_oob != !subdev[i]->_read_oob ||
-		    !concat->mtd._write_oob != !subdev[i]->_write_oob) {
+		    !concat->mtd.read_oob != !subdev[i]->read_oob ||
+		    !concat->mtd.write_oob != !subdev[i]->write_oob) {
 			kfree(concat);
 			printk("Incompatible OOB or ECC data on \"%s\"\n",
 			       subdev[i]->name);
@@ -791,15 +858,15 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
 	concat->num_subdev = num_devs;
 	concat->mtd.name = name;
 
-	concat->mtd._erase = concat_erase;
-	concat->mtd._read = concat_read;
-	concat->mtd._write = concat_write;
-	concat->mtd._sync = concat_sync;
-	concat->mtd._lock = concat_lock;
-	concat->mtd._unlock = concat_unlock;
-	concat->mtd._suspend = concat_suspend;
-	concat->mtd._resume = concat_resume;
-	concat->mtd._get_unmapped_area = concat_get_unmapped_area;
+	concat->mtd.erase = concat_erase;
+	concat->mtd.read = concat_read;
+	concat->mtd.write = concat_write;
+	concat->mtd.sync = concat_sync;
+	concat->mtd.lock = concat_lock;
+	concat->mtd.unlock = concat_unlock;
+	concat->mtd.suspend = concat_suspend;
+	concat->mtd.resume = concat_resume;
+	concat->mtd.get_unmapped_area = concat_get_unmapped_area;
 
 	/*
 	 * Combine the erase block size info of the subdevices: