author		Artem Bityutskiy <Artem.Bityutskiy@nokia.com>	2011-03-25 11:41:20 -0400
committer	Artem Bityutskiy <Artem.Bityutskiy@nokia.com>	2011-03-25 11:41:20 -0400
commit		7bf7e370d5919112c223a269462cd0b546903829 (patch)
tree		03ccc715239df14ae168277dbccc9d9cf4d8a2c8 /drivers/mtd
parent		68b1a1e786f29c900fa1c516a402e24f0ece622a (diff)
parent		d39dd11c3e6a7af5c20bfac40594db36cf270f42 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into for-linus-1
* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6: (9356 commits)
  [media] rc: update for bitop name changes
  fs: simplify iget & friends
  fs: pull inode->i_lock up out of writeback_single_inode
  fs: rename inode_lock to inode_hash_lock
  fs: move i_wb_list out from under inode_lock
  fs: move i_sb_list out from under inode_lock
  fs: remove inode_lock from iput_final and prune_icache
  fs: Lock the inode LRU list separately
  fs: factor inode disposal
  fs: protect inode->i_state with inode->i_lock
  lib, arch: add filter argument to show_mem and fix private implementations
  SLUB: Write to per cpu data when allocating it
  slub: Fix debugobjects with lockless fastpath
  autofs4: Do not potentially dereference NULL pointer returned by fget() in autofs_dev_ioctl_setpipefd()
  autofs4 - remove autofs4_lock
  autofs4 - fix d_manage() return on rcu-walk
  autofs4 - fix autofs4_expire_indirect() traversal
  autofs4 - fix dentry leak in autofs4_expire_direct()
  autofs4 - reinstate last used update on access
  vfs - check non-mountpoint dentry might block in __follow_mount_rcu()
  ...

NOTE! This merge commit was created to fix a compilation error. The block
tree was merged upstream and removed the 'elv_queue_empty()' function
which the new 'mtdswap' driver is using. So a simple merge of the mtd tree
with upstream does not compile. And the mtd tree has already been
published, so re-basing it is not an option.

To fix this unfortunate situation, I had to merge upstream into the
mtd-2.6.git tree without committing, put the fixup patch on top of this,
and then commit this. The result is that we do not have commits which do
not compile.

In other words, this merge commit "merges" 3 things: the MTD tree, the
upstream tree, and the fixup patch.
Diffstat (limited to 'drivers/mtd')
-rw-r--r--	drivers/mtd/maps/physmap_of.c		15
-rw-r--r--	drivers/mtd/maps/sun_uflash.c		8
-rw-r--r--	drivers/mtd/mtd_blkdevs.c		9
-rw-r--r--	drivers/mtd/nand/Kconfig		19
-rw-r--r--	drivers/mtd/nand/fsl_upm.c		9
-rw-r--r--	drivers/mtd/nand/mpc5121_nfc.c		9
-rw-r--r--	drivers/mtd/nand/mxc_nand.c		5
-rw-r--r--	drivers/mtd/nand/ndfc.c			9
-rw-r--r--	drivers/mtd/nand/omap2.c		367
-rw-r--r--	drivers/mtd/nand/pasemi_nand.c		9
-rw-r--r--	drivers/mtd/nand/r852.c			2
-rw-r--r--	drivers/mtd/nand/socrates_nand.c	9
-rw-r--r--	drivers/mtd/nand/tmio_nand.c		11
-rw-r--r--	drivers/mtd/onenand/Kconfig		2
-rw-r--r--	drivers/mtd/onenand/omap2.c		36
-rw-r--r--	drivers/mtd/sm_ftl.c			2
-rw-r--r--	drivers/mtd/ubi/Kconfig			8
-rw-r--r--	drivers/mtd/ubi/Kconfig.debug		73
-rw-r--r--	drivers/mtd/ubi/build.c			62
-rw-r--r--	drivers/mtd/ubi/debug.c			14
-rw-r--r--	drivers/mtd/ubi/debug.h			131
-rw-r--r--	drivers/mtd/ubi/io.c			145
-rw-r--r--	drivers/mtd/ubi/kapi.c			2
-rw-r--r--	drivers/mtd/ubi/scan.c			95
-rw-r--r--	drivers/mtd/ubi/scan.h			2
-rw-r--r--	drivers/mtd/ubi/ubi.h			10
-rw-r--r--	drivers/mtd/ubi/vmt.c			7
-rw-r--r--	drivers/mtd/ubi/vtbl.c			9
-rw-r--r--	drivers/mtd/ubi/wl.c			20
29 files changed, 654 insertions(+), 445 deletions(-)
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index c85be8312488..bd483f0c57e1 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -214,8 +214,7 @@ static void __devinit of_free_probes(const char **probes)
 }
 #endif
 
-static int __devinit of_flash_probe(struct platform_device *dev,
-				    const struct of_device_id *match)
+static int __devinit of_flash_probe(struct platform_device *dev)
 {
 #ifdef CONFIG_MTD_PARTITIONS
 	const char **part_probe_types;
@@ -223,7 +222,7 @@ static int __devinit of_flash_probe(struct platform_device *dev,
 	struct device_node *dp = dev->dev.of_node;
 	struct resource res;
 	struct of_flash *info;
-	const char *probe_type = match->data;
+	const char *probe_type;
 	const __be32 *width;
 	int err;
 	int i;
@@ -233,6 +232,10 @@ static int __devinit of_flash_probe(struct platform_device *dev,
 	struct mtd_info **mtd_list = NULL;
 	resource_size_t res_size;
 
+	if (!dev->dev.of_match)
+		return -EINVAL;
+	probe_type = dev->dev.of_match->data;
+
 	reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);
 
 	/*
@@ -410,7 +413,7 @@ static struct of_device_id of_flash_match[] = {
 };
 MODULE_DEVICE_TABLE(of, of_flash_match);
 
-static struct of_platform_driver of_flash_driver = {
+static struct platform_driver of_flash_driver = {
 	.driver = {
 		.name = "of-flash",
 		.owner = THIS_MODULE,
@@ -422,12 +425,12 @@ static struct of_platform_driver of_flash_driver = {
 
 static int __init of_flash_init(void)
 {
-	return of_register_platform_driver(&of_flash_driver);
+	return platform_driver_register(&of_flash_driver);
 }
 
 static void __exit of_flash_exit(void)
 {
-	of_unregister_platform_driver(&of_flash_driver);
+	platform_driver_unregister(&of_flash_driver);
 }
 
 module_init(of_flash_init);
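
The probe-signature and registration changes above are one instance of a tree-wide conversion this merge pulls in: of_platform_driver is folded into the plain platform_driver, probe functions lose their 'const struct of_device_id *match' argument, and registration switches to platform_driver_register(). A minimal sketch of the pattern under that assumption, with hypothetical "example" names rather than code from any one driver here:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>

static int __devinit example_probe(struct platform_device *pdev)
{
	/* Match data is reached through the device instead of arriving
	 * as a 'const struct of_device_id *match' probe argument. */
	if (!pdev->dev.of_node)
		return -EINVAL;
	return 0;
}

static const struct of_device_id example_match[] = {
	{ .compatible = "vendor,example-flash" },	/* hypothetical */
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_match);

static struct platform_driver example_driver = {
	.driver = {
		.name		= "example-flash",
		.owner		= THIS_MODULE,
		.of_match_table	= example_match,	/* binds by OF compatible */
	},
	.probe = example_probe,
};

static int __init example_init(void)
{
	/* of_register_platform_driver() is gone; the generic call is used. */
	return platform_driver_register(&example_driver);
}
module_init(example_init);

The same mechanical change repeats in sun_uflash.c, fsl_upm.c, mpc5121_nfc.c, ndfc.c, pasemi_nand.c and socrates_nand.c below.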
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 3582ba1f9b09..3f1cb328a574 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -108,7 +108,7 @@ int uflash_devinit(struct platform_device *op, struct device_node *dp)
 	return 0;
 }
 
-static int __devinit uflash_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit uflash_probe(struct platform_device *op)
 {
 	struct device_node *dp = op->dev.of_node;
 
@@ -148,7 +148,7 @@ static const struct of_device_id uflash_match[] = {
 
 MODULE_DEVICE_TABLE(of, uflash_match);
 
-static struct of_platform_driver uflash_driver = {
+static struct platform_driver uflash_driver = {
 	.driver = {
 		.name = DRIVER_NAME,
 		.owner = THIS_MODULE,
@@ -160,12 +160,12 @@ static struct of_platform_driver uflash_driver = {
 
 static int __init uflash_init(void)
 {
-	return of_register_platform_driver(&uflash_driver);
+	return platform_driver_register(&uflash_driver);
 }
 
 static void __exit uflash_exit(void)
 {
-	of_unregister_platform_driver(&uflash_driver);
+	platform_driver_unregister(&uflash_driver);
 }
 
 module_init(uflash_init);
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 650511304030..a534e1f0c348 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -124,7 +124,7 @@ int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
 	if (kthread_should_stop())
 		return 1;
 
-	return !elv_queue_empty(dev->rq);
+	return dev->bg_stop;
 }
 EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
 
@@ -141,6 +141,7 @@ static int mtd_blktrans_thread(void *arg)
 	while (!kthread_should_stop()) {
 		int res;
 
+		dev->bg_stop = false;
 		if (!req && !(req = blk_fetch_request(rq))) {
 			if (tr->background && !background_done) {
 				spin_unlock_irq(rq->queue_lock);
@@ -152,7 +153,7 @@ static int mtd_blktrans_thread(void *arg)
 				 * Do background processing just once per idle
 				 * period.
 				 */
-				background_done = 1;
+				background_done = !dev->bg_stop;
 				continue;
 			}
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -198,8 +199,10 @@ static void mtd_blktrans_request(struct request_queue *rq)
 	if (!dev)
 		while ((req = blk_fetch_request(rq)) != NULL)
 			__blk_end_request_all(req, -ENODEV);
-	else
+	else {
+		dev->bg_stop = true;
 		wake_up_process(dev->thread);
+	}
 }
 
 static int blktrans_open(struct block_device *bdev, fmode_t mode)
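
This hunk is the fixup the merge message describes: upstream removed elv_queue_empty(), which mtd_blktrans_cease_background() had used to ask whether new requests are queued, so the transport layer now tracks that itself in a bg_stop flag. A condensed sketch of the resulting handshake, with the surrounding mtd_blktrans machinery reduced to the flag logic (names follow the patch; the trimmed-down struct is illustrative):

#include <linux/kthread.h>
#include <linux/sched.h>

struct blktrans_dev_sketch {
	bool bg_stop;			/* new I/O arrived; background work should yield */
	struct task_struct *thread;	/* the mtd_blktrans worker thread */
};

/* Polled from a driver's background routine: time to stop? */
static int cease_background(struct blktrans_dev_sketch *dev)
{
	if (kthread_should_stop())
		return 1;
	return dev->bg_stop;		/* was: !elv_queue_empty(dev->rq) */
}

/* Block-layer request callback: flag pending work, wake the thread. */
static void request_arrived(struct blktrans_dev_sketch *dev)
{
	dev->bg_stop = true;
	wake_up_process(dev->thread);
}

The worker clears the flag at the top of each loop iteration, and 'background_done = !dev->bg_stop' retries the idle-period background pass if a request arrived while it ran.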
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 78205ac2b10f..a92054e945e1 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -121,23 +121,6 @@ config MTD_NAND_OMAP2
 	help
 	  Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
 
-config MTD_NAND_OMAP_PREFETCH
-	bool "GPMC prefetch support for NAND Flash device"
-	depends on MTD_NAND_OMAP2
-	default y
-	help
-	  The NAND device can be accessed for Read/Write using GPMC PREFETCH engine
-	  to improve the performance.
-
-config MTD_NAND_OMAP_PREFETCH_DMA
-	depends on MTD_NAND_OMAP_PREFETCH
-	bool "DMA mode"
-	default n
-	help
-	  The GPMC PREFETCH engine can be configured eigther in MPU interrupt mode
-	  or in DMA interrupt mode.
-	  Say y for DMA mode or MPU mode will be used
-
 config MTD_NAND_IDS
 	tristate
 
@@ -491,7 +474,7 @@ config MTD_NAND_MPC5121_NFC
 
 config MTD_NAND_MXC
 	tristate "MXC NAND support"
-	depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3 || ARCH_MX51
+	depends on IMX_HAVE_PLATFORM_MXC_NAND
 	help
 	  This enables the driver for the NAND flash controller on the
 	  MXC processors.
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index efdcca94ce55..073ee026a17c 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -217,8 +217,7 @@ err:
 	return ret;
 }
 
-static int __devinit fun_probe(struct platform_device *ofdev,
-			       const struct of_device_id *ofid)
+static int __devinit fun_probe(struct platform_device *ofdev)
 {
 	struct fsl_upm_nand *fun;
 	struct resource io_res;
@@ -360,7 +359,7 @@ static const struct of_device_id of_fun_match[] = {
 };
 MODULE_DEVICE_TABLE(of, of_fun_match);
 
-static struct of_platform_driver of_fun_driver = {
+static struct platform_driver of_fun_driver = {
 	.driver = {
 		.name = "fsl,upm-nand",
 		.owner = THIS_MODULE,
@@ -372,13 +371,13 @@ static struct of_platform_driver of_fun_driver = {
 
 static int __init fun_module_init(void)
 {
-	return of_register_platform_driver(&of_fun_driver);
+	return platform_driver_register(&of_fun_driver);
 }
 module_init(fun_module_init);
 
 static void __exit fun_module_exit(void)
 {
-	of_unregister_platform_driver(&of_fun_driver);
+	platform_driver_unregister(&of_fun_driver);
 }
 module_exit(fun_module_exit);
 
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index ddaf0011aa88..0b81b5b499d1 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -651,8 +651,7 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
 	iounmap(prv->csreg);
 }
 
-static int __devinit mpc5121_nfc_probe(struct platform_device *op,
-				       const struct of_device_id *match)
+static int __devinit mpc5121_nfc_probe(struct platform_device *op)
 {
 	struct device_node *rootnode, *dn = op->dev.of_node;
 	struct device *dev = &op->dev;
@@ -892,7 +891,7 @@ static struct of_device_id mpc5121_nfc_match[] __devinitdata = {
 	{},
 };
 
-static struct of_platform_driver mpc5121_nfc_driver = {
+static struct platform_driver mpc5121_nfc_driver = {
 	.probe		= mpc5121_nfc_probe,
 	.remove		= __devexit_p(mpc5121_nfc_remove),
 	.driver		= {
@@ -904,14 +903,14 @@ static struct of_platform_driver mpc5121_nfc_driver = {
 
 static int __init mpc5121_nfc_init(void)
 {
-	return of_register_platform_driver(&mpc5121_nfc_driver);
+	return platform_driver_register(&mpc5121_nfc_driver);
 }
 
 module_init(mpc5121_nfc_init);
 
 static void __exit mpc5121_nfc_cleanup(void)
 {
-	of_unregister_platform_driver(&mpc5121_nfc_driver);
+	platform_driver_unregister(&mpc5121_nfc_driver);
 }
 
 module_exit(mpc5121_nfc_cleanup);
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index b7d5a5b9a543..42a95fb41504 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -747,9 +747,8 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
 	/*
 	 * MXC NANDFC can only perform full page+spare or
 	 * spare-only read/write.  When the upper layers
-	 * layers perform a read/write buf operation,
-	 * we will used the saved column address to index into
-	 * the full page.
+	 * perform a read/write buf operation, the saved column
+	 * address is used to index into the full page.
 	 */
 	host->send_addr(host, 0, page_addr == -1);
 	if (mtd->writesize > 512)
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index c9ae0a5023b6..bbe6d451290d 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -225,8 +225,7 @@ err:
 	return ret;
 }
 
-static int __devinit ndfc_probe(struct platform_device *ofdev,
-				const struct of_device_id *match)
+static int __devinit ndfc_probe(struct platform_device *ofdev)
 {
 	struct ndfc_controller *ndfc = &ndfc_ctrl;
 	const __be32 *reg;
@@ -292,7 +291,7 @@ static const struct of_device_id ndfc_match[] = {
 };
 MODULE_DEVICE_TABLE(of, ndfc_match);
 
-static struct of_platform_driver ndfc_driver = {
+static struct platform_driver ndfc_driver = {
 	.driver = {
 		.name = "ndfc",
 		.owner = THIS_MODULE,
@@ -304,12 +303,12 @@ static struct of_platform_driver ndfc_driver = {
 
 static int __init ndfc_nand_init(void)
 {
-	return of_register_platform_driver(&ndfc_driver);
+	return platform_driver_register(&ndfc_driver);
 }
 
 static void __exit ndfc_nand_exit(void)
 {
-	of_unregister_platform_driver(&ndfc_driver);
+	platform_driver_unregister(&ndfc_driver);
 }
 
 module_init(ndfc_nand_init);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index d7a4e2550b13..da9a351c9d79 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -11,6 +11,7 @@
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
+#include <linux/interrupt.h>
 #include <linux/jiffies.h>
 #include <linux/sched.h>
 #include <linux/mtd/mtd.h>
@@ -24,6 +25,7 @@
 #include <plat/nand.h>
 
 #define	DRIVER_NAME	"omap2-nand"
+#define	OMAP_NAND_TIMEOUT_MS	5000
 
 #define NAND_Ecc_P1e		(1 << 0)
 #define NAND_Ecc_P2e		(1 << 1)
@@ -96,26 +98,19 @@
 static const char *part_probes[] = { "cmdlinepart", NULL };
 #endif
 
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH
-static int use_prefetch = 1;
-
-/* "modprobe ... use_prefetch=0" etc */
-module_param(use_prefetch, bool, 0);
-MODULE_PARM_DESC(use_prefetch, "enable/disable use of PREFETCH");
-
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
-static int use_dma = 1;
-
-/* "modprobe ... use_dma=0" etc */
-module_param(use_dma, bool, 0);
-MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
-#else
-static const int use_dma;
-#endif
-#else
-const int use_prefetch;
-static const int use_dma;
-#endif
+/* oob info generated runtime depending on ecc algorithm and layout selected */
+static struct nand_ecclayout omap_oobinfo;
+/* Define some generic bad / good block scan pattern which are used
+ * while scanning a device for factory marked good / bad blocks
+ */
+static uint8_t scan_ff_pattern[] = { 0xff };
+static struct nand_bbt_descr bb_descrip_flashbased = {
+	.options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
+	.offs = 0,
+	.len = 1,
+	.pattern = scan_ff_pattern,
+};
+
 
 struct omap_nand_info {
 	struct nand_hw_control		controller;
@@ -129,6 +124,13 @@ struct omap_nand_info {
 	unsigned long			phys_base;
 	struct completion		comp;
 	int				dma_ch;
+	int				gpmc_irq;
+	enum {
+		OMAP_NAND_IO_READ = 0,	/* read */
+		OMAP_NAND_IO_WRITE,	/* write */
+	} iomode;
+	u_char				*buf;
+	int				buf_len;
 };
 
 /**
@@ -256,7 +258,8 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
 	}
 
 	/* configure and start prefetch transfer */
-	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
+	ret = gpmc_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
 	if (ret) {
 		/* PFPW engine is busy, use cpu copy method */
 		if (info->nand.options & NAND_BUSWIDTH_16)
@@ -288,9 +291,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
 {
 	struct omap_nand_info *info = container_of(mtd,
 						struct omap_nand_info, mtd);
-	uint32_t pref_count = 0, w_count = 0;
+	uint32_t w_count = 0;
 	int i = 0, ret = 0;
 	u16 *p;
+	unsigned long tim, limit;
 
 	/* take care of subpage writes */
 	if (len % 2 != 0) {
@@ -300,7 +304,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
 	}
 
 	/* configure and start prefetch transfer */
-	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1);
+	ret = gpmc_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
 	if (ret) {
 		/* PFPW engine is busy, use cpu copy method */
 		if (info->nand.options & NAND_BUSWIDTH_16)
@@ -316,15 +321,17 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
 			iowrite16(*p++, info->nand.IO_ADDR_W);
 		}
 		/* wait for data to flushed-out before reset the prefetch */
-		do {
-			pref_count = gpmc_read_status(GPMC_PREFETCH_COUNT);
-		} while (pref_count);
+		tim = 0;
+		limit = (loops_per_jiffy *
+					msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+		while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+			cpu_relax();
+
 		/* disable and stop the PFPW engine */
 		gpmc_prefetch_reset(info->gpmc_cs);
 	}
 }
 
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
 /*
  * omap_nand_dma_cb: callback on the completion of dma transfer
  * @lch: logical channel
@@ -348,14 +355,15 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 {
 	struct omap_nand_info *info = container_of(mtd,
 					struct omap_nand_info, mtd);
-	uint32_t prefetch_status = 0;
 	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
 							DMA_FROM_DEVICE;
 	dma_addr_t dma_addr;
 	int ret;
+	unsigned long tim, limit;
 
-	/* The fifo depth is 64 bytes. We have a sync at each frame and frame
-	 * length is 64 bytes.
+	/* The fifo depth is 64 bytes max.
+	 * But configure the FIFO-threahold to 32 to get a sync at each frame
+	 * and frame length is 32 bytes.
 	 */
 	int buf_len = len >> 6;
 
@@ -396,9 +404,10 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 					OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
 	}
 	/* configure and start prefetch transfer */
-	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write);
+	ret = gpmc_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
 	if (ret)
-		/* PFPW engine is busy, use cpu copy methode */
+		/* PFPW engine is busy, use cpu copy method */
 		goto out_copy;
 
 	init_completion(&info->comp);
@@ -407,10 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 
 	/* setup and start DMA using dma_addr */
 	wait_for_completion(&info->comp);
+	tim = 0;
+	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+	while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+		cpu_relax();
 
-	do {
-		prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT);
-	} while (prefetch_status);
 	/* disable and stop the PFPW engine */
 	gpmc_prefetch_reset(info->gpmc_cs);
 
@@ -426,14 +436,6 @@ out_copy:
 		: omap_write_buf8(mtd, (u_char *) addr, len);
 	return 0;
 }
-#else
-static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) {}
-static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
-				unsigned int len, int is_write)
-{
-	return 0;
-}
-#endif
 
 /**
  * omap_read_buf_dma_pref - read data from NAND controller into buffer
@@ -466,6 +468,157 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd,
 	omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
 }
 
+/*
+ * omap_nand_irq - GMPC irq handler
+ * @this_irq: gpmc irq number
+ * @dev: omap_nand_info structure pointer is passed here
+ */
+static irqreturn_t omap_nand_irq(int this_irq, void *dev)
+{
+	struct omap_nand_info *info = (struct omap_nand_info *) dev;
+	u32 bytes;
+	u32 irq_stat;
+
+	irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
+	bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+	bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
+	if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
+		if (irq_stat & 0x2)
+			goto done;
+
+		if (info->buf_len && (info->buf_len < bytes))
+			bytes = info->buf_len;
+		else if (!info->buf_len)
+			bytes = 0;
+		iowrite32_rep(info->nand.IO_ADDR_W,
+						(u32 *)info->buf, bytes >> 2);
+		info->buf = info->buf + bytes;
+		info->buf_len -= bytes;
+
+	} else {
+		ioread32_rep(info->nand.IO_ADDR_R,
+						(u32 *)info->buf, bytes >> 2);
+		info->buf = info->buf + bytes;
+
+		if (irq_stat & 0x2)
+			goto done;
+	}
+	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+
+	return IRQ_HANDLED;
+
+done:
+	complete(&info->comp);
+	/* disable irq */
+	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);
+
+	/* clear status */
+	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * omap_read_buf_irq_pref - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store date
+ * @len: number of bytes to read
+ */
+static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct omap_nand_info *info = container_of(mtd,
+						struct omap_nand_info, mtd);
+	int ret = 0;
+
+	if (len <= mtd->oobsize) {
+		omap_read_buf_pref(mtd, buf, len);
+		return;
+	}
+
+	info->iomode = OMAP_NAND_IO_READ;
+	info->buf = buf;
+	init_completion(&info->comp);
+
+	/* configure and start prefetch transfer */
+	ret = gpmc_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
+	if (ret)
+		/* PFPW engine is busy, use cpu copy method */
+		goto out_copy;
+
+	info->buf_len = len;
+	/* enable irq */
+	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
+		(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+	/* waiting for read to complete */
+	wait_for_completion(&info->comp);
+
+	/* disable and stop the PFPW engine */
+	gpmc_prefetch_reset(info->gpmc_cs);
+	return;
+
+out_copy:
+	if (info->nand.options & NAND_BUSWIDTH_16)
+		omap_read_buf16(mtd, buf, len);
+	else
+		omap_read_buf8(mtd, buf, len);
+}
+
+/*
+ * omap_write_buf_irq_pref - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf_irq_pref(struct mtd_info *mtd,
+					const u_char *buf, int len)
+{
+	struct omap_nand_info *info = container_of(mtd,
+						struct omap_nand_info, mtd);
+	int ret = 0;
+	unsigned long tim, limit;
+
+	if (len <= mtd->oobsize) {
+		omap_write_buf_pref(mtd, buf, len);
+		return;
+	}
+
+	info->iomode = OMAP_NAND_IO_WRITE;
+	info->buf = (u_char *) buf;
+	init_completion(&info->comp);
+
+	/* configure and start prefetch transfer : size=24 */
+	ret = gpmc_prefetch_enable(info->gpmc_cs,
+		(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
+	if (ret)
+		/* PFPW engine is busy, use cpu copy method */
+		goto out_copy;
+
+	info->buf_len = len;
+	/* enable irq */
+	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
+			(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+	/* waiting for write to complete */
+	wait_for_completion(&info->comp);
+	/* wait for data to flushed-out before reset the prefetch */
+	tim = 0;
+	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+	while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+		cpu_relax();
+
+	/* disable and stop the PFPW engine */
+	gpmc_prefetch_reset(info->gpmc_cs);
+	return;
+
+out_copy:
+	if (info->nand.options & NAND_BUSWIDTH_16)
+		omap_write_buf16(mtd, buf, len);
+	else
+		omap_write_buf8(mtd, buf, len);
+}
+
 /**
  * omap_verify_buf - Verify chip data against buffer
  * @mtd: MTD device structure
@@ -487,8 +640,6 @@ static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
 	return 0;
 }
 
-#ifdef CONFIG_MTD_NAND_OMAP_HWECC
-
 /**
  * gen_true_ecc - This function will generate true ECC value
  * @ecc_buf: buffer to store ecc code
@@ -716,8 +867,6 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
 	gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
 }
 
-#endif
-
 /**
  * omap_wait - wait until the command is done
  * @mtd: MTD device structure
@@ -787,6 +936,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 	struct omap_nand_info		*info;
 	struct omap_nand_platform_data	*pdata;
 	int				err;
+	int				i, offset;
 
 	pdata = pdev->dev.platform_data;
 	if (pdata == NULL) {
@@ -812,7 +962,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 	info->mtd.name = dev_name(&pdev->dev);
 	info->mtd.owner = THIS_MODULE;
 
-	info->nand.options |= pdata->devsize ? NAND_BUSWIDTH_16 : 0;
+	info->nand.options = pdata->devsize;
 	info->nand.options |= NAND_SKIP_BBTSCAN;
 
 	/* NAND write protect off */
@@ -850,28 +1000,13 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 		info->nand.chip_delay = 50;
 	}
 
-	if (use_prefetch) {
-
+	switch (pdata->xfer_type) {
+	case NAND_OMAP_PREFETCH_POLLED:
 		info->nand.read_buf   = omap_read_buf_pref;
 		info->nand.write_buf  = omap_write_buf_pref;
-		if (use_dma) {
-			err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
-				omap_nand_dma_cb, &info->comp, &info->dma_ch);
-			if (err < 0) {
-				info->dma_ch = -1;
-				printk(KERN_WARNING "DMA request failed."
-					" Non-dma data transfer mode\n");
-			} else {
-				omap_set_dma_dest_burst_mode(info->dma_ch,
-						OMAP_DMA_DATA_BURST_16);
-				omap_set_dma_src_burst_mode(info->dma_ch,
-						OMAP_DMA_DATA_BURST_16);
-
-				info->nand.read_buf   = omap_read_buf_dma_pref;
-				info->nand.write_buf  = omap_write_buf_dma_pref;
-			}
-		}
-	} else {
+		break;
+
+	case NAND_OMAP_POLLED:
 		if (info->nand.options & NAND_BUSWIDTH_16) {
 			info->nand.read_buf   = omap_read_buf16;
 			info->nand.write_buf  = omap_write_buf16;
@@ -879,20 +1014,61 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 			info->nand.read_buf   = omap_read_buf8;
 			info->nand.write_buf  = omap_write_buf8;
 		}
+		break;
+
+	case NAND_OMAP_PREFETCH_DMA:
+		err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
+				omap_nand_dma_cb, &info->comp, &info->dma_ch);
+		if (err < 0) {
+			info->dma_ch = -1;
+			dev_err(&pdev->dev, "DMA request failed!\n");
+			goto out_release_mem_region;
+		} else {
+			omap_set_dma_dest_burst_mode(info->dma_ch,
+					OMAP_DMA_DATA_BURST_16);
+			omap_set_dma_src_burst_mode(info->dma_ch,
+					OMAP_DMA_DATA_BURST_16);
+
+			info->nand.read_buf   = omap_read_buf_dma_pref;
+			info->nand.write_buf  = omap_write_buf_dma_pref;
+		}
+		break;
+
+	case NAND_OMAP_PREFETCH_IRQ:
+		err = request_irq(pdata->gpmc_irq,
+				omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
+		if (err) {
+			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
+							pdata->gpmc_irq, err);
+			goto out_release_mem_region;
+		} else {
+			info->gpmc_irq       = pdata->gpmc_irq;
+			info->nand.read_buf  = omap_read_buf_irq_pref;
+			info->nand.write_buf = omap_write_buf_irq_pref;
+		}
+		break;
+
+	default:
+		dev_err(&pdev->dev,
+			"xfer_type(%d) not supported!\n", pdata->xfer_type);
+		err = -EINVAL;
+		goto out_release_mem_region;
 	}
-	info->nand.verify_buf = omap_verify_buf;
 
-#ifdef CONFIG_MTD_NAND_OMAP_HWECC
-	info->nand.ecc.bytes		= 3;
-	info->nand.ecc.size		= 512;
-	info->nand.ecc.calculate	= omap_calculate_ecc;
-	info->nand.ecc.hwctl		= omap_enable_hwecc;
-	info->nand.ecc.correct		= omap_correct_data;
-	info->nand.ecc.mode		= NAND_ECC_HW;
+	info->nand.verify_buf = omap_verify_buf;
 
-#else
-	info->nand.ecc.mode = NAND_ECC_SOFT;
-#endif
+	/* selsect the ecc type */
+	if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
+		info->nand.ecc.mode = NAND_ECC_SOFT;
+	else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
+		(pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
+		info->nand.ecc.bytes		= 3;
+		info->nand.ecc.size		= 512;
+		info->nand.ecc.calculate	= omap_calculate_ecc;
+		info->nand.ecc.hwctl		= omap_enable_hwecc;
+		info->nand.ecc.correct		= omap_correct_data;
+		info->nand.ecc.mode		= NAND_ECC_HW;
+	}
 
 	/* DIP switches on some boards change between 8 and 16 bit
 	 * bus widths for flash.  Try the other width if the first try fails.
@@ -905,6 +1081,26 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 		}
 	}
 
+	/* rom code layout */
+	if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {
+
+		if (info->nand.options & NAND_BUSWIDTH_16)
+			offset = 2;
+		else {
+			offset = 1;
+			info->nand.badblock_pattern = &bb_descrip_flashbased;
+		}
+		omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16);
+		for (i = 0; i < omap_oobinfo.eccbytes; i++)
+			omap_oobinfo.eccpos[i] = i+offset;
+
+		omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes;
+		omap_oobinfo.oobfree->length = info->mtd.oobsize -
+					(offset + omap_oobinfo.eccbytes);
+
+		info->nand.ecc.layout = &omap_oobinfo;
+	}
+
 #ifdef CONFIG_MTD_PARTITIONS
 	err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
 	if (err > 0)
@@ -934,9 +1130,12 @@ static int omap_nand_remove(struct platform_device *pdev)
 								mtd);
 
 	platform_set_drvdata(pdev, NULL);
-	if (use_dma)
+	if (info->dma_ch != -1)
 		omap_free_dma(info->dma_ch);
 
+	if (info->gpmc_irq)
+		free_irq(info->gpmc_irq, info);
+
 	/* Release NAND device, its internal structures and partitions */
 	nand_release(&info->mtd);
 	iounmap(info->nand.IO_ADDR_R);
@@ -955,16 +1154,8 @@ static struct platform_driver omap_nand_driver = {
 
 static int __init omap_nand_init(void)
 {
-	printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME);
+	pr_info("%s driver initializing\n", DRIVER_NAME);
 
-	/* This check is required if driver is being
-	 * loaded run time as a module
-	 */
-	if ((1 == use_dma) && (0 == use_prefetch)) {
-		printk(KERN_INFO"Wrong parameters: 'use_dma' can not be 1 "
-			"without use_prefetch'. Prefetch will not be"
-			" used in either mode (mpu or dma)\n");
-	}
 	return platform_driver_register(&omap_nand_driver);
 }
 
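
A pattern worth noting in the hunks above: every open-ended 'do { } while (gpmc_read_status(GPMC_PREFETCH_COUNT));' spin is replaced by a poll bounded by OMAP_NAND_TIMEOUT_MS (5000 ms), so a wedged prefetch engine can no longer hang the CPU forever. The idiom in isolation, as a kernel-context sketch; hw_busy() is a hypothetical stand-in for the real gpmc_read_status() poll:

#include <linux/delay.h>	/* loops_per_jiffy */
#include <linux/jiffies.h>	/* msecs_to_jiffies() */
#include <linux/sched.h>	/* cpu_relax() via asm/processor.h */

#define OMAP_NAND_TIMEOUT_MS	5000

extern int hw_busy(void);	/* placeholder for gpmc_read_status(GPMC_PREFETCH_COUNT) */

static void bounded_poll(void)
{
	unsigned long tim = 0, limit;

	/* Crude iteration bound: loops_per_jiffy spins per jiffy, times
	 * the number of jiffies in the 5 s budget. */
	limit = loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS);
	while (hw_busy() && (tim++ < limit))
		cpu_relax();
}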
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index bb277a54986f..59efa829ef24 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -89,8 +89,7 @@ int pasemi_device_ready(struct mtd_info *mtd)
 	return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
 }
 
-static int __devinit pasemi_nand_probe(struct platform_device *ofdev,
-				       const struct of_device_id *match)
+static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
 {
 	struct pci_dev *pdev;
 	struct device_node *np = ofdev->dev.of_node;
@@ -219,7 +218,7 @@ static const struct of_device_id pasemi_nand_match[] =
 
 MODULE_DEVICE_TABLE(of, pasemi_nand_match);
 
-static struct of_platform_driver pasemi_nand_driver =
+static struct platform_driver pasemi_nand_driver =
 {
 	.driver = {
 		.name = (char*)driver_name,
@@ -232,13 +231,13 @@ static struct of_platform_driver pasemi_nand_driver =
 
 static int __init pasemi_nand_init(void)
 {
-	return of_register_platform_driver(&pasemi_nand_driver);
+	return platform_driver_register(&pasemi_nand_driver);
 }
 module_init(pasemi_nand_init);
 
 static void __exit pasemi_nand_exit(void)
 {
-	of_unregister_platform_driver(&pasemi_nand_driver);
+	platform_driver_unregister(&pasemi_nand_driver);
 }
 module_exit(pasemi_nand_exit);
 
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index d9d7efbc77cc..6322d1fb5d62 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -930,7 +930,7 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
 
 	init_completion(&dev->dma_done);
 
-	dev->card_workqueue = create_freezeable_workqueue(DRV_NAME);
+	dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
 
 	if (!dev->card_workqueue)
 		goto error9;
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index a8e403eebedb..a853548986f0 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -162,8 +162,7 @@ static const char *part_probes[] = { "cmdlinepart", NULL };
 /*
  * Probe for the NAND device.
  */
-static int __devinit socrates_nand_probe(struct platform_device *ofdev,
-					 const struct of_device_id *ofid)
+static int __devinit socrates_nand_probe(struct platform_device *ofdev)
 {
 	struct socrates_nand_host *host;
 	struct mtd_info *mtd;
@@ -300,7 +299,7 @@ static const struct of_device_id socrates_nand_match[] =
 
 MODULE_DEVICE_TABLE(of, socrates_nand_match);
 
-static struct of_platform_driver socrates_nand_driver = {
+static struct platform_driver socrates_nand_driver = {
 	.driver = {
 		.name = "socrates_nand",
 		.owner = THIS_MODULE,
@@ -312,12 +311,12 @@ static struct of_platform_driver socrates_nand_driver = {
 
 static int __init socrates_nand_init(void)
 {
-	return of_register_platform_driver(&socrates_nand_driver);
+	return platform_driver_register(&socrates_nand_driver);
 }
 
 static void __exit socrates_nand_exit(void)
 {
-	of_unregister_platform_driver(&socrates_nand_driver);
+	platform_driver_unregister(&socrates_nand_driver);
 }
 
 module_init(socrates_nand_init);
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 3041d1f7ae3f..38fb16771f85 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -319,7 +319,7 @@ static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
 
 static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
 {
-	struct mfd_cell *cell = dev_get_platdata(&dev->dev);
+	const struct mfd_cell *cell = mfd_get_cell(dev);
 	int ret;
 
 	if (cell->enable) {
@@ -363,7 +363,7 @@ static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
 
 static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
 {
-	struct mfd_cell *cell = dev_get_platdata(&dev->dev);
+	const struct mfd_cell *cell = mfd_get_cell(dev);
 
 	tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE);
 	if (cell->disable)
@@ -372,8 +372,7 @@ static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
 
 static int tmio_probe(struct platform_device *dev)
 {
-	struct mfd_cell *cell = dev_get_platdata(&dev->dev);
-	struct tmio_nand_data *data = cell->driver_data;
+	struct tmio_nand_data *data = mfd_get_data(dev);
 	struct resource *fcr = platform_get_resource(dev,
 			IORESOURCE_MEM, 0);
 	struct resource *ccr = platform_get_resource(dev,
@@ -516,7 +515,7 @@ static int tmio_remove(struct platform_device *dev)
 #ifdef CONFIG_PM
 static int tmio_suspend(struct platform_device *dev, pm_message_t state)
 {
-	struct mfd_cell *cell = dev_get_platdata(&dev->dev);
+	const struct mfd_cell *cell = mfd_get_cell(dev);
 
 	if (cell->suspend)
 		cell->suspend(dev);
@@ -527,7 +526,7 @@ static int tmio_suspend(struct platform_device *dev, pm_message_t state)
 
 static int tmio_resume(struct platform_device *dev)
 {
-	struct mfd_cell *cell = dev_get_platdata(&dev->dev);
+	const struct mfd_cell *cell = mfd_get_cell(dev);
 
 	/* FIXME - is this required or merely another attack of the broken
 	 * SHARP platform? Looks suspicious.
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 4dbd0f58eebf..4f426195f8db 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -32,7 +32,7 @@ config MTD_ONENAND_OMAP2
 
 config MTD_ONENAND_SAMSUNG
 	tristate "OneNAND on Samsung SOC controller support"
-	depends on ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_S5PV310
+	depends on ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS4
 	help
 	  Support for a OneNAND flash device connected to an Samsung SOC.
 	  S3C64XX/S5PC100 use command mapping method.
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index ea32c2fc4622..f591f615d3f6 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -63,7 +63,7 @@ struct omap2_onenand {
 	struct completion dma_done;
 	int dma_channel;
 	int freq;
-	int (*setup)(void __iomem *base, int freq);
+	int (*setup)(void __iomem *base, int *freq_ptr);
 	struct regulator *regulator;
 };
 
@@ -148,11 +148,9 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
 			wait_err("controller error", state, ctrl, intr);
 			return -EIO;
 		}
-		if ((intr & intr_flags) != intr_flags) {
-			wait_err("timeout", state, ctrl, intr);
-			return -EIO;
-		}
-		return 0;
+		if ((intr & intr_flags) == intr_flags)
+			return 0;
+		/* Continue in wait for interrupt branch */
 	}
 
 	if (state != FL_READING) {
@@ -581,7 +579,7 @@ static int __adjust_timing(struct device *dev, void *data)
 
 	/* DMA is not in use so this is all that is needed */
 	/* Revisit for OMAP3! */
-	ret = c->setup(c->onenand.base, c->freq);
+	ret = c->setup(c->onenand.base, &c->freq);
 
 	return ret;
 }
@@ -674,7 +672,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
 	}
 
 	if (pdata->onenand_setup != NULL) {
-		r = pdata->onenand_setup(c->onenand.base, c->freq);
+		r = pdata->onenand_setup(c->onenand.base, &c->freq);
 		if (r < 0) {
 			dev_err(&pdev->dev, "Onenand platform setup failed: "
 				"%d\n", r);
@@ -719,8 +717,8 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
 	}
 
 	dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
-		 "base %p\n", c->gpmc_cs, c->phys_base,
-		 c->onenand.base);
+		 "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base,
+		 c->onenand.base, c->freq);
 
 	c->pdev = pdev;
 	c->mtd.name = dev_name(&pdev->dev);
@@ -757,24 +755,6 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
 	if ((r = onenand_scan(&c->mtd, 1)) < 0)
 		goto err_release_regulator;
 
-	switch ((c->onenand.version_id >> 4) & 0xf) {
-	case 0:
-		c->freq = 40;
-		break;
-	case 1:
-		c->freq = 54;
-		break;
-	case 2:
-		c->freq = 66;
-		break;
-	case 3:
-		c->freq = 83;
-		break;
-	case 4:
-		c->freq = 104;
-		break;
-	}
-
 #ifdef CONFIG_MTD_PARTITIONS
 	r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
 	if (r > 0)
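
The signature change from 'int freq' to 'int *freq_ptr' turns the board's onenand_setup hook into a two-way channel: instead of the driver guessing the clock from the chip's version_id (the deleted switch above), the hook reports back the frequency it actually configured, which probe then logs. A sketch of a hook conforming to the new contract; the body is illustrative, not from this patch:

#include <linux/compiler.h>	/* __iomem */

/* Board-side hook matching the new 'int (*setup)(void __iomem *, int *)'. */
static int board_onenand_setup(void __iomem *base, int *freq_ptr)
{
	/* ... probe the chip, program GPMC timings for the chosen mode ... */

	*freq_ptr = 83;		/* report the negotiated clock, in MHz (example value) */
	return 0;
}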
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 6405a95dc5bd..2b0daae4018d 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1276,7 +1276,7 @@ static struct mtd_blktrans_ops sm_ftl_ops = {
 static __init int sm_module_init(void)
 {
 	int error = 0;
-	cache_flush_workqueue = create_freezeable_workqueue("smflush");
+	cache_flush_workqueue = create_freezable_workqueue("smflush");
 
 	if (IS_ERR(cache_flush_workqueue))
 		return PTR_ERR(cache_flush_workqueue);
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 3cf193fb5e00..6abeb4f13403 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -52,6 +52,12 @@ config MTD_UBI_GLUEBI
 	   work on top of UBI. Do not enable this unless you use legacy
 	   software.
 
-source "drivers/mtd/ubi/Kconfig.debug"
+config MTD_UBI_DEBUG
+	bool "UBI debugging"
+	depends on SYSFS
+	select DEBUG_FS
+	select KALLSYMS_ALL if KALLSYMS && DEBUG_KERNEL
+	help
+	  This option enables UBI debugging.
 
 endif # MTD_UBI
diff --git a/drivers/mtd/ubi/Kconfig.debug b/drivers/mtd/ubi/Kconfig.debug
deleted file mode 100644
index fad4adc0fe2c..000000000000
--- a/drivers/mtd/ubi/Kconfig.debug
+++ /dev/null
@@ -1,73 +0,0 @@
1comment "UBI debugging options"
2
3config MTD_UBI_DEBUG
4 bool "UBI debugging"
5 depends on SYSFS
6 select DEBUG_FS
7 select KALLSYMS_ALL if KALLSYMS && DEBUG_KERNEL
8 help
9 This option enables UBI debugging.
10
11if MTD_UBI_DEBUG
12
13config MTD_UBI_DEBUG_MSG
14 bool "UBI debugging messages"
15 help
16 This option enables UBI debugging messages.
17
18config MTD_UBI_DEBUG_PARANOID
19 bool "Extra self-checks"
20 help
21 This option enables extra checks in UBI code. Note this slows UBI down
22 significantly.
23
24config MTD_UBI_DEBUG_DISABLE_BGT
25 bool "Do not enable the UBI background thread"
26 help
27 This option switches the background thread off by default. The thread
28 may be also be enabled/disabled via UBI sysfs.
29
30config MTD_UBI_DEBUG_EMULATE_BITFLIPS
31 bool "Emulate flash bit-flips"
32 help
33 This option emulates bit-flips with probability 1/50, which in turn
34 causes scrubbing. Useful for debugging and stressing UBI.
35
36config MTD_UBI_DEBUG_EMULATE_WRITE_FAILURES
37 bool "Emulate flash write failures"
38 help
39 This option emulates write failures with probability 1/100. Useful for
40 debugging and testing how UBI handlines errors.
41
42config MTD_UBI_DEBUG_EMULATE_ERASE_FAILURES
43 bool "Emulate flash erase failures"
44 help
45 This option emulates erase failures with probability 1/100. Useful for
46 debugging and testing how UBI handlines errors.
47
48comment "Additional UBI debugging messages"
49
50config MTD_UBI_DEBUG_MSG_BLD
51 bool "Additional UBI initialization and build messages"
52 help
53 This option enables detailed UBI initialization and device build
54 debugging messages.
55
56config MTD_UBI_DEBUG_MSG_EBA
57 bool "Eraseblock association unit messages"
58 help
59 This option enables debugging messages from the UBI eraseblock
60 association unit.
61
62config MTD_UBI_DEBUG_MSG_WL
63 bool "Wear-leveling unit messages"
64 help
65 This option enables debugging messages from the UBI wear-leveling
66 unit.
67
68config MTD_UBI_DEBUG_MSG_IO
69 bool "Input/output unit messages"
70 help
71 This option enables debugging messages from the UBI input/output unit.
72
73endif # MTD_UBI_DEBUG
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index f49e49dc5928..65626c1c446d 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -672,33 +672,7 @@ static int io_init(struct ubi_device *ubi)
672 ubi->nor_flash = 1; 672 ubi->nor_flash = 1;
673 } 673 }
674 674
675 /* 675 ubi->min_io_size = ubi->mtd->writesize;
676 * Set UBI min. I/O size (@ubi->min_io_size). We use @mtd->writebufsize
677 * for these purposes, not @mtd->writesize. At the moment this does not
678 * matter for NAND, because currently @mtd->writebufsize is equivalent to
679 * @mtd->writesize for all NANDs. However, some CFI NOR flashes may
680 * have @mtd->writebufsize which is multiple of @mtd->writesize.
681 *
682 * The reason we use @mtd->writebufsize for @ubi->min_io_size is that
683 * UBI and UBIFS recovery algorithms rely on the fact that if there was
684 * an unclean power cut, then we can find offset of the last corrupted
685 * node, align the offset to @ubi->min_io_size, read the rest of the
686 * eraseblock starting from this offset, and check whether there are
687 * only 0xFF bytes. If yes, then we are probably dealing with a
688 * corruption caused by a power cut, if not, then this is probably some
689 * severe corruption.
690 *
691 * Thus, we have to use the maximum write unit size of the flash, which
692 * is @mtd->writebufsize, because @mtd->writesize is the minimum write
693 * size, not the maximum.
694 */
695 if (ubi->mtd->type == MTD_NANDFLASH)
696 ubi_assert(ubi->mtd->writebufsize == ubi->mtd->writesize);
697 else if (ubi->mtd->type == MTD_NORFLASH)
698 ubi_assert(ubi->mtd->writebufsize % ubi->mtd->writesize == 0);
699
700 ubi->min_io_size = ubi->mtd->writebufsize;
701
702 ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft; 676 ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
703 677
704 /* 678 /*
@@ -716,11 +690,25 @@ static int io_init(struct ubi_device *ubi)
716 ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size); 690 ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
717 ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0); 691 ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);
718 692
693 ubi->max_write_size = ubi->mtd->writebufsize;
694 /*
695 * Maximum write size has to be greater or equivalent to min. I/O
696 * size, and be multiple of min. I/O size.
697 */
698 if (ubi->max_write_size < ubi->min_io_size ||
699 ubi->max_write_size % ubi->min_io_size ||
700 !is_power_of_2(ubi->max_write_size)) {
701 ubi_err("bad write buffer size %d for %d min. I/O unit",
702 ubi->max_write_size, ubi->min_io_size);
703 return -EINVAL;
704 }
705
719 /* Calculate default aligned sizes of EC and VID headers */ 706 /* Calculate default aligned sizes of EC and VID headers */
720 ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size); 707 ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
721 ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size); 708 ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
722 709
723 dbg_msg("min_io_size %d", ubi->min_io_size); 710 dbg_msg("min_io_size %d", ubi->min_io_size);
711 dbg_msg("max_write_size %d", ubi->max_write_size);
724 dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size); 712 dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
725 dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize); 713 dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
726 dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize); 714 dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
@@ -737,7 +725,7 @@ static int io_init(struct ubi_device *ubi)
737 } 725 }
738 726
739 /* Similar for the data offset */ 727 /* Similar for the data offset */
740 ubi->leb_start = ubi->vid_hdr_offset + UBI_EC_HDR_SIZE; 728 ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
741 ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size); 729 ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
742 730
743 dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset); 731 dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
@@ -949,6 +937,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
 	spin_lock_init(&ubi->volumes_lock);
 
 	ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
+	dbg_msg("sizeof(struct ubi_scan_leb) %zu", sizeof(struct ubi_scan_leb));
+	dbg_msg("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
 
 	err = io_init(ubi);
 	if (err)
@@ -963,13 +953,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
 	if (!ubi->peb_buf2)
 		goto out_free;
 
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-	mutex_init(&ubi->dbg_buf_mutex);
-	ubi->dbg_peb_buf = vmalloc(ubi->peb_size);
-	if (!ubi->dbg_peb_buf)
-		goto out_free;
-#endif
-
 	err = attach_by_scanning(ubi);
 	if (err) {
 		dbg_err("failed to attach by scanning, error %d", err);
@@ -1017,8 +1000,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
 	 * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
 	 */
 	spin_lock(&ubi->wl_lock);
-	if (!DBG_DISABLE_BGT)
-		ubi->thread_enabled = 1;
+	ubi->thread_enabled = 1;
 	wake_up_process(ubi->bgt_thread);
 	spin_unlock(&ubi->wl_lock);
 
@@ -1035,9 +1017,6 @@ out_detach:
 out_free:
 	vfree(ubi->peb_buf1);
 	vfree(ubi->peb_buf2);
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-	vfree(ubi->dbg_peb_buf);
-#endif
 	if (ref)
 		put_device(&ubi->dev);
 	else
@@ -1108,9 +1087,6 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
 	put_mtd_device(ubi->mtd);
 	vfree(ubi->peb_buf1);
 	vfree(ubi->peb_buf2);
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-	vfree(ubi->dbg_peb_buf);
-#endif
 	ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
 	put_device(&ubi->dev);
 	return 0;
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 4876977e52cb..d4d07e5f138f 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -27,6 +27,20 @@
 #ifdef CONFIG_MTD_UBI_DEBUG
 
 #include "ubi.h"
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+unsigned int ubi_msg_flags;
+unsigned int ubi_chk_flags;
+unsigned int ubi_tst_flags;
+
+module_param_named(debug_msgs, ubi_msg_flags, uint, S_IRUGO | S_IWUSR);
+module_param_named(debug_chks, ubi_chk_flags, uint, S_IRUGO | S_IWUSR);
+module_param_named(debug_tsts, ubi_tst_flags, uint, S_IRUGO | S_IWUSR);
+
+MODULE_PARM_DESC(debug_msgs, "Debug message type flags");
+MODULE_PARM_DESC(debug_chks, "Debug check flags");
+MODULE_PARM_DESC(debug_tsts, "Debug special test flags");
 
 /**
  * ubi_dbg_dump_ec_hdr - dump an erase counter header.
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 9eca95074bc2..0b0c2888c656 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -38,6 +38,11 @@
 	printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
 	       current->pid, __func__, ##__VA_ARGS__)
 
+#define dbg_do_msg(typ, fmt, ...) do {        \
+	if (ubi_msg_flags & typ)              \
+		dbg_msg(fmt, ##__VA_ARGS__);  \
+} while (0)
+
 #define ubi_dbg_dump_stack() dump_stack()
 
 struct ubi_ec_hdr;
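With dbg_do_msg() in place, every message family below compiles to the same call site and the filtering happens at run time rather than at build time. Roughly, a call such as dbg_gen("attached mtd%d", num) expands to (using the UBI_MSG_GEN flag defined further down in this header):

	do {
		if (ubi_msg_flags & UBI_MSG_GEN)
			printk(KERN_DEBUG "UBI DBG (pid %d): %s: attached mtd%d\n",
			       current->pid, __func__, num);
	} while (0);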
@@ -57,62 +62,88 @@ void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type);
 void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
 void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
 
+extern unsigned int ubi_msg_flags;
+
+/*
+ * Debugging message type flags.
+ *
+ * UBI_MSG_GEN: general messages
+ * UBI_MSG_EBA: eraseblock association sub-system messages
+ * UBI_MSG_WL: wear-leveling sub-system messages
+ * UBI_MSG_IO: input/output sub-system messages
+ * UBI_MSG_BLD: scanning and build messages
+ */
+enum {
+	UBI_MSG_GEN = 0x1,
+	UBI_MSG_EBA = 0x2,
+	UBI_MSG_WL  = 0x4,
+	UBI_MSG_IO  = 0x8,
+	UBI_MSG_BLD = 0x10,
+};
+
 #define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \
 		print_hex_dump(l, ps, pt, r, g, b, len, a)
 
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG
 /* General debugging messages */
-#define dbg_gen(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
-#else
-#define dbg_gen(fmt, ...) ({})
-#endif
+#define dbg_gen(fmt, ...) dbg_do_msg(UBI_MSG_GEN, fmt, ##__VA_ARGS__)
 
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA
 /* Messages from the eraseblock association sub-system */
-#define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
-#else
-#define dbg_eba(fmt, ...) ({})
-#endif
+#define dbg_eba(fmt, ...) dbg_do_msg(UBI_MSG_EBA, fmt, ##__VA_ARGS__)
 
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL
 /* Messages from the wear-leveling sub-system */
-#define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
-#else
-#define dbg_wl(fmt, ...) ({})
-#endif
+#define dbg_wl(fmt, ...) dbg_do_msg(UBI_MSG_WL, fmt, ##__VA_ARGS__)
 
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO
 /* Messages from the input/output sub-system */
-#define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
-#else
-#define dbg_io(fmt, ...) ({})
-#endif
+#define dbg_io(fmt, ...) dbg_do_msg(UBI_MSG_IO, fmt, ##__VA_ARGS__)
 
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD
 /* Initialization and build messages */
-#define dbg_bld(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
-#define UBI_IO_DEBUG 1
-#else
-#define dbg_bld(fmt, ...) ({})
-#define UBI_IO_DEBUG 0
-#endif
+#define dbg_bld(fmt, ...) dbg_do_msg(UBI_MSG_BLD, fmt, ##__VA_ARGS__)
+
+extern unsigned int ubi_chk_flags;
+
+/*
+ * Debugging check flags.
+ *
+ * UBI_CHK_GEN: general checks
+ * UBI_CHK_IO: check writes and erases
+ */
+enum {
+	UBI_CHK_GEN = 0x1,
+	UBI_CHK_IO  = 0x2,
+};
 
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
 int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len);
 int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
 			int offset, int len);
-#else
-#define ubi_dbg_check_all_ff(ubi, pnum, offset, len) 0
-#define ubi_dbg_check_write(ubi, buf, pnum, offset, len) 0
-#endif
 
-#ifdef CONFIG_MTD_UBI_DEBUG_DISABLE_BGT
-#define DBG_DISABLE_BGT 1
-#else
-#define DBG_DISABLE_BGT 0
-#endif
+extern unsigned int ubi_tst_flags;
+
+/*
+ * Special testing flags.
+ *
+ * UBI_TST_DISABLE_BGT: disable the background thread
+ * UBI_TST_EMULATE_BITFLIPS: emulate bit-flips
+ * UBI_TST_EMULATE_WRITE_FAILURES: emulate write failures
+ * UBI_TST_EMULATE_ERASE_FAILURES: emulate erase failures
+ */
+enum {
+	UBI_TST_DISABLE_BGT            = 0x1,
+	UBI_TST_EMULATE_BITFLIPS       = 0x2,
+	UBI_TST_EMULATE_WRITE_FAILURES = 0x4,
+	UBI_TST_EMULATE_ERASE_FAILURES = 0x8,
+};
+
+/**
+ * ubi_dbg_is_bgt_disabled - check if the background thread is disabled.
+ *
+ * Returns non-zero if the UBI background thread is disabled for testing
+ * purposes.
+ */
+static inline int ubi_dbg_is_bgt_disabled(void)
+{
+	return ubi_tst_flags & UBI_TST_DISABLE_BGT;
+}
 
-#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_BITFLIPS
 /**
  * ubi_dbg_is_bitflip - if it is time to emulate a bit-flip.
  *
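The three bitmasks replace build-time Kconfig switches with run-time state. Illustrative usage, assuming the standard module-parameter sysfs layout (the exact path depends on how UBI is built and is an assumption here):

	/*
	 * From user space, e.g.:
	 *
	 *	echo 0x11 > /sys/module/ubi/parameters/debug_msgs
	 *
	 * sets ubi_msg_flags to UBI_MSG_GEN | UBI_MSG_BLD:
	 */
	unsigned int wanted = UBI_MSG_GEN | UBI_MSG_BLD;	/* 0x1 | 0x10 == 0x11 */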
@@ -120,13 +151,11 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
  */
 static inline int ubi_dbg_is_bitflip(void)
 {
-	return !(random32() % 200);
+	if (ubi_tst_flags & UBI_TST_EMULATE_BITFLIPS)
+		return !(random32() % 200);
+	return 0;
 }
-#else
-#define ubi_dbg_is_bitflip() 0
-#endif
 
-#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_WRITE_FAILURES
 /**
  * ubi_dbg_is_write_failure - if it is time to emulate a write failure.
  *
@@ -135,13 +164,11 @@ static inline int ubi_dbg_is_bitflip(void)
  */
 static inline int ubi_dbg_is_write_failure(void)
 {
-	return !(random32() % 500);
+	if (ubi_tst_flags & UBI_TST_EMULATE_WRITE_FAILURES)
+		return !(random32() % 500);
+	return 0;
 }
-#else
-#define ubi_dbg_is_write_failure() 0
-#endif
 
-#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_ERASE_FAILURES
 /**
  * ubi_dbg_is_erase_failure - if it is time to emulate an erase failure.
  *
@@ -150,11 +177,10 @@ static inline int ubi_dbg_is_write_failure(void)
  */
 static inline int ubi_dbg_is_erase_failure(void)
 {
-	return !(random32() % 400);
+	if (ubi_tst_flags & UBI_TST_EMULATE_ERASE_FAILURES)
+		return !(random32() % 400);
+	return 0;
 }
-#else
-#define ubi_dbg_is_erase_failure() 0
-#endif
 
 #else
 
@@ -177,8 +203,7 @@ static inline int ubi_dbg_is_erase_failure(void)
 #define ubi_dbg_dump_flash(ubi, pnum, offset, len)         ({})
 #define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) ({})
 
-#define UBI_IO_DEBUG 0
-#define DBG_DISABLE_BGT 0
+#define ubi_dbg_is_bgt_disabled() 0
 #define ubi_dbg_is_bitflip() 0
 #define ubi_dbg_is_write_failure() 0
 #define ubi_dbg_is_erase_failure() 0
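A sketch of how a read path can consume the emulation helpers; UBI_IO_BITFLIPS, dbg_gen() and ubi_io_read() are existing UBI identifiers, while the wrapper itself is illustrative:

	static int read_with_fault_injection(const struct ubi_device *ubi,
					     void *buf, int pnum, int offset,
					     int len)
	{
		int err = ubi_io_read(ubi, buf, pnum, offset, len);

		if (!err && ubi_dbg_is_bitflip()) {
			dbg_gen("emulate a bit-flip at PEB %d", pnum);
			return UBI_IO_BITFLIPS;	/* caller will scrub the PEB */
		}
		return err;
	}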
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 811775aa8ee8..eededf94f5a6 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -91,7 +91,7 @@
 #include <linux/slab.h>
 #include "ubi.h"
 
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
 static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum);
 static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
 static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
@@ -146,6 +146,28 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
 	if (err)
 		return err;
 
+	/*
+	 * Deliberately corrupt the buffer to improve robustness. Indeed, if we
+	 * do not do this, the following may happen:
+	 * 1. The buffer contains data from a previous operation, e.g., a read
+	 *    from another PEB. The data looks as expected, so if we just do
+	 *    not read anything and return, the caller would not notice. E.g.,
+	 *    if we are reading a VID header, the buffer may contain a valid
+	 *    VID header from another PEB.
+	 * 2. The driver is buggy and returns us success, -EBADMSG, or
+	 *    -EUCLEAN, but it does not actually put any data into the buffer.
+	 *
+	 * This may confuse UBI or the upper layers - they may think the buffer
+	 * contains valid data while in fact it is just old data. This is
+	 * especially possible because UBI (and UBIFS) relies on CRC, and
+	 * treats data as correct even in case of ECC errors if the CRC is
+	 * correct.
+	 *
+	 * Try to prevent this situation by changing the first byte of the
+	 * buffer.
+	 */
+	*((uint8_t *)buf) ^= 0xFF;
+
 	addr = (loff_t)pnum * ubi->peb_size + offset;
 retry:
 	err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
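The same defensive idiom works anywhere a lower layer fills a caller-supplied buffer. A user-space sketch (the names are illustrative):

	#include <stddef.h>
	#include <stdint.h>

	/*
	 * Poison the first byte before calling down, so a lower layer that
	 * reports success without writing anything cannot hand back stale
	 * bytes undetected.
	 */
	static int checked_read(int (*lower_read)(void *buf, size_t len),
				void *buf, size_t len)
	{
		if (len)
			*(uint8_t *)buf ^= 0xFF;
		return lower_read(buf, len);
	}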
@@ -166,7 +188,7 @@ retry:
 		return UBI_IO_BITFLIPS;
 	}
 
-	if (read != len && retries++ < UBI_IO_RETRIES) {
+	if (retries++ < UBI_IO_RETRIES) {
 		dbg_io("error %d%s while reading %d bytes from PEB %d:%d,"
 		       " read only %zd bytes, retry",
 		       err, errstr, len, pnum, offset, read);
@@ -480,6 +502,13 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
 	size_t written;
 	loff_t addr;
 	uint32_t data = 0;
+	/*
+	 * Note, we cannot generally define VID header buffers on the stack,
+	 * because of the way we deal with these buffers (see the header
+	 * comment in this file). But we know this is a NOR-specific piece of
+	 * code, so we can do it here. Still, this is error-prone, and we
+	 * should (pre-)allocate the VID header buffer instead.
+	 */
 	struct ubi_vid_hdr vid_hdr;
 
 	/*
@@ -507,11 +536,13 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
 	 * PEB.
 	 */
 	err1 = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
-	if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR) {
+	if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
+	    err1 == UBI_IO_FF) {
 		struct ubi_ec_hdr ec_hdr;
 
 		err1 = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
-		if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR)
+		if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
+		    err1 == UBI_IO_FF)
 			/*
 			 * Both VID and EC headers are corrupted, so we can
 			 * safely erase this PEB and not afraid that it will be
@@ -752,9 +783,8 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
 		if (verbose)
 			ubi_warn("no EC header found at PEB %d, "
 				 "only 0xFF bytes", pnum);
-		else if (UBI_IO_DEBUG)
-			dbg_msg("no EC header found at PEB %d, "
-				"only 0xFF bytes", pnum);
+		dbg_bld("no EC header found at PEB %d, "
+			"only 0xFF bytes", pnum);
 		if (!read_err)
 			return UBI_IO_FF;
 		else
@@ -769,9 +799,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
 			ubi_warn("bad magic number at PEB %d: %08x instead of "
 				 "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
 			ubi_dbg_dump_ec_hdr(ec_hdr);
-		} else if (UBI_IO_DEBUG)
-			dbg_msg("bad magic number at PEB %d: %08x instead of "
-				"%08x", pnum, magic, UBI_EC_HDR_MAGIC);
+		}
+		dbg_bld("bad magic number at PEB %d: %08x instead of "
+			"%08x", pnum, magic, UBI_EC_HDR_MAGIC);
 		return UBI_IO_BAD_HDR;
 	}
 
@@ -783,9 +813,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
 			ubi_warn("bad EC header CRC at PEB %d, calculated "
 				 "%#08x, read %#08x", pnum, crc, hdr_crc);
 			ubi_dbg_dump_ec_hdr(ec_hdr);
-		} else if (UBI_IO_DEBUG)
-			dbg_msg("bad EC header CRC at PEB %d, calculated "
-				"%#08x, read %#08x", pnum, crc, hdr_crc);
+		}
+		dbg_bld("bad EC header CRC at PEB %d, calculated "
+			"%#08x, read %#08x", pnum, crc, hdr_crc);
 
 		if (!read_err)
 			return UBI_IO_BAD_HDR;
@@ -1008,9 +1038,8 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
 		if (verbose)
 			ubi_warn("no VID header found at PEB %d, "
 				 "only 0xFF bytes", pnum);
-		else if (UBI_IO_DEBUG)
-			dbg_msg("no VID header found at PEB %d, "
-				"only 0xFF bytes", pnum);
+		dbg_bld("no VID header found at PEB %d, "
+			"only 0xFF bytes", pnum);
 		if (!read_err)
 			return UBI_IO_FF;
 		else
@@ -1021,9 +1050,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
 			ubi_warn("bad magic number at PEB %d: %08x instead of "
 				 "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
 			ubi_dbg_dump_vid_hdr(vid_hdr);
-		} else if (UBI_IO_DEBUG)
-			dbg_msg("bad magic number at PEB %d: %08x instead of "
-				"%08x", pnum, magic, UBI_VID_HDR_MAGIC);
+		}
+		dbg_bld("bad magic number at PEB %d: %08x instead of "
+			"%08x", pnum, magic, UBI_VID_HDR_MAGIC);
 		return UBI_IO_BAD_HDR;
 	}
 
@@ -1035,9 +1064,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
 			ubi_warn("bad CRC at PEB %d, calculated %#08x, "
 				 "read %#08x", pnum, crc, hdr_crc);
 			ubi_dbg_dump_vid_hdr(vid_hdr);
-		} else if (UBI_IO_DEBUG)
-			dbg_msg("bad CRC at PEB %d, calculated %#08x, "
-				"read %#08x", pnum, crc, hdr_crc);
+		}
+		dbg_bld("bad CRC at PEB %d, calculated %#08x, "
+			"read %#08x", pnum, crc, hdr_crc);
 		if (!read_err)
 			return UBI_IO_BAD_HDR;
 		else
@@ -1097,7 +1126,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
 	return err;
 }
 
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
 
 /**
  * paranoid_check_not_bad - ensure that a physical eraseblock is not bad.
@@ -1111,6 +1140,9 @@ static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
 {
 	int err;
 
+	if (!(ubi_chk_flags & UBI_CHK_IO))
+		return 0;
+
 	err = ubi_io_is_bad(ubi, pnum);
 	if (!err)
 		return err;
@@ -1135,6 +1167,9 @@ static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
 	int err;
 	uint32_t magic;
 
+	if (!(ubi_chk_flags & UBI_CHK_IO))
+		return 0;
+
 	magic = be32_to_cpu(ec_hdr->magic);
 	if (magic != UBI_EC_HDR_MAGIC) {
 		ubi_err("bad magic %#08x, must be %#08x",
@@ -1170,6 +1205,9 @@ static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
 	uint32_t crc, hdr_crc;
 	struct ubi_ec_hdr *ec_hdr;
 
+	if (!(ubi_chk_flags & UBI_CHK_IO))
+		return 0;
+
 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
 	if (!ec_hdr)
 		return -ENOMEM;
@@ -1211,6 +1249,9 @@ static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
 	int err;
 	uint32_t magic;
 
+	if (!(ubi_chk_flags & UBI_CHK_IO))
+		return 0;
+
 	magic = be32_to_cpu(vid_hdr->magic);
 	if (magic != UBI_VID_HDR_MAGIC) {
 		ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x",
@@ -1249,6 +1290,9 @@ static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
 	struct ubi_vid_hdr *vid_hdr;
 	void *p;
 
+	if (!(ubi_chk_flags & UBI_CHK_IO))
+		return 0;
+
 	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
 	if (!vid_hdr)
 		return -ENOMEM;
@@ -1294,15 +1338,26 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
 			int offset, int len)
 {
 	int err, i;
+	size_t read;
+	void *buf1;
+	loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
 
-	mutex_lock(&ubi->dbg_buf_mutex);
-	err = ubi_io_read(ubi, ubi->dbg_peb_buf, pnum, offset, len);
-	if (err)
-		goto out_unlock;
+	if (!(ubi_chk_flags & UBI_CHK_IO))
+		return 0;
+
+	buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
+	if (!buf1) {
+		ubi_err("cannot allocate memory to check writes");
+		return 0;
+	}
+
+	err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf1);
+	if (err && err != -EUCLEAN)
+		goto out_free;
 
 	for (i = 0; i < len; i++) {
 		uint8_t c = ((uint8_t *)buf)[i];
-		uint8_t c1 = ((uint8_t *)ubi->dbg_peb_buf)[i];
+		uint8_t c1 = ((uint8_t *)buf1)[i];
 		int dump_len;
 
 		if (c == c1)
@@ -1319,17 +1374,17 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
 		ubi_msg("hex dump of the read buffer from %d to %d",
 			i, i + dump_len);
 		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
-			       ubi->dbg_peb_buf + i, dump_len, 1);
+			       buf1 + i, dump_len, 1);
 		ubi_dbg_dump_stack();
 		err = -EINVAL;
-		goto out_unlock;
+		goto out_free;
 	}
-	mutex_unlock(&ubi->dbg_buf_mutex);
 
+	vfree(buf1);
 	return 0;
 
-out_unlock:
-	mutex_unlock(&ubi->dbg_buf_mutex);
+out_free:
+	vfree(buf1);
 	return err;
 }
 
@@ -1348,36 +1403,44 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
 {
 	size_t read;
 	int err;
+	void *buf;
 	loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
 
-	mutex_lock(&ubi->dbg_buf_mutex);
-	err = ubi->mtd->read(ubi->mtd, addr, len, &read, ubi->dbg_peb_buf);
+	if (!(ubi_chk_flags & UBI_CHK_IO))
+		return 0;
+
+	buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
+	if (!buf) {
+		ubi_err("cannot allocate memory to check for 0xFFs");
+		return 0;
+	}
+
+	err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
 	if (err && err != -EUCLEAN) {
 		ubi_err("error %d while reading %d bytes from PEB %d:%d, "
 			"read %zd bytes", err, len, pnum, offset, read);
 		goto error;
 	}
 
-	err = ubi_check_pattern(ubi->dbg_peb_buf, 0xFF, len);
+	err = ubi_check_pattern(buf, 0xFF, len);
 	if (err == 0) {
 		ubi_err("flash region at PEB %d:%d, length %d does not "
 			"contain all 0xFF bytes", pnum, offset, len);
 		goto fail;
 	}
-	mutex_unlock(&ubi->dbg_buf_mutex);
 
+	vfree(buf);
 	return 0;
 
 fail:
 	ubi_err("paranoid check failed for PEB %d", pnum);
 	ubi_msg("hex dump of the %d-%d region", offset, offset + len);
-	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
-		       ubi->dbg_peb_buf, len, 1);
+	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
 	err = -EINVAL;
 error:
 	ubi_dbg_dump_stack();
-	mutex_unlock(&ubi->dbg_buf_mutex);
+	vfree(buf);
 	return err;
 }
 
-#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
+#endif /* CONFIG_MTD_UBI_DEBUG */
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 69fa4ef03c53..d39716e5b204 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -40,7 +40,9 @@ void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di)
 {
 	di->ubi_num = ubi->ubi_num;
 	di->leb_size = ubi->leb_size;
+	di->leb_start = ubi->leb_start;
 	di->min_io_size = ubi->min_io_size;
+	di->max_write_size = ubi->max_write_size;
 	di->ro_mode = ubi->ro_mode;
 	di->cdev = ubi->cdev.dev;
 }
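A sketch of a client using the two new fields; ubi_get_device_info() and struct ubi_device_info are the existing kernel API, while the function below is illustrative:

	#include <linux/mtd/ubi.h>

	/* Size a write-back buffer from the largest programmable unit. */
	static int pick_wbuf_size(int ubi_num)
	{
		struct ubi_device_info di;
		int err = ubi_get_device_info(ubi_num, &di);

		if (err)
			return err;
		return di.max_write_size;
	}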
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 79ca304fc4db..11eb8ef12485 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -39,32 +39,46 @@
  * eraseblocks are put to the @free list and the physical eraseblock to be
  * erased are put to the @erase list.
  *
+ * About corruptions
+ * ~~~~~~~~~~~~~~~~~
+ *
+ * UBI protects EC and VID headers with CRC-32 checksums, so it can detect
+ * whether the headers are corrupted or not. Sometimes UBI also protects the
+ * data with CRC-32, e.g., when it executes the atomic LEB change operation, or
+ * when it moves the contents of a PEB for wear-leveling purposes.
+ *
  * UBI tries to distinguish between 2 types of corruptions.
- * 1. Corruptions caused by power cuts. These are harmless and expected
- * corruptions and UBI tries to handle them gracefully, without printing too
- * many warnings and error messages. The idea is that we do not lose
- * important data in these case - we may lose only the data which was being
- * written to the media just before the power cut happened, and the upper
- * layers (e.g., UBIFS) are supposed to handle these situations. UBI puts
- * these PEBs to the head of the @erase list and they are scheduled for
- * erasure.
+ *
+ * 1. Corruptions caused by power cuts. These are expected corruptions and UBI
+ * tries to handle them gracefully, without printing too many warnings and
+ * error messages. The idea is that we do not lose important data in this case
+ * - we may lose only the data which was being written to the media just before
+ * the power cut happened, and the upper layers (e.g., UBIFS) are supposed to
+ * handle such data losses (e.g., by using the FS journal).
+ *
+ * When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like
+ * the reason is a power cut, UBI puts this PEB to the @erase list, and all
+ * PEBs in the @erase list are scheduled for erasure later.
  *
  * 2. Unexpected corruptions which are not caused by power cuts. During
  * scanning, such PEBs are put to the @corr list and UBI preserves them.
- * Obviously, this lessens the amount of available PEBs, and if at some
- * point UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly
- * informs about such PEBs every time the MTD device is attached.
+ * Obviously, this lessens the amount of available PEBs, and if at some point
+ * UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs
+ * about such PEBs every time the MTD device is attached.
  *
  * However, it is difficult to reliably distinguish between these types of
- * corruptions and UBI's strategy is as follows. UBI assumes (2.) if the VID
- * header is corrupted and the data area does not contain all 0xFFs, and there
- * were not bit-flips or integrity errors while reading the data area. Otherwise
- * UBI assumes (1.). The assumptions are:
- * o if the data area contains only 0xFFs, there is no data, and it is safe
- * to just erase this PEB.
- * o if the data area has bit-flips and data integrity errors (ECC errors on
+ * corruptions and UBI's strategy is as follows. UBI assumes corruption type 2
+ * if the VID header is corrupted and the data area does not contain all 0xFFs,
+ * and there were no bit-flips or integrity errors while reading the data area.
+ * Otherwise UBI assumes corruption type 1. So the decision criteria are as
+ * follows.
+ * o If the data area contains only 0xFFs, there is no data, and it is safe
+ *   to just erase this PEB - this is corruption type 1.
+ * o If the data area has bit-flips or data integrity errors (ECC errors on
  * NAND), it is probably a PEB which was being erased when power cut
- * happened.
+ * happened, so this is corruption type 1. However, this is just a guess,
+ * which might be wrong.
+ * o Otherwise this is corruption type 2.
  */
 
 #include <linux/err.h>
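The decision procedure in the comment, reduced to straight-line code; ubi_check_pattern() exists in UBI, while the wrapper and its boolean inputs are illustrative:

	/* Returns the corruption type: 1 (power cut) or 2 (unexpected). */
	static int classify_corruption(const void *data, int len,
				       int bitflips, int ecc_errors)
	{
		if (ubi_check_pattern(data, 0xFF, len))
			return 1;	/* no data at all: safe to erase */
		if (bitflips || ecc_errors)
			return 1;	/* probably an interrupted erase */
		return 2;		/* preserve the PEB on the @corr list */
	}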
@@ -74,7 +88,7 @@
 #include <linux/random.h>
 #include "ubi.h"
 
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
 static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si);
 #else
 #define paranoid_check_si(ubi, si) 0
@@ -115,7 +129,7 @@ static int add_to_list(struct ubi_scan_info *si, int pnum, int ec, int to_head,
 	} else
 		BUG();
 
-	seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL);
+	seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
 	if (!seb)
 		return -ENOMEM;
 
@@ -144,7 +158,7 @@ static int add_corrupted(struct ubi_scan_info *si, int pnum, int ec)
 
 	dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
 
-	seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL);
+	seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
 	if (!seb)
 		return -ENOMEM;
 
@@ -553,7 +567,7 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
 	if (err)
 		return err;
 
-	seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL);
+	seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
 	if (!seb)
 		return -ENOMEM;
 
@@ -1152,9 +1166,15 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
 	si->volumes = RB_ROOT;
 
 	err = -ENOMEM;
+	si->scan_leb_slab = kmem_cache_create("ubi_scan_leb_slab",
+					      sizeof(struct ubi_scan_leb),
+					      0, 0, NULL);
+	if (!si->scan_leb_slab)
+		goto out_si;
+
 	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
 	if (!ech)
-		goto out_si;
+		goto out_slab;
 
 	vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
 	if (!vidh)
@@ -1215,6 +1235,8 @@ out_vidh:
 	ubi_free_vid_hdr(ubi, vidh);
 out_ech:
 	kfree(ech);
+out_slab:
+	kmem_cache_destroy(si->scan_leb_slab);
 out_si:
 	ubi_scan_destroy_si(si);
 	return ERR_PTR(err);
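The switch from kmalloc() to a dedicated slab cache is the standard kernel pattern for many short-lived, fixed-size objects. A condensed sketch of the lifecycle, assuming the kernel context and the struct ubi_scan_leb type used above:

	#include <linux/errno.h>
	#include <linux/slab.h>

	static int scan_leb_slab_demo(void)
	{
		struct kmem_cache *cache;
		struct ubi_scan_leb *seb;

		cache = kmem_cache_create("ubi_scan_leb_slab",
					  sizeof(struct ubi_scan_leb),
					  0, 0, NULL);
		if (!cache)
			return -ENOMEM;

		seb = kmem_cache_alloc(cache, GFP_KERNEL);	/* replaces kmalloc() */
		if (seb)
			kmem_cache_free(cache, seb);		/* replaces kfree() */

		kmem_cache_destroy(cache);			/* once, at teardown */
		return 0;
	}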
@@ -1223,11 +1245,12 @@ out_si:
 /**
  * destroy_sv - free the scanning volume information
  * @sv: scanning volume information
+ * @si: scanning information
  *
  * This function destroys the volume RB-tree (@sv->root) and the scanning
  * volume information.
  */
-static void destroy_sv(struct ubi_scan_volume *sv)
+static void destroy_sv(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
 {
 	struct ubi_scan_leb *seb;
 	struct rb_node *this = sv->root.rb_node;
@@ -1247,7 +1270,7 @@ static void destroy_sv(struct ubi_scan_volume *sv)
 			this->rb_right = NULL;
 		}
 
-		kfree(seb);
+		kmem_cache_free(si->scan_leb_slab, seb);
 		}
 	}
 	kfree(sv);
@@ -1265,19 +1288,19 @@ void ubi_scan_destroy_si(struct ubi_scan_info *si)
 
 	list_for_each_entry_safe(seb, seb_tmp, &si->alien, u.list) {
 		list_del(&seb->u.list);
-		kfree(seb);
+		kmem_cache_free(si->scan_leb_slab, seb);
 	}
 	list_for_each_entry_safe(seb, seb_tmp, &si->erase, u.list) {
 		list_del(&seb->u.list);
-		kfree(seb);
+		kmem_cache_free(si->scan_leb_slab, seb);
 	}
 	list_for_each_entry_safe(seb, seb_tmp, &si->corr, u.list) {
 		list_del(&seb->u.list);
-		kfree(seb);
+		kmem_cache_free(si->scan_leb_slab, seb);
 	}
 	list_for_each_entry_safe(seb, seb_tmp, &si->free, u.list) {
 		list_del(&seb->u.list);
-		kfree(seb);
+		kmem_cache_free(si->scan_leb_slab, seb);
 	}
 
 	/* Destroy the volume RB-tree */
@@ -1298,14 +1321,15 @@ void ubi_scan_destroy_si(struct ubi_scan_info *si)
 			rb->rb_right = NULL;
 		}
 
-		destroy_sv(sv);
+		destroy_sv(si, sv);
 		}
 	}
 
+	kmem_cache_destroy(si->scan_leb_slab);
 	kfree(si);
 }
 
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
 
 /**
  * paranoid_check_si - check the scanning information.
@@ -1323,6 +1347,9 @@ static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
 	struct ubi_scan_leb *seb, *last_seb;
 	uint8_t *buf;
 
+	if (!(ubi_chk_flags & UBI_CHK_GEN))
+		return 0;
+
 	/*
 	 * At first, check that scanning information is OK.
 	 */
@@ -1575,4 +1602,4 @@ out:
 	return -EINVAL;
 }
 
-#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
+#endif /* CONFIG_MTD_UBI_DEBUG */
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h
index a3264f0bef2b..d48aef15ab5d 100644
--- a/drivers/mtd/ubi/scan.h
+++ b/drivers/mtd/ubi/scan.h
@@ -109,6 +109,7 @@ struct ubi_scan_volume {
  * @mean_ec: mean erase counter value
  * @ec_sum: a temporary variable used when calculating @mean_ec
  * @ec_count: a temporary variable used when calculating @mean_ec
+ * @scan_leb_slab: slab cache for &struct ubi_scan_leb objects
  *
  * This data structure contains the result of scanning and may be used by other
  * UBI sub-systems to build final UBI data structures, further error-recovery
@@ -134,6 +135,7 @@ struct ubi_scan_info {
 	int mean_ec;
 	uint64_t ec_sum;
 	int ec_count;
+	struct kmem_cache *scan_leb_slab;
 };
 
 struct ubi_device;
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 0b0149c41fe3..f1be8b79663c 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -40,6 +40,7 @@
 #include <linux/notifier.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/ubi.h>
+#include <asm/pgtable.h>
 
 #include "ubi-media.h"
 #include "scan.h"
@@ -381,14 +382,14 @@ struct ubi_wl_entry;
  * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
  *               not
  * @nor_flash: non-zero if working on top of NOR flash
+ * @max_write_size: maximum amount of bytes the underlying flash can write at a
+ *                  time (MTD write buffer size)
  * @mtd: MTD device descriptor
  *
  * @peb_buf1: a buffer of PEB size used for different purposes
  * @peb_buf2: another buffer of PEB size used for different purposes
  * @buf_mutex: protects @peb_buf1 and @peb_buf2
  * @ckvol_mutex: serializes static volume checking when opening
- * @dbg_peb_buf: buffer of PEB size used for debugging
- * @dbg_buf_mutex: protects @dbg_peb_buf
  */
 struct ubi_device {
 	struct cdev cdev;
@@ -464,16 +465,13 @@ struct ubi_device {
 	int vid_hdr_shift;
 	unsigned int bad_allowed:1;
 	unsigned int nor_flash:1;
+	int max_write_size;
 	struct mtd_info *mtd;
 
 	void *peb_buf1;
 	void *peb_buf2;
 	struct mutex buf_mutex;
 	struct mutex ckvol_mutex;
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-	void *dbg_peb_buf;
-	struct mutex dbg_buf_mutex;
-#endif
 };
 
 extern struct kmem_cache *ubi_wl_entry_slab;
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index c47620dfc722..b79e0dea3632 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -28,7 +28,7 @@
 #include <linux/slab.h>
 #include "ubi.h"
 
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
 static int paranoid_check_volumes(struct ubi_device *ubi);
 #else
 #define paranoid_check_volumes(ubi) 0
@@ -711,7 +711,7 @@ void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
 	volume_sysfs_close(vol);
 }
 
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
 
 /**
  * paranoid_check_volume - check volume information.
@@ -876,6 +876,9 @@ static int paranoid_check_volumes(struct ubi_device *ubi)
 {
 	int i, err = 0;
 
+	if (!(ubi_chk_flags & UBI_CHK_GEN))
+		return 0;
+
 	for (i = 0; i < ubi->vtbl_slots; i++) {
 		err = paranoid_check_volume(ubi, i);
 		if (err)
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 0b8141fc5c26..fd3bf770f518 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -62,7 +62,7 @@
 #include <asm/div64.h>
 #include "ubi.h"
 
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
 static void paranoid_vtbl_check(const struct ubi_device *ubi);
 #else
 #define paranoid_vtbl_check(ubi)
@@ -868,7 +868,7 @@ out_free:
 	return err;
 }
 
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
 
 /**
  * paranoid_vtbl_check - check volume table.
@@ -876,10 +876,13 @@ out_free:
  */
 static void paranoid_vtbl_check(const struct ubi_device *ubi)
 {
+	if (!(ubi_chk_flags & UBI_CHK_GEN))
+		return;
+
 	if (vtbl_check(ubi, ubi->vtbl)) {
 		ubi_err("paranoid check failed");
 		BUG();
 	}
 }
 
-#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
+#endif /* CONFIG_MTD_UBI_DEBUG */
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 655bbbe415d9..b4cf57db2556 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -161,7 +161,7 @@ struct ubi_work {
 	int torture;
 };
 
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
 static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
 static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
 				     struct rb_root *root);
@@ -613,7 +613,7 @@ static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
 	list_add_tail(&wrk->list, &ubi->works);
 	ubi_assert(ubi->works_count >= 0);
 	ubi->works_count += 1;
-	if (ubi->thread_enabled)
+	if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled())
 		wake_up_process(ubi->bgt_thread);
 	spin_unlock(&ubi->wl_lock);
 }
@@ -1364,7 +1364,7 @@ int ubi_thread(void *u)
 
 		spin_lock(&ubi->wl_lock);
 		if (list_empty(&ubi->works) || ubi->ro_mode ||
-		    !ubi->thread_enabled) {
+		    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled()) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			spin_unlock(&ubi->wl_lock);
 			schedule();
@@ -1561,7 +1561,7 @@ void ubi_wl_close(struct ubi_device *ubi)
 	kfree(ubi->lookuptbl);
 }
 
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+#ifdef CONFIG_MTD_UBI_DEBUG
 
 /**
  * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
@@ -1578,6 +1578,9 @@ static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
 	long long read_ec;
 	struct ubi_ec_hdr *ec_hdr;
 
+	if (!(ubi_chk_flags & UBI_CHK_GEN))
+		return 0;
+
 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
 	if (!ec_hdr)
 		return -ENOMEM;
@@ -1614,6 +1617,9 @@ out_free:
 static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
 				     struct rb_root *root)
 {
+	if (!(ubi_chk_flags & UBI_CHK_GEN))
+		return 0;
+
 	if (in_wl_tree(e, root))
 		return 0;
 
@@ -1636,6 +1642,9 @@ static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
 	struct ubi_wl_entry *p;
 	int i;
 
+	if (!(ubi_chk_flags & UBI_CHK_GEN))
+		return 0;
+
 	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
 		list_for_each_entry(p, &ubi->pq[i], u.list)
 			if (p == e)
@@ -1646,4 +1655,5 @@ static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
 	ubi_dbg_dump_stack();
 	return -EINVAL;
 }
-#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
+
+#endif /* CONFIG_MTD_UBI_DEBUG */