aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mtd
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/mtd')
-rw-r--r--drivers/mtd/Kconfig2
-rw-r--r--drivers/mtd/Makefile2
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c12
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c8
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c14
-rw-r--r--drivers/mtd/chips/fwh_lock.h4
-rw-r--r--drivers/mtd/devices/block2mtd.c4
-rw-r--r--drivers/mtd/devices/lart.c6
-rw-r--r--drivers/mtd/devices/m25p80.c53
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c52
-rw-r--r--drivers/mtd/ftl.c100
-rw-r--r--drivers/mtd/inftlcore.c2
-rw-r--r--drivers/mtd/inftlmount.c4
-rw-r--r--drivers/mtd/lpddr/Kconfig22
-rw-r--r--drivers/mtd/lpddr/Makefile6
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c796
-rw-r--r--drivers/mtd/lpddr/qinfo_probe.c255
-rw-r--r--drivers/mtd/maps/Kconfig21
-rw-r--r--drivers/mtd/maps/alchemy-flash.c2
-rw-r--r--drivers/mtd/maps/amd76xrom.c4
-rw-r--r--drivers/mtd/maps/cdb89712.c13
-rw-r--r--drivers/mtd/maps/cfi_flagadm.c2
-rw-r--r--drivers/mtd/maps/ck804xrom.c4
-rw-r--r--drivers/mtd/maps/dbox2-flash.c2
-rw-r--r--drivers/mtd/maps/dc21285.c7
-rw-r--r--drivers/mtd/maps/edb7312.c2
-rw-r--r--drivers/mtd/maps/esb2rom.c4
-rw-r--r--drivers/mtd/maps/fortunet.c2
-rw-r--r--drivers/mtd/maps/h720x-flash.c8
-rw-r--r--drivers/mtd/maps/ichxrom.c4
-rw-r--r--drivers/mtd/maps/impa7.c2
-rw-r--r--drivers/mtd/maps/integrator-flash.c2
-rw-r--r--drivers/mtd/maps/ipaq-flash.c2
-rw-r--r--drivers/mtd/maps/ixp2000.c6
-rw-r--r--drivers/mtd/maps/ixp4xx.c4
-rw-r--r--drivers/mtd/maps/mbx860.c2
-rw-r--r--drivers/mtd/maps/nettel.c5
-rw-r--r--drivers/mtd/maps/octagon-5066.c2
-rw-r--r--drivers/mtd/maps/omap_nor.c2
-rw-r--r--drivers/mtd/maps/physmap.c47
-rw-r--r--drivers/mtd/maps/physmap_of.c4
-rw-r--r--drivers/mtd/maps/pmcmsp-flash.c2
-rw-r--r--drivers/mtd/maps/redwood.c2
-rw-r--r--drivers/mtd/maps/rpxlite.c2
-rw-r--r--drivers/mtd/maps/sbc8240.c2
-rw-r--r--drivers/mtd/maps/scb2_flash.c8
-rw-r--r--drivers/mtd/maps/sharpsl-flash.c2
-rw-r--r--drivers/mtd/maps/tqm8xxl.c2
-rw-r--r--drivers/mtd/maps/uclinux.c4
-rw-r--r--drivers/mtd/maps/vmax301.c2
-rw-r--r--drivers/mtd/maps/wr_sbc82xx_flash.c2
-rw-r--r--drivers/mtd/mtd_blkdevs.c24
-rw-r--r--drivers/mtd/mtdchar.c16
-rw-r--r--drivers/mtd/mtdconcat.c37
-rw-r--r--drivers/mtd/mtdcore.c16
-rw-r--r--drivers/mtd/mtdoops.c9
-rw-r--r--drivers/mtd/mtdpart.c34
-rw-r--r--drivers/mtd/nand/Kconfig2
-rw-r--r--drivers/mtd/nand/alauda.c6
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c4
-rw-r--r--drivers/mtd/nand/fsl_upm.c2
-rw-r--r--drivers/mtd/nand/nand_base.c25
-rw-r--r--drivers/mtd/nand/nand_bbt.c31
-rw-r--r--drivers/mtd/nand/nandsim.c339
-rw-r--r--drivers/mtd/nand/plat_nand.c2
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c10
-rw-r--r--drivers/mtd/nand/s3c2410.c8
-rw-r--r--drivers/mtd/nand/sharpsl.c247
-rw-r--r--drivers/mtd/nand/tmio_nand.c2
-rw-r--r--drivers/mtd/nftlcore.c2
-rw-r--r--drivers/mtd/nftlmount.c4
-rw-r--r--drivers/mtd/onenand/generic.c2
-rw-r--r--drivers/mtd/onenand/omap2.c12
-rw-r--r--drivers/mtd/onenand/onenand_base.c8
-rw-r--r--drivers/mtd/rfd_ftl.c29
-rw-r--r--drivers/mtd/ssfdc.c7
-rw-r--r--drivers/mtd/ubi/build.c9
-rw-r--r--drivers/mtd/ubi/cdev.c3
-rw-r--r--drivers/mtd/ubi/debug.h10
-rw-r--r--drivers/mtd/ubi/eba.c53
-rw-r--r--drivers/mtd/ubi/gluebi.c17
-rw-r--r--drivers/mtd/ubi/io.c28
-rw-r--r--drivers/mtd/ubi/scan.c2
-rw-r--r--drivers/mtd/ubi/ubi.h45
-rw-r--r--drivers/mtd/ubi/vmt.c4
-rw-r--r--drivers/mtd/ubi/wl.c492
86 files changed, 2209 insertions, 855 deletions
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 8581ed9eafe2..7d04fb9ddcaa 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -324,6 +324,8 @@ source "drivers/mtd/nand/Kconfig"
324 324
325source "drivers/mtd/onenand/Kconfig" 325source "drivers/mtd/onenand/Kconfig"
326 326
327source "drivers/mtd/lpddr/Kconfig"
328
327source "drivers/mtd/ubi/Kconfig" 329source "drivers/mtd/ubi/Kconfig"
328 330
329endif # MTD 331endif # MTD
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 40d304d6191d..4521b1ecce45 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -29,6 +29,6 @@ obj-$(CONFIG_MTD_OOPS) += mtdoops.o
29nftl-objs := nftlcore.o nftlmount.o 29nftl-objs := nftlcore.o nftlmount.o
30inftl-objs := inftlcore.o inftlmount.o 30inftl-objs := inftlcore.o inftlmount.o
31 31
32obj-y += chips/ maps/ devices/ nand/ onenand/ tests/ 32obj-y += chips/ lpddr/ maps/ devices/ nand/ onenand/ tests/
33 33
34obj-$(CONFIG_MTD_UBI) += ubi/ 34obj-$(CONFIG_MTD_UBI) += ubi/
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index c93a8be5d5f1..f5ab6fa1057b 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -58,8 +58,8 @@ static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t
58static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *); 58static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
59static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *); 59static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
60static void cfi_intelext_sync (struct mtd_info *); 60static void cfi_intelext_sync (struct mtd_info *);
61static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len); 61static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
62static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len); 62static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
63#ifdef CONFIG_MTD_OTP 63#ifdef CONFIG_MTD_OTP
64static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 64static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
65static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 65static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
@@ -558,8 +558,8 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
558 } 558 }
559 559
560 for (i=0; i<mtd->numeraseregions;i++){ 560 for (i=0; i<mtd->numeraseregions;i++){
561 printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n", 561 printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
562 i,mtd->eraseregions[i].offset, 562 i,(unsigned long long)mtd->eraseregions[i].offset,
563 mtd->eraseregions[i].erasesize, 563 mtd->eraseregions[i].erasesize,
564 mtd->eraseregions[i].numblocks); 564 mtd->eraseregions[i].numblocks);
565 } 565 }
@@ -2058,7 +2058,7 @@ out: put_chip(map, chip, adr);
2058 return ret; 2058 return ret;
2059} 2059}
2060 2060
2061static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len) 2061static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2062{ 2062{
2063 int ret; 2063 int ret;
2064 2064
@@ -2082,7 +2082,7 @@ static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
2082 return ret; 2082 return ret;
2083} 2083}
2084 2084
2085static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) 2085static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2086{ 2086{
2087 int ret; 2087 int ret;
2088 2088
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index d74ec46aa032..f9c435a42670 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -71,8 +71,8 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
71static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr); 71static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
72#include "fwh_lock.h" 72#include "fwh_lock.h"
73 73
74static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len); 74static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
75static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len); 75static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
76 76
77static struct mtd_chip_driver cfi_amdstd_chipdrv = { 77static struct mtd_chip_driver cfi_amdstd_chipdrv = {
78 .probe = NULL, /* Not usable directly */ 78 .probe = NULL, /* Not usable directly */
@@ -1774,12 +1774,12 @@ out_unlock:
1774 return ret; 1774 return ret;
1775} 1775}
1776 1776
1777static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len) 1777static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1778{ 1778{
1779 return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL); 1779 return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
1780} 1780}
1781 1781
1782static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) 1782static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1783{ 1783{
1784 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL); 1784 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
1785} 1785}
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index d4714dd9f7ab..6c740f346f91 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -42,8 +42,8 @@ static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
42 unsigned long count, loff_t to, size_t *retlen); 42 unsigned long count, loff_t to, size_t *retlen);
43static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *); 43static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
44static void cfi_staa_sync (struct mtd_info *); 44static void cfi_staa_sync (struct mtd_info *);
45static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len); 45static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
46static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len); 46static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
47static int cfi_staa_suspend (struct mtd_info *); 47static int cfi_staa_suspend (struct mtd_info *);
48static void cfi_staa_resume (struct mtd_info *); 48static void cfi_staa_resume (struct mtd_info *);
49 49
@@ -221,8 +221,8 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
221 } 221 }
222 222
223 for (i=0; i<mtd->numeraseregions;i++){ 223 for (i=0; i<mtd->numeraseregions;i++){
224 printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n", 224 printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
225 i,mtd->eraseregions[i].offset, 225 i, (unsigned long long)mtd->eraseregions[i].offset,
226 mtd->eraseregions[i].erasesize, 226 mtd->eraseregions[i].erasesize,
227 mtd->eraseregions[i].numblocks); 227 mtd->eraseregions[i].numblocks);
228 } 228 }
@@ -964,7 +964,7 @@ static int cfi_staa_erase_varsize(struct mtd_info *mtd,
964 adr += regions[i].erasesize; 964 adr += regions[i].erasesize;
965 len -= regions[i].erasesize; 965 len -= regions[i].erasesize;
966 966
967 if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift))) 967 if (adr % (1<< cfi->chipshift) == (((unsigned long)regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
968 i++; 968 i++;
969 969
970 if (adr >> cfi->chipshift) { 970 if (adr >> cfi->chipshift) {
@@ -1135,7 +1135,7 @@ retry:
1135 spin_unlock_bh(chip->mutex); 1135 spin_unlock_bh(chip->mutex);
1136 return 0; 1136 return 0;
1137} 1137}
1138static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len) 1138static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1139{ 1139{
1140 struct map_info *map = mtd->priv; 1140 struct map_info *map = mtd->priv;
1141 struct cfi_private *cfi = map->fldrv_priv; 1141 struct cfi_private *cfi = map->fldrv_priv;
@@ -1284,7 +1284,7 @@ retry:
1284 spin_unlock_bh(chip->mutex); 1284 spin_unlock_bh(chip->mutex);
1285 return 0; 1285 return 0;
1286} 1286}
1287static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) 1287static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1288{ 1288{
1289 struct map_info *map = mtd->priv; 1289 struct map_info *map = mtd->priv;
1290 struct cfi_private *cfi = map->fldrv_priv; 1290 struct cfi_private *cfi = map->fldrv_priv;
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index ab44f2b996f8..57e0e4e921f9 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -77,7 +77,7 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
77} 77}
78 78
79 79
80static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len) 80static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len)
81{ 81{
82 int ret; 82 int ret;
83 83
@@ -88,7 +88,7 @@ static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len)
88} 88}
89 89
90 90
91static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len) 91static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len)
92{ 92{
93 int ret; 93 int ret;
94 94
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 91fbba767635..8c295f40d2ac 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -224,7 +224,7 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
224 if (dev->blkdev) { 224 if (dev->blkdev) {
225 invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping, 225 invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
226 0, -1); 226 0, -1);
227 close_bdev_excl(dev->blkdev); 227 close_bdev_exclusive(dev->blkdev, FMODE_READ|FMODE_WRITE);
228 } 228 }
229 229
230 kfree(dev); 230 kfree(dev);
@@ -246,7 +246,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
246 return NULL; 246 return NULL;
247 247
248 /* Get a handle on the device */ 248 /* Get a handle on the device */
249 bdev = open_bdev_excl(devname, O_RDWR, NULL); 249 bdev = open_bdev_exclusive(devname, FMODE_READ|FMODE_WRITE, NULL);
250#ifndef MODULE 250#ifndef MODULE
251 if (IS_ERR(bdev)) { 251 if (IS_ERR(bdev)) {
252 252
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index f4bda4cee495..578de1c67bfe 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -619,7 +619,7 @@ static struct mtd_partition lart_partitions[] = {
619}; 619};
620#endif 620#endif
621 621
622int __init lart_flash_init (void) 622static int __init lart_flash_init (void)
623{ 623{
624 int result; 624 int result;
625 memset (&mtd,0,sizeof (mtd)); 625 memset (&mtd,0,sizeof (mtd));
@@ -690,7 +690,7 @@ int __init lart_flash_init (void)
690 return (result); 690 return (result);
691} 691}
692 692
693void __exit lart_flash_exit (void) 693static void __exit lart_flash_exit (void)
694{ 694{
695#ifndef HAVE_PARTITIONS 695#ifndef HAVE_PARTITIONS
696 del_mtd_device (&mtd); 696 del_mtd_device (&mtd);
@@ -705,5 +705,3 @@ module_exit (lart_flash_exit);
705MODULE_LICENSE("GPL"); 705MODULE_LICENSE("GPL");
706MODULE_AUTHOR("Abraham vd Merwe <abraham@2d3d.co.za>"); 706MODULE_AUTHOR("Abraham vd Merwe <abraham@2d3d.co.za>");
707MODULE_DESCRIPTION("MTD driver for Intel 28F160F3 on LART board"); 707MODULE_DESCRIPTION("MTD driver for Intel 28F160F3 on LART board");
708
709
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 6659b2275c0c..7c3fc766dcf1 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -20,6 +20,7 @@
20#include <linux/device.h> 20#include <linux/device.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/mutex.h> 22#include <linux/mutex.h>
23#include <linux/math64.h>
23 24
24#include <linux/mtd/mtd.h> 25#include <linux/mtd/mtd.h>
25#include <linux/mtd/partitions.h> 26#include <linux/mtd/partitions.h>
@@ -169,9 +170,9 @@ static int wait_till_ready(struct m25p *flash)
169 */ 170 */
170static int erase_chip(struct m25p *flash) 171static int erase_chip(struct m25p *flash)
171{ 172{
172 DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB\n", 173 DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %lldKiB\n",
173 flash->spi->dev.bus_id, __func__, 174 dev_name(&flash->spi->dev), __func__,
174 flash->mtd.size / 1024); 175 (long long)(flash->mtd.size >> 10));
175 176
176 /* Wait until finished previous write command. */ 177 /* Wait until finished previous write command. */
177 if (wait_till_ready(flash)) 178 if (wait_till_ready(flash))
@@ -197,7 +198,7 @@ static int erase_chip(struct m25p *flash)
197static int erase_sector(struct m25p *flash, u32 offset) 198static int erase_sector(struct m25p *flash, u32 offset)
198{ 199{
199 DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB at 0x%08x\n", 200 DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB at 0x%08x\n",
200 flash->spi->dev.bus_id, __func__, 201 dev_name(&flash->spi->dev), __func__,
201 flash->mtd.erasesize / 1024, offset); 202 flash->mtd.erasesize / 1024, offset);
202 203
203 /* Wait until finished previous write command. */ 204 /* Wait until finished previous write command. */
@@ -232,18 +233,18 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
232{ 233{
233 struct m25p *flash = mtd_to_m25p(mtd); 234 struct m25p *flash = mtd_to_m25p(mtd);
234 u32 addr,len; 235 u32 addr,len;
236 uint32_t rem;
235 237
236 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %d\n", 238 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%llx, len %lld\n",
237 flash->spi->dev.bus_id, __func__, "at", 239 dev_name(&flash->spi->dev), __func__, "at",
238 (u32)instr->addr, instr->len); 240 (long long)instr->addr, (long long)instr->len);
239 241
240 /* sanity checks */ 242 /* sanity checks */
241 if (instr->addr + instr->len > flash->mtd.size) 243 if (instr->addr + instr->len > flash->mtd.size)
242 return -EINVAL; 244 return -EINVAL;
243 if ((instr->addr % mtd->erasesize) != 0 245 div_u64_rem(instr->len, mtd->erasesize, &rem);
244 || (instr->len % mtd->erasesize) != 0) { 246 if (rem)
245 return -EINVAL; 247 return -EINVAL;
246 }
247 248
248 addr = instr->addr; 249 addr = instr->addr;
249 len = instr->len; 250 len = instr->len;
@@ -295,7 +296,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
295 struct spi_message m; 296 struct spi_message m;
296 297
297 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n", 298 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
298 flash->spi->dev.bus_id, __func__, "from", 299 dev_name(&flash->spi->dev), __func__, "from",
299 (u32)from, len); 300 (u32)from, len);
300 301
301 /* sanity checks */ 302 /* sanity checks */
@@ -367,7 +368,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
367 struct spi_message m; 368 struct spi_message m;
368 369
369 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n", 370 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
370 flash->spi->dev.bus_id, __func__, "to", 371 dev_name(&flash->spi->dev), __func__, "to",
371 (u32)to, len); 372 (u32)to, len);
372 373
373 if (retlen) 374 if (retlen)
@@ -563,7 +564,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
563 tmp = spi_write_then_read(spi, &code, 1, id, 5); 564 tmp = spi_write_then_read(spi, &code, 1, id, 5);
564 if (tmp < 0) { 565 if (tmp < 0) {
565 DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n", 566 DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
566 spi->dev.bus_id, tmp); 567 dev_name(&spi->dev), tmp);
567 return NULL; 568 return NULL;
568 } 569 }
569 jedec = id[0]; 570 jedec = id[0];
@@ -617,7 +618,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
617 /* unrecognized chip? */ 618 /* unrecognized chip? */
618 if (i == ARRAY_SIZE(m25p_data)) { 619 if (i == ARRAY_SIZE(m25p_data)) {
619 DEBUG(MTD_DEBUG_LEVEL0, "%s: unrecognized id %s\n", 620 DEBUG(MTD_DEBUG_LEVEL0, "%s: unrecognized id %s\n",
620 spi->dev.bus_id, data->type); 621 dev_name(&spi->dev), data->type);
621 info = NULL; 622 info = NULL;
622 623
623 /* recognized; is that chip really what's there? */ 624 /* recognized; is that chip really what's there? */
@@ -658,7 +659,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
658 if (data && data->name) 659 if (data && data->name)
659 flash->mtd.name = data->name; 660 flash->mtd.name = data->name;
660 else 661 else
661 flash->mtd.name = spi->dev.bus_id; 662 flash->mtd.name = dev_name(&spi->dev);
662 663
663 flash->mtd.type = MTD_NORFLASH; 664 flash->mtd.type = MTD_NORFLASH;
664 flash->mtd.writesize = 1; 665 flash->mtd.writesize = 1;
@@ -677,24 +678,24 @@ static int __devinit m25p_probe(struct spi_device *spi)
677 flash->mtd.erasesize = info->sector_size; 678 flash->mtd.erasesize = info->sector_size;
678 } 679 }
679 680
680 dev_info(&spi->dev, "%s (%d Kbytes)\n", info->name, 681 dev_info(&spi->dev, "%s (%lld Kbytes)\n", info->name,
681 flash->mtd.size / 1024); 682 (long long)flash->mtd.size >> 10);
682 683
683 DEBUG(MTD_DEBUG_LEVEL2, 684 DEBUG(MTD_DEBUG_LEVEL2,
684 "mtd .name = %s, .size = 0x%.8x (%uMiB) " 685 "mtd .name = %s, .size = 0x%llx (%lldMiB) "
685 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", 686 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
686 flash->mtd.name, 687 flash->mtd.name,
687 flash->mtd.size, flash->mtd.size / (1024*1024), 688 (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20),
688 flash->mtd.erasesize, flash->mtd.erasesize / 1024, 689 flash->mtd.erasesize, flash->mtd.erasesize / 1024,
689 flash->mtd.numeraseregions); 690 flash->mtd.numeraseregions);
690 691
691 if (flash->mtd.numeraseregions) 692 if (flash->mtd.numeraseregions)
692 for (i = 0; i < flash->mtd.numeraseregions; i++) 693 for (i = 0; i < flash->mtd.numeraseregions; i++)
693 DEBUG(MTD_DEBUG_LEVEL2, 694 DEBUG(MTD_DEBUG_LEVEL2,
694 "mtd.eraseregions[%d] = { .offset = 0x%.8x, " 695 "mtd.eraseregions[%d] = { .offset = 0x%llx, "
695 ".erasesize = 0x%.8x (%uKiB), " 696 ".erasesize = 0x%.8x (%uKiB), "
696 ".numblocks = %d }\n", 697 ".numblocks = %d }\n",
697 i, flash->mtd.eraseregions[i].offset, 698 i, (long long)flash->mtd.eraseregions[i].offset,
698 flash->mtd.eraseregions[i].erasesize, 699 flash->mtd.eraseregions[i].erasesize,
699 flash->mtd.eraseregions[i].erasesize / 1024, 700 flash->mtd.eraseregions[i].erasesize / 1024,
700 flash->mtd.eraseregions[i].numblocks); 701 flash->mtd.eraseregions[i].numblocks);
@@ -722,12 +723,12 @@ static int __devinit m25p_probe(struct spi_device *spi)
722 if (nr_parts > 0) { 723 if (nr_parts > 0) {
723 for (i = 0; i < nr_parts; i++) { 724 for (i = 0; i < nr_parts; i++) {
724 DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = " 725 DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
725 "{.name = %s, .offset = 0x%.8x, " 726 "{.name = %s, .offset = 0x%llx, "
726 ".size = 0x%.8x (%uKiB) }\n", 727 ".size = 0x%llx (%lldKiB) }\n",
727 i, parts[i].name, 728 i, parts[i].name,
728 parts[i].offset, 729 (long long)parts[i].offset,
729 parts[i].size, 730 (long long)parts[i].size,
730 parts[i].size / 1024); 731 (long long)(parts[i].size >> 10));
731 } 732 }
732 flash->partitioned = 1; 733 flash->partitioned = 1;
733 return add_mtd_partitions(&flash->mtd, parts, nr_parts); 734 return add_mtd_partitions(&flash->mtd, parts, nr_parts);
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 6dd9aff8bb2d..d44f741ae229 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -16,6 +16,7 @@
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/mutex.h> 17#include <linux/mutex.h>
18#include <linux/err.h> 18#include <linux/err.h>
19#include <linux/math64.h>
19 20
20#include <linux/spi/spi.h> 21#include <linux/spi/spi.h>
21#include <linux/spi/flash.h> 22#include <linux/spi/flash.h>
@@ -128,7 +129,7 @@ static int dataflash_waitready(struct spi_device *spi)
128 status = dataflash_status(spi); 129 status = dataflash_status(spi);
129 if (status < 0) { 130 if (status < 0) {
130 DEBUG(MTD_DEBUG_LEVEL1, "%s: status %d?\n", 131 DEBUG(MTD_DEBUG_LEVEL1, "%s: status %d?\n",
131 spi->dev.bus_id, status); 132 dev_name(&spi->dev), status);
132 status = 0; 133 status = 0;
133 } 134 }
134 135
@@ -152,15 +153,20 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
152 struct spi_message msg; 153 struct spi_message msg;
153 unsigned blocksize = priv->page_size << 3; 154 unsigned blocksize = priv->page_size << 3;
154 uint8_t *command; 155 uint8_t *command;
156 uint32_t rem;
155 157
156 DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%x len 0x%x\n", 158 DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%llx len 0x%llx\n",
157 spi->dev.bus_id, 159 dev_name(&spi->dev), (long long)instr->addr,
158 instr->addr, instr->len); 160 (long long)instr->len);
159 161
160 /* Sanity checks */ 162 /* Sanity checks */
161 if ((instr->addr + instr->len) > mtd->size 163 if (instr->addr + instr->len > mtd->size)
162 || (instr->len % priv->page_size) != 0 164 return -EINVAL;
163 || (instr->addr % priv->page_size) != 0) 165 div_u64_rem(instr->len, priv->page_size, &rem);
166 if (rem)
167 return -EINVAL;
168 div_u64_rem(instr->addr, priv->page_size, &rem);
169 if (rem)
164 return -EINVAL; 170 return -EINVAL;
165 171
166 spi_message_init(&msg); 172 spi_message_init(&msg);
@@ -178,7 +184,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
178 /* Calculate flash page address; use block erase (for speed) if 184 /* Calculate flash page address; use block erase (for speed) if
179 * we're at a block boundary and need to erase the whole block. 185 * we're at a block boundary and need to erase the whole block.
180 */ 186 */
181 pageaddr = instr->addr / priv->page_size; 187 pageaddr = div_u64(instr->len, priv->page_size);
182 do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize; 188 do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize;
183 pageaddr = pageaddr << priv->page_offset; 189 pageaddr = pageaddr << priv->page_offset;
184 190
@@ -197,7 +203,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
197 203
198 if (status < 0) { 204 if (status < 0) {
199 printk(KERN_ERR "%s: erase %x, err %d\n", 205 printk(KERN_ERR "%s: erase %x, err %d\n",
200 spi->dev.bus_id, pageaddr, status); 206 dev_name(&spi->dev), pageaddr, status);
201 /* REVISIT: can retry instr->retries times; or 207 /* REVISIT: can retry instr->retries times; or
202 * giveup and instr->fail_addr = instr->addr; 208 * giveup and instr->fail_addr = instr->addr;
203 */ 209 */
@@ -239,7 +245,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
239 int status; 245 int status;
240 246
241 DEBUG(MTD_DEBUG_LEVEL2, "%s: read 0x%x..0x%x\n", 247 DEBUG(MTD_DEBUG_LEVEL2, "%s: read 0x%x..0x%x\n",
242 priv->spi->dev.bus_id, (unsigned)from, (unsigned)(from + len)); 248 dev_name(&priv->spi->dev), (unsigned)from, (unsigned)(from + len));
243 249
244 *retlen = 0; 250 *retlen = 0;
245 251
@@ -288,7 +294,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
288 status = 0; 294 status = 0;
289 } else 295 } else
290 DEBUG(MTD_DEBUG_LEVEL1, "%s: read %x..%x --> %d\n", 296 DEBUG(MTD_DEBUG_LEVEL1, "%s: read %x..%x --> %d\n",
291 priv->spi->dev.bus_id, 297 dev_name(&priv->spi->dev),
292 (unsigned)from, (unsigned)(from + len), 298 (unsigned)from, (unsigned)(from + len),
293 status); 299 status);
294 return status; 300 return status;
@@ -315,7 +321,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
315 uint8_t *command; 321 uint8_t *command;
316 322
317 DEBUG(MTD_DEBUG_LEVEL2, "%s: write 0x%x..0x%x\n", 323 DEBUG(MTD_DEBUG_LEVEL2, "%s: write 0x%x..0x%x\n",
318 spi->dev.bus_id, (unsigned)to, (unsigned)(to + len)); 324 dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len));
319 325
320 *retlen = 0; 326 *retlen = 0;
321 327
@@ -374,7 +380,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
374 status = spi_sync(spi, &msg); 380 status = spi_sync(spi, &msg);
375 if (status < 0) 381 if (status < 0)
376 DEBUG(MTD_DEBUG_LEVEL1, "%s: xfer %u -> %d \n", 382 DEBUG(MTD_DEBUG_LEVEL1, "%s: xfer %u -> %d \n",
377 spi->dev.bus_id, addr, status); 383 dev_name(&spi->dev), addr, status);
378 384
379 (void) dataflash_waitready(priv->spi); 385 (void) dataflash_waitready(priv->spi);
380 } 386 }
@@ -396,7 +402,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
396 spi_transfer_del(x + 1); 402 spi_transfer_del(x + 1);
397 if (status < 0) 403 if (status < 0)
398 DEBUG(MTD_DEBUG_LEVEL1, "%s: pgm %u/%u -> %d \n", 404 DEBUG(MTD_DEBUG_LEVEL1, "%s: pgm %u/%u -> %d \n",
399 spi->dev.bus_id, addr, writelen, status); 405 dev_name(&spi->dev), addr, writelen, status);
400 406
401 (void) dataflash_waitready(priv->spi); 407 (void) dataflash_waitready(priv->spi);
402 408
@@ -416,14 +422,14 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
416 status = spi_sync(spi, &msg); 422 status = spi_sync(spi, &msg);
417 if (status < 0) 423 if (status < 0)
418 DEBUG(MTD_DEBUG_LEVEL1, "%s: compare %u -> %d \n", 424 DEBUG(MTD_DEBUG_LEVEL1, "%s: compare %u -> %d \n",
419 spi->dev.bus_id, addr, status); 425 dev_name(&spi->dev), addr, status);
420 426
421 status = dataflash_waitready(priv->spi); 427 status = dataflash_waitready(priv->spi);
422 428
423 /* Check result of the compare operation */ 429 /* Check result of the compare operation */
424 if (status & (1 << 6)) { 430 if (status & (1 << 6)) {
425 printk(KERN_ERR "%s: compare page %u, err %d\n", 431 printk(KERN_ERR "%s: compare page %u, err %d\n",
426 spi->dev.bus_id, pageaddr, status); 432 dev_name(&spi->dev), pageaddr, status);
427 remaining = 0; 433 remaining = 0;
428 status = -EIO; 434 status = -EIO;
429 break; 435 break;
@@ -667,8 +673,8 @@ add_dataflash_otp(struct spi_device *spi, char *name,
667 if (revision >= 'c') 673 if (revision >= 'c')
668 otp_tag = otp_setup(device, revision); 674 otp_tag = otp_setup(device, revision);
669 675
670 dev_info(&spi->dev, "%s (%d KBytes) pagesize %d bytes%s\n", 676 dev_info(&spi->dev, "%s (%lld KBytes) pagesize %d bytes%s\n",
671 name, DIV_ROUND_UP(device->size, 1024), 677 name, (long long)((device->size + 1023) >> 10),
672 pagesize, otp_tag); 678 pagesize, otp_tag);
673 dev_set_drvdata(&spi->dev, priv); 679 dev_set_drvdata(&spi->dev, priv);
674 680
@@ -779,7 +785,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
779 tmp = spi_write_then_read(spi, &code, 1, id, 3); 785 tmp = spi_write_then_read(spi, &code, 1, id, 3);
780 if (tmp < 0) { 786 if (tmp < 0) {
781 DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n", 787 DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
782 spi->dev.bus_id, tmp); 788 dev_name(&spi->dev), tmp);
783 return ERR_PTR(tmp); 789 return ERR_PTR(tmp);
784 } 790 }
785 if (id[0] != 0x1f) 791 if (id[0] != 0x1f)
@@ -869,7 +875,7 @@ static int __devinit dataflash_probe(struct spi_device *spi)
869 status = dataflash_status(spi); 875 status = dataflash_status(spi);
870 if (status <= 0 || status == 0xff) { 876 if (status <= 0 || status == 0xff) {
871 DEBUG(MTD_DEBUG_LEVEL1, "%s: status error %d\n", 877 DEBUG(MTD_DEBUG_LEVEL1, "%s: status error %d\n",
872 spi->dev.bus_id, status); 878 dev_name(&spi->dev), status);
873 if (status == 0 || status == 0xff) 879 if (status == 0 || status == 0xff)
874 status = -ENODEV; 880 status = -ENODEV;
875 return status; 881 return status;
@@ -905,13 +911,13 @@ static int __devinit dataflash_probe(struct spi_device *spi)
905 /* obsolete AT45DB1282 not (yet?) supported */ 911 /* obsolete AT45DB1282 not (yet?) supported */
906 default: 912 default:
907 DEBUG(MTD_DEBUG_LEVEL1, "%s: unsupported device (%x)\n", 913 DEBUG(MTD_DEBUG_LEVEL1, "%s: unsupported device (%x)\n",
908 spi->dev.bus_id, status & 0x3c); 914 dev_name(&spi->dev), status & 0x3c);
909 status = -ENODEV; 915 status = -ENODEV;
910 } 916 }
911 917
912 if (status < 0) 918 if (status < 0)
913 DEBUG(MTD_DEBUG_LEVEL1, "%s: add_dataflash --> %d\n", 919 DEBUG(MTD_DEBUG_LEVEL1, "%s: add_dataflash --> %d\n",
914 spi->dev.bus_id, status); 920 dev_name(&spi->dev), status);
915 921
916 return status; 922 return status;
917} 923}
@@ -921,7 +927,7 @@ static int __devexit dataflash_remove(struct spi_device *spi)
921 struct dataflash *flash = dev_get_drvdata(&spi->dev); 927 struct dataflash *flash = dev_get_drvdata(&spi->dev);
922 int status; 928 int status;
923 929
924 DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", spi->dev.bus_id); 930 DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", dev_name(&spi->dev));
925 931
926 if (mtd_has_partitions() && flash->partitioned) 932 if (mtd_has_partitions() && flash->partitioned)
927 status = del_mtd_partitions(&flash->mtd); 933 status = del_mtd_partitions(&flash->mtd);
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index 9bf581c4f740..a790c062af1f 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -109,25 +109,25 @@ module_param(shuffle_freq, int, 0);
109/* Each memory region corresponds to a minor device */ 109/* Each memory region corresponds to a minor device */
110typedef struct partition_t { 110typedef struct partition_t {
111 struct mtd_blktrans_dev mbd; 111 struct mtd_blktrans_dev mbd;
112 u_int32_t state; 112 uint32_t state;
113 u_int32_t *VirtualBlockMap; 113 uint32_t *VirtualBlockMap;
114 u_int32_t *VirtualPageMap; 114 uint32_t *VirtualPageMap;
115 u_int32_t FreeTotal; 115 uint32_t FreeTotal;
116 struct eun_info_t { 116 struct eun_info_t {
117 u_int32_t Offset; 117 uint32_t Offset;
118 u_int32_t EraseCount; 118 uint32_t EraseCount;
119 u_int32_t Free; 119 uint32_t Free;
120 u_int32_t Deleted; 120 uint32_t Deleted;
121 } *EUNInfo; 121 } *EUNInfo;
122 struct xfer_info_t { 122 struct xfer_info_t {
123 u_int32_t Offset; 123 uint32_t Offset;
124 u_int32_t EraseCount; 124 uint32_t EraseCount;
125 u_int16_t state; 125 uint16_t state;
126 } *XferInfo; 126 } *XferInfo;
127 u_int16_t bam_index; 127 uint16_t bam_index;
128 u_int32_t *bam_cache; 128 uint32_t *bam_cache;
129 u_int16_t DataUnits; 129 uint16_t DataUnits;
130 u_int32_t BlocksPerUnit; 130 uint32_t BlocksPerUnit;
131 erase_unit_header_t header; 131 erase_unit_header_t header;
132} partition_t; 132} partition_t;
133 133
@@ -199,8 +199,8 @@ static int scan_header(partition_t *part)
199static int build_maps(partition_t *part) 199static int build_maps(partition_t *part)
200{ 200{
201 erase_unit_header_t header; 201 erase_unit_header_t header;
202 u_int16_t xvalid, xtrans, i; 202 uint16_t xvalid, xtrans, i;
203 u_int blocks, j; 203 unsigned blocks, j;
204 int hdr_ok, ret = -1; 204 int hdr_ok, ret = -1;
205 ssize_t retval; 205 ssize_t retval;
206 loff_t offset; 206 loff_t offset;
@@ -269,14 +269,14 @@ static int build_maps(partition_t *part)
269 269
270 /* Set up virtual page map */ 270 /* Set up virtual page map */
271 blocks = le32_to_cpu(header.FormattedSize) >> header.BlockSize; 271 blocks = le32_to_cpu(header.FormattedSize) >> header.BlockSize;
272 part->VirtualBlockMap = vmalloc(blocks * sizeof(u_int32_t)); 272 part->VirtualBlockMap = vmalloc(blocks * sizeof(uint32_t));
273 if (!part->VirtualBlockMap) 273 if (!part->VirtualBlockMap)
274 goto out_XferInfo; 274 goto out_XferInfo;
275 275
276 memset(part->VirtualBlockMap, 0xff, blocks * sizeof(u_int32_t)); 276 memset(part->VirtualBlockMap, 0xff, blocks * sizeof(uint32_t));
277 part->BlocksPerUnit = (1 << header.EraseUnitSize) >> header.BlockSize; 277 part->BlocksPerUnit = (1 << header.EraseUnitSize) >> header.BlockSize;
278 278
279 part->bam_cache = kmalloc(part->BlocksPerUnit * sizeof(u_int32_t), 279 part->bam_cache = kmalloc(part->BlocksPerUnit * sizeof(uint32_t),
280 GFP_KERNEL); 280 GFP_KERNEL);
281 if (!part->bam_cache) 281 if (!part->bam_cache)
282 goto out_VirtualBlockMap; 282 goto out_VirtualBlockMap;
@@ -290,7 +290,7 @@ static int build_maps(partition_t *part)
290 offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset); 290 offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset);
291 291
292 ret = part->mbd.mtd->read(part->mbd.mtd, offset, 292 ret = part->mbd.mtd->read(part->mbd.mtd, offset,
293 part->BlocksPerUnit * sizeof(u_int32_t), &retval, 293 part->BlocksPerUnit * sizeof(uint32_t), &retval,
294 (unsigned char *)part->bam_cache); 294 (unsigned char *)part->bam_cache);
295 295
296 if (ret) 296 if (ret)
@@ -332,7 +332,7 @@ out:
332======================================================================*/ 332======================================================================*/
333 333
334static int erase_xfer(partition_t *part, 334static int erase_xfer(partition_t *part,
335 u_int16_t xfernum) 335 uint16_t xfernum)
336{ 336{
337 int ret; 337 int ret;
338 struct xfer_info_t *xfer; 338 struct xfer_info_t *xfer;
@@ -408,7 +408,7 @@ static int prepare_xfer(partition_t *part, int i)
408 erase_unit_header_t header; 408 erase_unit_header_t header;
409 struct xfer_info_t *xfer; 409 struct xfer_info_t *xfer;
410 int nbam, ret; 410 int nbam, ret;
411 u_int32_t ctl; 411 uint32_t ctl;
412 ssize_t retlen; 412 ssize_t retlen;
413 loff_t offset; 413 loff_t offset;
414 414
@@ -430,15 +430,15 @@ static int prepare_xfer(partition_t *part, int i)
430 } 430 }
431 431
432 /* Write the BAM stub */ 432 /* Write the BAM stub */
433 nbam = (part->BlocksPerUnit * sizeof(u_int32_t) + 433 nbam = (part->BlocksPerUnit * sizeof(uint32_t) +
434 le32_to_cpu(part->header.BAMOffset) + SECTOR_SIZE - 1) / SECTOR_SIZE; 434 le32_to_cpu(part->header.BAMOffset) + SECTOR_SIZE - 1) / SECTOR_SIZE;
435 435
436 offset = xfer->Offset + le32_to_cpu(part->header.BAMOffset); 436 offset = xfer->Offset + le32_to_cpu(part->header.BAMOffset);
437 ctl = cpu_to_le32(BLOCK_CONTROL); 437 ctl = cpu_to_le32(BLOCK_CONTROL);
438 438
439 for (i = 0; i < nbam; i++, offset += sizeof(u_int32_t)) { 439 for (i = 0; i < nbam; i++, offset += sizeof(uint32_t)) {
440 440
441 ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int32_t), 441 ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t),
442 &retlen, (u_char *)&ctl); 442 &retlen, (u_char *)&ctl);
443 443
444 if (ret) 444 if (ret)
@@ -461,18 +461,18 @@ static int prepare_xfer(partition_t *part, int i)
461 461
462======================================================================*/ 462======================================================================*/
463 463
464static int copy_erase_unit(partition_t *part, u_int16_t srcunit, 464static int copy_erase_unit(partition_t *part, uint16_t srcunit,
465 u_int16_t xferunit) 465 uint16_t xferunit)
466{ 466{
467 u_char buf[SECTOR_SIZE]; 467 u_char buf[SECTOR_SIZE];
468 struct eun_info_t *eun; 468 struct eun_info_t *eun;
469 struct xfer_info_t *xfer; 469 struct xfer_info_t *xfer;
470 u_int32_t src, dest, free, i; 470 uint32_t src, dest, free, i;
471 u_int16_t unit; 471 uint16_t unit;
472 int ret; 472 int ret;
473 ssize_t retlen; 473 ssize_t retlen;
474 loff_t offset; 474 loff_t offset;
475 u_int16_t srcunitswap = cpu_to_le16(srcunit); 475 uint16_t srcunitswap = cpu_to_le16(srcunit);
476 476
477 eun = &part->EUNInfo[srcunit]; 477 eun = &part->EUNInfo[srcunit];
478 xfer = &part->XferInfo[xferunit]; 478 xfer = &part->XferInfo[xferunit];
@@ -486,7 +486,7 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
486 offset = eun->Offset + le32_to_cpu(part->header.BAMOffset); 486 offset = eun->Offset + le32_to_cpu(part->header.BAMOffset);
487 487
488 ret = part->mbd.mtd->read(part->mbd.mtd, offset, 488 ret = part->mbd.mtd->read(part->mbd.mtd, offset,
489 part->BlocksPerUnit * sizeof(u_int32_t), 489 part->BlocksPerUnit * sizeof(uint32_t),
490 &retlen, (u_char *) (part->bam_cache)); 490 &retlen, (u_char *) (part->bam_cache));
491 491
492 /* mark the cache bad, in case we get an error later */ 492 /* mark the cache bad, in case we get an error later */
@@ -503,7 +503,7 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
503 offset = xfer->Offset + 20; /* Bad! */ 503 offset = xfer->Offset + 20; /* Bad! */
504 unit = cpu_to_le16(0x7fff); 504 unit = cpu_to_le16(0x7fff);
505 505
506 ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int16_t), 506 ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint16_t),
507 &retlen, (u_char *) &unit); 507 &retlen, (u_char *) &unit);
508 508
509 if (ret) { 509 if (ret) {
@@ -560,7 +560,7 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
560 560
561 561
562 /* All clear? Then update the LogicalEUN again */ 562 /* All clear? Then update the LogicalEUN again */
563 ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + 20, sizeof(u_int16_t), 563 ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t),
564 &retlen, (u_char *)&srcunitswap); 564 &retlen, (u_char *)&srcunitswap);
565 565
566 if (ret) { 566 if (ret) {
@@ -605,8 +605,8 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
605 605
606static int reclaim_block(partition_t *part) 606static int reclaim_block(partition_t *part)
607{ 607{
608 u_int16_t i, eun, xfer; 608 uint16_t i, eun, xfer;
609 u_int32_t best; 609 uint32_t best;
610 int queued, ret; 610 int queued, ret;
611 611
612 DEBUG(0, "ftl_cs: reclaiming space...\n"); 612 DEBUG(0, "ftl_cs: reclaiming space...\n");
@@ -723,10 +723,10 @@ static void dump_lists(partition_t *part)
723} 723}
724#endif 724#endif
725 725
726static u_int32_t find_free(partition_t *part) 726static uint32_t find_free(partition_t *part)
727{ 727{
728 u_int16_t stop, eun; 728 uint16_t stop, eun;
729 u_int32_t blk; 729 uint32_t blk;
730 size_t retlen; 730 size_t retlen;
731 int ret; 731 int ret;
732 732
@@ -749,7 +749,7 @@ static u_int32_t find_free(partition_t *part)
749 749
750 ret = part->mbd.mtd->read(part->mbd.mtd, 750 ret = part->mbd.mtd->read(part->mbd.mtd,
751 part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset), 751 part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
752 part->BlocksPerUnit * sizeof(u_int32_t), 752 part->BlocksPerUnit * sizeof(uint32_t),
753 &retlen, (u_char *) (part->bam_cache)); 753 &retlen, (u_char *) (part->bam_cache));
754 754
755 if (ret) { 755 if (ret) {
@@ -786,7 +786,7 @@ static u_int32_t find_free(partition_t *part)
786static int ftl_read(partition_t *part, caddr_t buffer, 786static int ftl_read(partition_t *part, caddr_t buffer,
787 u_long sector, u_long nblocks) 787 u_long sector, u_long nblocks)
788{ 788{
789 u_int32_t log_addr, bsize; 789 uint32_t log_addr, bsize;
790 u_long i; 790 u_long i;
791 int ret; 791 int ret;
792 size_t offset, retlen; 792 size_t offset, retlen;
@@ -829,14 +829,14 @@ static int ftl_read(partition_t *part, caddr_t buffer,
829 829
830======================================================================*/ 830======================================================================*/
831 831
832static int set_bam_entry(partition_t *part, u_int32_t log_addr, 832static int set_bam_entry(partition_t *part, uint32_t log_addr,
833 u_int32_t virt_addr) 833 uint32_t virt_addr)
834{ 834{
835 u_int32_t bsize, blk, le_virt_addr; 835 uint32_t bsize, blk, le_virt_addr;
836#ifdef PSYCHO_DEBUG 836#ifdef PSYCHO_DEBUG
837 u_int32_t old_addr; 837 uint32_t old_addr;
838#endif 838#endif
839 u_int16_t eun; 839 uint16_t eun;
840 int ret; 840 int ret;
841 size_t retlen, offset; 841 size_t retlen, offset;
842 842
@@ -845,11 +845,11 @@ static int set_bam_entry(partition_t *part, u_int32_t log_addr,
845 bsize = 1 << part->header.EraseUnitSize; 845 bsize = 1 << part->header.EraseUnitSize;
846 eun = log_addr / bsize; 846 eun = log_addr / bsize;
847 blk = (log_addr % bsize) / SECTOR_SIZE; 847 blk = (log_addr % bsize) / SECTOR_SIZE;
848 offset = (part->EUNInfo[eun].Offset + blk * sizeof(u_int32_t) + 848 offset = (part->EUNInfo[eun].Offset + blk * sizeof(uint32_t) +
849 le32_to_cpu(part->header.BAMOffset)); 849 le32_to_cpu(part->header.BAMOffset));
850 850
851#ifdef PSYCHO_DEBUG 851#ifdef PSYCHO_DEBUG
852 ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(u_int32_t), 852 ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(uint32_t),
853 &retlen, (u_char *)&old_addr); 853 &retlen, (u_char *)&old_addr);
854 if (ret) { 854 if (ret) {
855 printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret); 855 printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret);
@@ -886,7 +886,7 @@ static int set_bam_entry(partition_t *part, u_int32_t log_addr,
886#endif 886#endif
887 part->bam_cache[blk] = le_virt_addr; 887 part->bam_cache[blk] = le_virt_addr;
888 } 888 }
889 ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int32_t), 889 ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t),
890 &retlen, (u_char *)&le_virt_addr); 890 &retlen, (u_char *)&le_virt_addr);
891 891
892 if (ret) { 892 if (ret) {
@@ -900,7 +900,7 @@ static int set_bam_entry(partition_t *part, u_int32_t log_addr,
900static int ftl_write(partition_t *part, caddr_t buffer, 900static int ftl_write(partition_t *part, caddr_t buffer,
901 u_long sector, u_long nblocks) 901 u_long sector, u_long nblocks)
902{ 902{
903 u_int32_t bsize, log_addr, virt_addr, old_addr, blk; 903 uint32_t bsize, log_addr, virt_addr, old_addr, blk;
904 u_long i; 904 u_long i;
905 int ret; 905 int ret;
906 size_t retlen, offset; 906 size_t retlen, offset;
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 50ce13887f63..73f05227dc8c 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -50,7 +50,7 @@ static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
50 struct INFTLrecord *inftl; 50 struct INFTLrecord *inftl;
51 unsigned long temp; 51 unsigned long temp;
52 52
53 if (mtd->type != MTD_NANDFLASH) 53 if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX)
54 return; 54 return;
55 /* OK, this is moderately ugly. But probably safe. Alternatives? */ 55 /* OK, this is moderately ugly. But probably safe. Alternatives? */
56 if (memcmp(mtd->name, "DiskOnChip", 10)) 56 if (memcmp(mtd->name, "DiskOnChip", 10))
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 9113628ed1ef..f751dd97c549 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -63,7 +63,7 @@ static int find_boot_record(struct INFTLrecord *inftl)
63 * otherwise. 63 * otherwise.
64 */ 64 */
65 inftl->EraseSize = inftl->mbd.mtd->erasesize; 65 inftl->EraseSize = inftl->mbd.mtd->erasesize;
66 inftl->nb_blocks = inftl->mbd.mtd->size / inftl->EraseSize; 66 inftl->nb_blocks = (u32)inftl->mbd.mtd->size / inftl->EraseSize;
67 67
68 inftl->MediaUnit = BLOCK_NIL; 68 inftl->MediaUnit = BLOCK_NIL;
69 69
@@ -187,7 +187,7 @@ static int find_boot_record(struct INFTLrecord *inftl)
187 mh->BlockMultiplierBits); 187 mh->BlockMultiplierBits);
188 inftl->EraseSize = inftl->mbd.mtd->erasesize << 188 inftl->EraseSize = inftl->mbd.mtd->erasesize <<
189 mh->BlockMultiplierBits; 189 mh->BlockMultiplierBits;
190 inftl->nb_blocks = inftl->mbd.mtd->size / inftl->EraseSize; 190 inftl->nb_blocks = (u32)inftl->mbd.mtd->size / inftl->EraseSize;
191 block >>= mh->BlockMultiplierBits; 191 block >>= mh->BlockMultiplierBits;
192 } 192 }
193 193
diff --git a/drivers/mtd/lpddr/Kconfig b/drivers/mtd/lpddr/Kconfig
new file mode 100644
index 000000000000..acd4ea9b2278
--- /dev/null
+++ b/drivers/mtd/lpddr/Kconfig
@@ -0,0 +1,22 @@
1# drivers/mtd/chips/Kconfig
2
3menu "LPDDR flash memory drivers"
4 depends on MTD!=n
5
6config MTD_LPDDR
7 tristate "Support for LPDDR flash chips"
8 select MTD_QINFO_PROBE
9 help
10 This option enables support of LPDDR (Low power double data rate)
11 flash chips. Synonymous with Mobile-DDR. It is a new standard for
12 DDR memories, intended for battery-operated systems.
13
14config MTD_QINFO_PROBE
15 tristate "Detect flash chips by QINFO probe"
16 help
17 Device Information for LPDDR chips is offered through the Overlay
18 Window QINFO interface, permits software to be used for entire
19 families of devices. This serves similar purpose of CFI on legacy
20 Flash products
21endmenu
22
diff --git a/drivers/mtd/lpddr/Makefile b/drivers/mtd/lpddr/Makefile
new file mode 100644
index 000000000000..da48e46b5812
--- /dev/null
+++ b/drivers/mtd/lpddr/Makefile
@@ -0,0 +1,6 @@
1#
2# linux/drivers/mtd/lpddr/Makefile
3#
4
5obj-$(CONFIG_MTD_QINFO_PROBE) += qinfo_probe.o
6obj-$(CONFIG_MTD_LPDDR) += lpddr_cmds.o
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
new file mode 100644
index 000000000000..e22ca49583e7
--- /dev/null
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -0,0 +1,796 @@
1/*
2 * LPDDR flash memory device operations. This module provides read, write,
3 * erase, lock/unlock support for LPDDR flash memories
4 * (C) 2008 Korolev Alexey <akorolev@infradead.org>
5 * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
6 * Many thanks to Roman Borisov for intial enabling
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 * TODO:
23 * Implement VPP management
24 * Implement XIP support
25 * Implement OTP support
26 */
27#include <linux/mtd/pfow.h>
28#include <linux/mtd/qinfo.h>
29
30static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
31 size_t *retlen, u_char *buf);
32static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to,
33 size_t len, size_t *retlen, const u_char *buf);
34static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
35 unsigned long count, loff_t to, size_t *retlen);
36static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr);
37static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
38static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
39static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
40 size_t *retlen, void **mtdbuf, resource_size_t *phys);
41static void lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
42static int get_chip(struct map_info *map, struct flchip *chip, int mode);
43static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
44static void put_chip(struct map_info *map, struct flchip *chip);
45
46struct mtd_info *lpddr_cmdset(struct map_info *map)
47{
48 struct lpddr_private *lpddr = map->fldrv_priv;
49 struct flchip_shared *shared;
50 struct flchip *chip;
51 struct mtd_info *mtd;
52 int numchips;
53 int i, j;
54
55 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
56 if (!mtd) {
57 printk(KERN_ERR "Failed to allocate memory for MTD device\n");
58 return NULL;
59 }
60 mtd->priv = map;
61 mtd->type = MTD_NORFLASH;
62
63 /* Fill in the default mtd operations */
64 mtd->read = lpddr_read;
65 mtd->type = MTD_NORFLASH;
66 mtd->flags = MTD_CAP_NORFLASH;
67 mtd->flags &= ~MTD_BIT_WRITEABLE;
68 mtd->erase = lpddr_erase;
69 mtd->write = lpddr_write_buffers;
70 mtd->writev = lpddr_writev;
71 mtd->read_oob = NULL;
72 mtd->write_oob = NULL;
73 mtd->sync = NULL;
74 mtd->lock = lpddr_lock;
75 mtd->unlock = lpddr_unlock;
76 mtd->suspend = NULL;
77 mtd->resume = NULL;
78 if (map_is_linear(map)) {
79 mtd->point = lpddr_point;
80 mtd->unpoint = lpddr_unpoint;
81 }
82 mtd->block_isbad = NULL;
83 mtd->block_markbad = NULL;
84 mtd->size = 1 << lpddr->qinfo->DevSizeShift;
85 mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
86 mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
87
88 shared = kmalloc(sizeof(struct flchip_shared) * lpddr->numchips,
89 GFP_KERNEL);
90 if (!shared) {
91 kfree(lpddr);
92 kfree(mtd);
93 return NULL;
94 }
95
96 chip = &lpddr->chips[0];
97 numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
98 for (i = 0; i < numchips; i++) {
99 shared[i].writing = shared[i].erasing = NULL;
100 spin_lock_init(&shared[i].lock);
101 for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
102 *chip = lpddr->chips[i];
103 chip->start += j << lpddr->chipshift;
104 chip->oldstate = chip->state = FL_READY;
105 chip->priv = &shared[i];
106 /* those should be reset too since
107 they create memory references. */
108 init_waitqueue_head(&chip->wq);
109 spin_lock_init(&chip->_spinlock);
110 chip->mutex = &chip->_spinlock;
111 chip++;
112 }
113 }
114
115 return mtd;
116}
117EXPORT_SYMBOL(lpddr_cmdset);
118
119static int wait_for_ready(struct map_info *map, struct flchip *chip,
120 unsigned int chip_op_time)
121{
122 unsigned int timeo, reset_timeo, sleep_time;
123 unsigned int dsr;
124 flstate_t chip_state = chip->state;
125 int ret = 0;
126
127 /* set our timeout to 8 times the expected delay */
128 timeo = chip_op_time * 8;
129 if (!timeo)
130 timeo = 500000;
131 reset_timeo = timeo;
132 sleep_time = chip_op_time / 2;
133
134 for (;;) {
135 dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
136 if (dsr & DSR_READY_STATUS)
137 break;
138 if (!timeo) {
139 printk(KERN_ERR "%s: Flash timeout error state %d \n",
140 map->name, chip_state);
141 ret = -ETIME;
142 break;
143 }
144
145 /* OK Still waiting. Drop the lock, wait a while and retry. */
146 spin_unlock(chip->mutex);
147 if (sleep_time >= 1000000/HZ) {
148 /*
149 * Half of the normal delay still remaining
150 * can be performed with a sleeping delay instead
151 * of busy waiting.
152 */
153 msleep(sleep_time/1000);
154 timeo -= sleep_time;
155 sleep_time = 1000000/HZ;
156 } else {
157 udelay(1);
158 cond_resched();
159 timeo--;
160 }
161 spin_lock(chip->mutex);
162
163 while (chip->state != chip_state) {
164 /* Someone's suspended the operation: sleep */
165 DECLARE_WAITQUEUE(wait, current);
166 set_current_state(TASK_UNINTERRUPTIBLE);
167 add_wait_queue(&chip->wq, &wait);
168 spin_unlock(chip->mutex);
169 schedule();
170 remove_wait_queue(&chip->wq, &wait);
171 spin_lock(chip->mutex);
172 }
173 if (chip->erase_suspended || chip->write_suspended) {
174 /* Suspend has occured while sleep: reset timeout */
175 timeo = reset_timeo;
176 chip->erase_suspended = chip->write_suspended = 0;
177 }
178 }
179 /* check status for errors */
180 if (dsr & DSR_ERR) {
181 /* Clear DSR*/
182 map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
183 printk(KERN_WARNING"%s: Bad status on wait: 0x%x \n",
184 map->name, dsr);
185 print_drs_error(dsr);
186 ret = -EIO;
187 }
188 chip->state = FL_READY;
189 return ret;
190}
191
192static int get_chip(struct map_info *map, struct flchip *chip, int mode)
193{
194 int ret;
195 DECLARE_WAITQUEUE(wait, current);
196
197 retry:
198 if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)
199 && chip->state != FL_SYNCING) {
200 /*
201 * OK. We have possibility for contension on the write/erase
202 * operations which are global to the real chip and not per
203 * partition. So let's fight it over in the partition which
204 * currently has authority on the operation.
205 *
206 * The rules are as follows:
207 *
208 * - any write operation must own shared->writing.
209 *
210 * - any erase operation must own _both_ shared->writing and
211 * shared->erasing.
212 *
213 * - contension arbitration is handled in the owner's context.
214 *
215 * The 'shared' struct can be read and/or written only when
216 * its lock is taken.
217 */
218 struct flchip_shared *shared = chip->priv;
219 struct flchip *contender;
220 spin_lock(&shared->lock);
221 contender = shared->writing;
222 if (contender && contender != chip) {
223 /*
224 * The engine to perform desired operation on this
225 * partition is already in use by someone else.
226 * Let's fight over it in the context of the chip
227 * currently using it. If it is possible to suspend,
228 * that other partition will do just that, otherwise
229 * it'll happily send us to sleep. In any case, when
230 * get_chip returns success we're clear to go ahead.
231 */
232 ret = spin_trylock(contender->mutex);
233 spin_unlock(&shared->lock);
234 if (!ret)
235 goto retry;
236 spin_unlock(chip->mutex);
237 ret = chip_ready(map, contender, mode);
238 spin_lock(chip->mutex);
239
240 if (ret == -EAGAIN) {
241 spin_unlock(contender->mutex);
242 goto retry;
243 }
244 if (ret) {
245 spin_unlock(contender->mutex);
246 return ret;
247 }
248 spin_lock(&shared->lock);
249
250 /* We should not own chip if it is already in FL_SYNCING
251 * state. Put contender and retry. */
252 if (chip->state == FL_SYNCING) {
253 put_chip(map, contender);
254 spin_unlock(contender->mutex);
255 goto retry;
256 }
257 spin_unlock(contender->mutex);
258 }
259
260 /* Check if we have suspended erase on this chip.
261 Must sleep in such a case. */
262 if (mode == FL_ERASING && shared->erasing
263 && shared->erasing->oldstate == FL_ERASING) {
264 spin_unlock(&shared->lock);
265 set_current_state(TASK_UNINTERRUPTIBLE);
266 add_wait_queue(&chip->wq, &wait);
267 spin_unlock(chip->mutex);
268 schedule();
269 remove_wait_queue(&chip->wq, &wait);
270 spin_lock(chip->mutex);
271 goto retry;
272 }
273
274 /* We now own it */
275 shared->writing = chip;
276 if (mode == FL_ERASING)
277 shared->erasing = chip;
278 spin_unlock(&shared->lock);
279 }
280
281 ret = chip_ready(map, chip, mode);
282 if (ret == -EAGAIN)
283 goto retry;
284
285 return ret;
286}
287
288static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
289{
290 struct lpddr_private *lpddr = map->fldrv_priv;
291 int ret = 0;
292 DECLARE_WAITQUEUE(wait, current);
293
294 /* Prevent setting state FL_SYNCING for chip in suspended state. */
295 if (FL_SYNCING == mode && FL_READY != chip->oldstate)
296 goto sleep;
297
298 switch (chip->state) {
299 case FL_READY:
300 case FL_JEDEC_QUERY:
301 return 0;
302
303 case FL_ERASING:
304 if (!lpddr->qinfo->SuspEraseSupp ||
305 !(mode == FL_READY || mode == FL_POINT))
306 goto sleep;
307
308 map_write(map, CMD(LPDDR_SUSPEND),
309 map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND);
310 chip->oldstate = FL_ERASING;
311 chip->state = FL_ERASE_SUSPENDING;
312 ret = wait_for_ready(map, chip, 0);
313 if (ret) {
314 /* Oops. something got wrong. */
315 /* Resume and pretend we weren't here. */
316 map_write(map, CMD(LPDDR_RESUME),
317 map->pfow_base + PFOW_COMMAND_CODE);
318 map_write(map, CMD(LPDDR_START_EXECUTION),
319 map->pfow_base + PFOW_COMMAND_EXECUTE);
320 chip->state = FL_ERASING;
321 chip->oldstate = FL_READY;
322 printk(KERN_ERR "%s: suspend operation failed."
323 "State may be wrong \n", map->name);
324 return -EIO;
325 }
326 chip->erase_suspended = 1;
327 chip->state = FL_READY;
328 return 0;
329 /* Erase suspend */
330 case FL_POINT:
331 /* Only if there's no operation suspended... */
332 if (mode == FL_READY && chip->oldstate == FL_READY)
333 return 0;
334
335 default:
336sleep:
337 set_current_state(TASK_UNINTERRUPTIBLE);
338 add_wait_queue(&chip->wq, &wait);
339 spin_unlock(chip->mutex);
340 schedule();
341 remove_wait_queue(&chip->wq, &wait);
342 spin_lock(chip->mutex);
343 return -EAGAIN;
344 }
345}
346
347static void put_chip(struct map_info *map, struct flchip *chip)
348{
349 if (chip->priv) {
350 struct flchip_shared *shared = chip->priv;
351 spin_lock(&shared->lock);
352 if (shared->writing == chip && chip->oldstate == FL_READY) {
353 /* We own the ability to write, but we're done */
354 shared->writing = shared->erasing;
355 if (shared->writing && shared->writing != chip) {
356 /* give back the ownership */
357 struct flchip *loaner = shared->writing;
358 spin_lock(loaner->mutex);
359 spin_unlock(&shared->lock);
360 spin_unlock(chip->mutex);
361 put_chip(map, loaner);
362 spin_lock(chip->mutex);
363 spin_unlock(loaner->mutex);
364 wake_up(&chip->wq);
365 return;
366 }
367 shared->erasing = NULL;
368 shared->writing = NULL;
369 } else if (shared->erasing == chip && shared->writing != chip) {
370 /*
371 * We own the ability to erase without the ability
372 * to write, which means the erase was suspended
373 * and some other partition is currently writing.
374 * Don't let the switch below mess things up since
375 * we don't have ownership to resume anything.
376 */
377 spin_unlock(&shared->lock);
378 wake_up(&chip->wq);
379 return;
380 }
381 spin_unlock(&shared->lock);
382 }
383
384 switch (chip->oldstate) {
385 case FL_ERASING:
386 chip->state = chip->oldstate;
387 map_write(map, CMD(LPDDR_RESUME),
388 map->pfow_base + PFOW_COMMAND_CODE);
389 map_write(map, CMD(LPDDR_START_EXECUTION),
390 map->pfow_base + PFOW_COMMAND_EXECUTE);
391 chip->oldstate = FL_READY;
392 chip->state = FL_ERASING;
393 break;
394 case FL_READY:
395 break;
396 default:
397 printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n",
398 map->name, chip->oldstate);
399 }
400 wake_up(&chip->wq);
401}
402
403int do_write_buffer(struct map_info *map, struct flchip *chip,
404 unsigned long adr, const struct kvec **pvec,
405 unsigned long *pvec_seek, int len)
406{
407 struct lpddr_private *lpddr = map->fldrv_priv;
408 map_word datum;
409 int ret, wbufsize, word_gap, words;
410 const struct kvec *vec;
411 unsigned long vec_seek;
412 unsigned long prog_buf_ofs;
413
414 wbufsize = 1 << lpddr->qinfo->BufSizeShift;
415
416 spin_lock(chip->mutex);
417 ret = get_chip(map, chip, FL_WRITING);
418 if (ret) {
419 spin_unlock(chip->mutex);
420 return ret;
421 }
422 /* Figure out the number of words to write */
423 word_gap = (-adr & (map_bankwidth(map)-1));
424 words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
425 if (!word_gap) {
426 words--;
427 } else {
428 word_gap = map_bankwidth(map) - word_gap;
429 adr -= word_gap;
430 datum = map_word_ff(map);
431 }
432 /* Write data */
433 /* Get the program buffer offset from PFOW register data first*/
434 prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map,
435 map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET));
436 vec = *pvec;
437 vec_seek = *pvec_seek;
438 do {
439 int n = map_bankwidth(map) - word_gap;
440
441 if (n > vec->iov_len - vec_seek)
442 n = vec->iov_len - vec_seek;
443 if (n > len)
444 n = len;
445
446 if (!word_gap && (len < map_bankwidth(map)))
447 datum = map_word_ff(map);
448
449 datum = map_word_load_partial(map, datum,
450 vec->iov_base + vec_seek, word_gap, n);
451
452 len -= n;
453 word_gap += n;
454 if (!len || word_gap == map_bankwidth(map)) {
455 map_write(map, datum, prog_buf_ofs);
456 prog_buf_ofs += map_bankwidth(map);
457 word_gap = 0;
458 }
459
460 vec_seek += n;
461 if (vec_seek == vec->iov_len) {
462 vec++;
463 vec_seek = 0;
464 }
465 } while (len);
466 *pvec = vec;
467 *pvec_seek = vec_seek;
468
469 /* GO GO GO */
470 send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL);
471 chip->state = FL_WRITING;
472 ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
473 if (ret) {
474 printk(KERN_WARNING"%s Buffer program error: %d at %lx; \n",
475 map->name, ret, adr);
476 goto out;
477 }
478
479 out: put_chip(map, chip);
480 spin_unlock(chip->mutex);
481 return ret;
482}
483
484int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
485{
486 struct map_info *map = mtd->priv;
487 struct lpddr_private *lpddr = map->fldrv_priv;
488 int chipnum = adr >> lpddr->chipshift;
489 struct flchip *chip = &lpddr->chips[chipnum];
490 int ret;
491
492 spin_lock(chip->mutex);
493 ret = get_chip(map, chip, FL_ERASING);
494 if (ret) {
495 spin_unlock(chip->mutex);
496 return ret;
497 }
498 send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
499 chip->state = FL_ERASING;
500 ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000);
501 if (ret) {
502 printk(KERN_WARNING"%s Erase block error %d at : %llx\n",
503 map->name, ret, adr);
504 goto out;
505 }
506 out: put_chip(map, chip);
507 spin_unlock(chip->mutex);
508 return ret;
509}
510
511static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
512 size_t *retlen, u_char *buf)
513{
514 struct map_info *map = mtd->priv;
515 struct lpddr_private *lpddr = map->fldrv_priv;
516 int chipnum = adr >> lpddr->chipshift;
517 struct flchip *chip = &lpddr->chips[chipnum];
518 int ret = 0;
519
520 spin_lock(chip->mutex);
521 ret = get_chip(map, chip, FL_READY);
522 if (ret) {
523 spin_unlock(chip->mutex);
524 return ret;
525 }
526
527 map_copy_from(map, buf, adr, len);
528 *retlen = len;
529
530 put_chip(map, chip);
531 spin_unlock(chip->mutex);
532 return ret;
533}
534
/*
 * MTD ->point() hook: hand back a direct kernel pointer into the mapped
 * flash for the range [adr, adr + len).  Walks chip by chip, claiming each
 * for FL_POINT and bumping its ref_point_counter; stops early (with a
 * short *retlen) if the chips are not virtually contiguous or get_chip()
 * fails.
 *
 * NOTE(review): the @phys output parameter is never written here — callers
 * presumably must not rely on it; confirm against the MTD core's
 * expectations for ->point().
 */
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, void **mtdbuf, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	unsigned long ofs, last_end = 0;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret = 0;

	if (!map->virt || (adr + len > mtd->size))
		return -EINVAL;

	/* ofs: offset within the first chip that the first read should start */
	ofs = adr - (chipnum << lpddr->chipshift);

	*mtdbuf = (void *)map->virt + chip->start + ofs;
	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= lpddr->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = chip->start;
		else if (chip->start != last_end)
			break;

		/* clamp this iteration to the end of the current chip */
		if ((len + ofs - 1) >> lpddr->chipshift)
			thislen = (1<<lpddr->chipshift) - ofs;
		else
			thislen = len;
		/* get the chip */
		spin_lock(chip->mutex);
		ret = get_chip(map, chip, FL_POINT);
		spin_unlock(chip->mutex);
		if (ret)
			break;

		/* ref-counted: lpddr_unpoint() decrements and restores FL_READY */
		chip->state = FL_POINT;
		chip->ref_point_counter++;
		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << lpddr->chipshift;
		chipnum++;
		chip = &lpddr->chips[chipnum];
	}
	return 0;
}
589
/*
 * MTD ->unpoint() hook: release a region previously handed out by
 * lpddr_point().  Walks the same chip range, decrementing each chip's
 * ref_point_counter and returning it to FL_READY when the count hits zero.
 * Warns (but still calls put_chip) if a chip in the range was not in
 * FL_POINT state.
 *
 * NOTE(review): &lpddr->chips[chipnum] is formed before the
 * chipnum >= numchips bounds check — only the address is taken, never
 * dereferenced, but moving the check first would be cleaner; confirm.
 */
static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	unsigned long ofs;

	/* ofs: offset within the first chip that the first read should start */
	ofs = adr - (chipnum << lpddr->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &lpddr->chips[chipnum];
		if (chipnum >= lpddr->numchips)
			break;

		/* clamp this iteration to the end of the current chip */
		if ((len + ofs - 1) >> lpddr->chipshift)
			thislen = (1<<lpddr->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if (chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_WARNING "%s: Warning: unpoint called on non"
					"pointed region\n", map->name);

		put_chip(map, chip);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}
630
631static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
632 size_t *retlen, const u_char *buf)
633{
634 struct kvec vec;
635
636 vec.iov_base = (void *) buf;
637 vec.iov_len = len;
638
639 return lpddr_writev(mtd, &vec, 1, to, retlen);
640}
641
642
/*
 * MTD ->writev() hook: program a scatter list of @count kvecs to flash at
 * @to.  Work is chopped into pieces that never cross a program-buffer
 * boundary (wbufsize = 2^BufSizeShift) and each piece is handed to
 * do_write_buffer(), which consumes the kvec cursor (&vecs, &vec_seek)
 * as it goes.  Returns 0 on success; *retlen counts bytes written so far.
 *
 * NOTE(review): chipnum is computed once from @to and never advanced,
 * so every do_write_buffer() call targets lpddr->chips[chipnum] even if
 * ofs crosses into the next chip — confirm callers never issue writes
 * spanning a chip boundary.
 */
static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	int wbufsize = 1 << lpddr->qinfo->BufSizeShift;

	size_t len = 0;

	/* total payload across all iovecs */
	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> lpddr->chipshift;

	ofs = to;
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;

		ret = do_write_buffer(map, &lpddr->chips[chipnum],
						ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		/* Be nice and reschedule with the chip in a usable
		 * state for other processes */
		cond_resched();

	} while (len);

	return 0;
}
691
/*
 * MTD ->erase() hook: erase [instr->addr, instr->addr + instr->len) one
 * uniform block (2^UniformBlockSizeShift bytes) at a time via
 * do_erase_oneblock(), then mark the request done and fire the callback.
 *
 * NOTE(review): instr->addr/len are 64-bit but are held here in
 * unsigned long — on 32-bit kernels requests beyond 4GiB would truncate;
 * confirm device sizes make this unreachable.
 */
static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int size = 1 << lpddr->qinfo->UniformBlockSizeShift;

	ofs = instr->addr;
	len = instr->len;

	/* range must lie entirely within the device */
	if (ofs > mtd->size || (len + ofs) > mtd->size)
		return -EINVAL;

	while (len > 0) {
		ret = do_erase_oneblock(mtd, ofs);
		if (ret)
			return ret;
		ofs += size;
		len -= size;
	}
	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
718
719#define DO_XXLOCK_LOCK 1
720#define DO_XXLOCK_UNLOCK 2
721int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
722{
723 int ret = 0;
724 struct map_info *map = mtd->priv;
725 struct lpddr_private *lpddr = map->fldrv_priv;
726 int chipnum = adr >> lpddr->chipshift;
727 struct flchip *chip = &lpddr->chips[chipnum];
728
729 spin_lock(chip->mutex);
730 ret = get_chip(map, chip, FL_LOCKING);
731 if (ret) {
732 spin_unlock(chip->mutex);
733 return ret;
734 }
735
736 if (thunk == DO_XXLOCK_LOCK) {
737 send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL);
738 chip->state = FL_LOCKING;
739 } else if (thunk == DO_XXLOCK_UNLOCK) {
740 send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL);
741 chip->state = FL_UNLOCKING;
742 } else
743 BUG();
744
745 ret = wait_for_ready(map, chip, 1);
746 if (ret) {
747 printk(KERN_ERR "%s: block unlock error status %d \n",
748 map->name, ret);
749 goto out;
750 }
751out: put_chip(map, chip);
752 spin_unlock(chip->mutex);
753 return ret;
754}
755
756static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
757{
758 return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK);
759}
760
761static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
762{
763 return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
764}
765
766int word_program(struct map_info *map, loff_t adr, uint32_t curval)
767{
768 int ret;
769 struct lpddr_private *lpddr = map->fldrv_priv;
770 int chipnum = adr >> lpddr->chipshift;
771 struct flchip *chip = &lpddr->chips[chipnum];
772
773 spin_lock(chip->mutex);
774 ret = get_chip(map, chip, FL_WRITING);
775 if (ret) {
776 spin_unlock(chip->mutex);
777 return ret;
778 }
779
780 send_pfow_command(map, LPDDR_WORD_PROGRAM, adr, 0x00, (map_word *)&curval);
781
782 ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->SingleWordProgTime));
783 if (ret) {
784 printk(KERN_WARNING"%s word_program error at: %llx; val: %x\n",
785 map->name, adr, curval);
786 goto out;
787 }
788
789out: put_chip(map, chip);
790 spin_unlock(chip->mutex);
791 return ret;
792}
793
794MODULE_LICENSE("GPL");
795MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
796MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");
diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
new file mode 100644
index 000000000000..79bf40f48b75
--- /dev/null
+++ b/drivers/mtd/lpddr/qinfo_probe.c
@@ -0,0 +1,255 @@
1/*
2 * Probing flash chips with QINFO records.
3 * (C) 2008 Korolev Alexey <akorolev@infradead.org>
4 * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 */
21#include <linux/module.h>
22#include <linux/types.h>
23#include <linux/kernel.h>
24#include <linux/init.h>
25#include <linux/errno.h>
26#include <linux/slab.h>
27#include <linux/interrupt.h>
28
29#include <linux/mtd/xip.h>
30#include <linux/mtd/map.h>
31#include <linux/mtd/pfow.h>
32#include <linux/mtd/qinfo.h>
33
34static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr);
35struct mtd_info *lpddr_probe(struct map_info *map);
36static struct lpddr_private *lpddr_probe_chip(struct map_info *map);
37static int lpddr_pfow_present(struct map_info *map,
38 struct lpddr_private *lpddr);
39
/*
 * Table of QINFO records the probe knows how to query.
 * Each entry carries the record's major/minor address components (combined
 * into a device address by lpddr_get_qinforec_pos()), the id string used
 * for lookup, and a human-readable description.
 */
static struct qinfo_query_info qinfo_array[] = {
	/* General device info */
	{0, 0, "DevSizeShift", "Device size 2^n bytes"},
	{0, 3, "BufSizeShift", "Program buffer size 2^n bytes"},
	/* Erase block information */
	{1, 1, "TotalBlocksNum", "Total number of blocks"},
	{1, 2, "UniformBlockSizeShift", "Uniform block size 2^n bytes"},
	/* Partition information */
	{2, 1, "HWPartsNum", "Number of hardware partitions"},
	/* Optional features */
	{5, 1, "SuspEraseSupp", "Suspend erase supported"},
	/* Operation typical time */
	{10, 0, "SingleWordProgTime", "Single word program 2^n u-sec"},
	{10, 1, "ProgBufferTime", "Program buffer write 2^n u-sec"},
	{10, 2, "BlockEraseTime", "Block erase 2^n m-sec"},
	{10, 3, "FullChipEraseTime", "Full chip erase 2^n m-sec"},
};
57
/*
 * Look up @id_str in qinfo_array and return the record's device address:
 * minor in the low bankwidth bits, major shifted above them.
 * An unknown id string is a driver bug: logs and BUG()s.
 *
 * NOTE(review): with a 4-byte bankwidth, bankwidth becomes 32 and both
 * (1 << bankwidth) and (major << bankwidth) shift a 32-bit int by its
 * full width (undefined behavior) — presumably only bankwidths <= 2 are
 * used with LPDDR; confirm.
 */
static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str)
{
	int qinfo_lines = sizeof(qinfo_array)/sizeof(struct qinfo_query_info);
	int i;
	int bankwidth = map_bankwidth(map) * 8;
	int major, minor;

	for (i = 0; i < qinfo_lines; i++) {
		if (strcmp(id_str, qinfo_array[i].id_str) == 0) {
			major = qinfo_array[i].major & ((1 << bankwidth) - 1);
			minor = qinfo_array[i].minor & ((1 << bankwidth) - 1);
			return minor | (major << bankwidth);
		}
	}
	printk(KERN_ERR"%s qinfo id string is wrong! \n", map->name);
	BUG();
	return -1;
}
76
/*
 * Read one QINFO record identified by @id_str from the chip.
 * Writes an LPDDR_INFO_QUERY command (record address split into low/high
 * halves) through the PFOW window, starts execution, then polls the DSR
 * ready bit (up to 20 tries, 10us apart) before reading the result.
 *
 * NOTE(review): the result is read even if the poll loop times out, and
 * the same (1 << bits_per_chip) shift-width concern as
 * lpddr_get_qinforec_pos() applies for 4-byte bankwidths — confirm.
 */
static uint16_t lpddr_info_query(struct map_info *map, char *id_str)
{
	unsigned int dsr, val;
	int bits_per_chip = map_bankwidth(map) * 8;
	unsigned long adr = lpddr_get_qinforec_pos(map, id_str);
	int attempts = 20;

	/* Write a request for the PFOW record */
	map_write(map, CMD(LPDDR_INFO_QUERY),
			map->pfow_base + PFOW_COMMAND_CODE);
	map_write(map, CMD(adr & ((1 << bits_per_chip) - 1)),
			map->pfow_base + PFOW_COMMAND_ADDRESS_L);
	map_write(map, CMD(adr >> bits_per_chip),
			map->pfow_base + PFOW_COMMAND_ADDRESS_H);
	map_write(map, CMD(LPDDR_START_EXECUTION),
			map->pfow_base + PFOW_COMMAND_EXECUTE);

	/* poll the status register until the device reports ready */
	while ((attempts--) > 0) {
		dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
		if (dsr & DSR_READY_STATUS)
			break;
		udelay(10);
	}

	val = CMDVAL(map_read(map, map->pfow_base + PFOW_COMMAND_DATA));
	return val;
}
104
105static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
106{
107 map_word pfow_val[4];
108
109 /* Check identification string */
110 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
111 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
112 pfow_val[2] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_O);
113 pfow_val[3] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_W);
114
115 if (!map_word_equal(map, CMD('P'), pfow_val[0]))
116 goto out;
117
118 if (!map_word_equal(map, CMD('F'), pfow_val[1]))
119 goto out;
120
121 if (!map_word_equal(map, CMD('O'), pfow_val[2]))
122 goto out;
123
124 if (!map_word_equal(map, CMD('W'), pfow_val[3]))
125 goto out;
126
127 return 1; /* "PFOW" is found */
128out:
129 printk(KERN_WARNING"%s: PFOW string at 0x%lx is not found \n",
130 map->name, map->pfow_base);
131 return 0;
132}
133
134static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr)
135{
136
137 lpddr->qinfo = kmalloc(sizeof(struct qinfo_chip), GFP_KERNEL);
138 if (!lpddr->qinfo) {
139 printk(KERN_WARNING "%s: no memory for LPDDR qinfo structure\n",
140 map->name);
141 return 0;
142 }
143 memset(lpddr->qinfo, 0, sizeof(struct qinfo_chip));
144
145 /* Get the ManuID */
146 lpddr->ManufactId = CMDVAL(map_read(map, map->pfow_base + PFOW_MANUFACTURER_ID));
147 /* Get the DeviceID */
148 lpddr->DevId = CMDVAL(map_read(map, map->pfow_base + PFOW_DEVICE_ID));
149 /* read parameters from chip qinfo table */
150 lpddr->qinfo->DevSizeShift = lpddr_info_query(map, "DevSizeShift");
151 lpddr->qinfo->TotalBlocksNum = lpddr_info_query(map, "TotalBlocksNum");
152 lpddr->qinfo->BufSizeShift = lpddr_info_query(map, "BufSizeShift");
153 lpddr->qinfo->HWPartsNum = lpddr_info_query(map, "HWPartsNum");
154 lpddr->qinfo->UniformBlockSizeShift =
155 lpddr_info_query(map, "UniformBlockSizeShift");
156 lpddr->qinfo->SuspEraseSupp = lpddr_info_query(map, "SuspEraseSupp");
157 lpddr->qinfo->SingleWordProgTime =
158 lpddr_info_query(map, "SingleWordProgTime");
159 lpddr->qinfo->ProgBufferTime = lpddr_info_query(map, "ProgBufferTime");
160 lpddr->qinfo->BlockEraseTime = lpddr_info_query(map, "BlockEraseTime");
161 return 1;
162}
163static struct lpddr_private *lpddr_probe_chip(struct map_info *map)
164{
165 struct lpddr_private lpddr;
166 struct lpddr_private *retlpddr;
167 int numvirtchips;
168
169
170 if ((map->pfow_base + 0x1000) >= map->size) {
171 printk(KERN_NOTICE"%s Probe at base (0x%08lx) past the end of"
172 "the map(0x%08lx)\n", map->name,
173 (unsigned long)map->pfow_base, map->size - 1);
174 return NULL;
175 }
176 memset(&lpddr, 0, sizeof(struct lpddr_private));
177 if (!lpddr_pfow_present(map, &lpddr))
178 return NULL;
179
180 if (!lpddr_chip_setup(map, &lpddr))
181 return NULL;
182
183 /* Ok so we found a chip */
184 lpddr.chipshift = lpddr.qinfo->DevSizeShift;
185 lpddr.numchips = 1;
186
187 numvirtchips = lpddr.numchips * lpddr.qinfo->HWPartsNum;
188 retlpddr = kmalloc(sizeof(struct lpddr_private) +
189 numvirtchips * sizeof(struct flchip), GFP_KERNEL);
190 if (!retlpddr)
191 return NULL;
192
193 memset(retlpddr, 0, sizeof(struct lpddr_private) +
194 numvirtchips * sizeof(struct flchip));
195 memcpy(retlpddr, &lpddr, sizeof(struct lpddr_private));
196
197 retlpddr->numchips = numvirtchips;
198 retlpddr->chipshift = retlpddr->qinfo->DevSizeShift -
199 __ffs(retlpddr->qinfo->HWPartsNum);
200
201 return retlpddr;
202}
203
204struct mtd_info *lpddr_probe(struct map_info *map)
205{
206 struct mtd_info *mtd = NULL;
207 struct lpddr_private *lpddr;
208
209 /* First probe the map to see if we havecan open PFOW here */
210 lpddr = lpddr_probe_chip(map);
211 if (!lpddr)
212 return NULL;
213
214 map->fldrv_priv = lpddr;
215 mtd = lpddr_cmdset(map);
216 if (mtd) {
217 if (mtd->size > map->size) {
218 printk(KERN_WARNING "Reducing visibility of %ldKiB chip"
219 "to %ldKiB\n", (unsigned long)mtd->size >> 10,
220 (unsigned long)map->size >> 10);
221 mtd->size = map->size;
222 }
223 return mtd;
224 }
225
226 kfree(lpddr->qinfo);
227 kfree(lpddr);
228 map->fldrv_priv = NULL;
229 return NULL;
230}
231
/* Chip driver registered with the MTD core; matched by name "qinfo_probe". */
static struct mtd_chip_driver lpddr_chipdrv = {
	.probe		= lpddr_probe,
	.name		= "qinfo_probe",
	.module		= THIS_MODULE
};
237
/* Module init: register the qinfo chip driver with the MTD core. */
static int __init lpddr_probe_init(void)
{
	register_mtd_chip_driver(&lpddr_chipdrv);
	return 0;
}
243
/* Module exit: unregister the qinfo chip driver. */
static void __exit lpddr_probe_exit(void)
{
	unregister_mtd_chip_driver(&lpddr_chipdrv);
}
248
249module_init(lpddr_probe_init);
250module_exit(lpddr_probe_exit);
251
252MODULE_LICENSE("GPL");
253MODULE_AUTHOR("Vasiliy Leonenko <vasiliy.leonenko@gmail.com>");
254MODULE_DESCRIPTION("Driver to probe qinfo flash chips");
255
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 5ea169362164..0225cbbf22de 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -10,8 +10,8 @@ config MTD_COMPLEX_MAPPINGS
10 paged mappings of flash chips. 10 paged mappings of flash chips.
11 11
12config MTD_PHYSMAP 12config MTD_PHYSMAP
13 tristate "CFI Flash device in physical memory map" 13 tristate "Flash device in physical memory map"
14 depends on MTD_CFI || MTD_JEDECPROBE || MTD_ROM 14 depends on MTD_CFI || MTD_JEDECPROBE || MTD_ROM || MTD_LPDDR
15 help 15 help
16 This provides a 'mapping' driver which allows the NOR Flash and 16 This provides a 'mapping' driver which allows the NOR Flash and
17 ROM driver code to communicate with chips which are mapped 17 ROM driver code to communicate with chips which are mapped
@@ -23,9 +23,20 @@ config MTD_PHYSMAP
23 To compile this driver as a module, choose M here: the 23 To compile this driver as a module, choose M here: the
24 module will be called physmap. 24 module will be called physmap.
25 25
26config MTD_PHYSMAP_COMPAT
27 bool "Physmap compat support"
28 depends on MTD_PHYSMAP
29 default n
30 help
31 Setup a simple mapping via the Kconfig options. Normally the
32 physmap configuration options are done via your board's
33 resource file.
34
35 If unsure, say N here.
36
26config MTD_PHYSMAP_START 37config MTD_PHYSMAP_START
27 hex "Physical start address of flash mapping" 38 hex "Physical start address of flash mapping"
28 depends on MTD_PHYSMAP 39 depends on MTD_PHYSMAP_COMPAT
29 default "0x8000000" 40 default "0x8000000"
30 help 41 help
31 This is the physical memory location at which the flash chips 42 This is the physical memory location at which the flash chips
@@ -37,7 +48,7 @@ config MTD_PHYSMAP_START
37 48
38config MTD_PHYSMAP_LEN 49config MTD_PHYSMAP_LEN
39 hex "Physical length of flash mapping" 50 hex "Physical length of flash mapping"
40 depends on MTD_PHYSMAP 51 depends on MTD_PHYSMAP_COMPAT
41 default "0" 52 default "0"
42 help 53 help
43 This is the total length of the mapping of the flash chips on 54 This is the total length of the mapping of the flash chips on
@@ -51,7 +62,7 @@ config MTD_PHYSMAP_LEN
51 62
52config MTD_PHYSMAP_BANKWIDTH 63config MTD_PHYSMAP_BANKWIDTH
53 int "Bank width in octets" 64 int "Bank width in octets"
54 depends on MTD_PHYSMAP 65 depends on MTD_PHYSMAP_COMPAT
55 default "2" 66 default "2"
56 help 67 help
57 This is the total width of the data bus of the flash devices 68 This is the total width of the data bus of the flash devices
diff --git a/drivers/mtd/maps/alchemy-flash.c b/drivers/mtd/maps/alchemy-flash.c
index 82811bcb0436..845ad4f2a542 100644
--- a/drivers/mtd/maps/alchemy-flash.c
+++ b/drivers/mtd/maps/alchemy-flash.c
@@ -111,7 +111,7 @@ static struct mtd_partition alchemy_partitions[] = {
111 111
112static struct mtd_info *mymtd; 112static struct mtd_info *mymtd;
113 113
114int __init alchemy_mtd_init(void) 114static int __init alchemy_mtd_init(void)
115{ 115{
116 struct mtd_partition *parts; 116 struct mtd_partition *parts;
117 int nb_parts = 0; 117 int nb_parts = 0;
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index d1eec7d3243f..237733d094c4 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -232,8 +232,8 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
232 /* Trim the size if we are larger than the map */ 232 /* Trim the size if we are larger than the map */
233 if (map->mtd->size > map->map.size) { 233 if (map->mtd->size > map->map.size) {
234 printk(KERN_WARNING MOD_NAME 234 printk(KERN_WARNING MOD_NAME
235 " rom(%u) larger than window(%lu). fixing...\n", 235 " rom(%llu) larger than window(%lu). fixing...\n",
236 map->mtd->size, map->map.size); 236 (unsigned long long)map->mtd->size, map->map.size);
237 map->mtd->size = map->map.size; 237 map->mtd->size = map->map.size;
238 } 238 }
239 if (window->rsrc.parent) { 239 if (window->rsrc.parent) {
diff --git a/drivers/mtd/maps/cdb89712.c b/drivers/mtd/maps/cdb89712.c
index e5059aa3c724..8d92d8db9a98 100644
--- a/drivers/mtd/maps/cdb89712.c
+++ b/drivers/mtd/maps/cdb89712.c
@@ -14,7 +14,18 @@
14#include <linux/mtd/map.h> 14#include <linux/mtd/map.h>
15#include <linux/mtd/partitions.h> 15#include <linux/mtd/partitions.h>
16 16
17 17/* dynamic ioremap() areas */
18#define FLASH_START 0x00000000
19#define FLASH_SIZE 0x800000
20#define FLASH_WIDTH 4
21
22#define SRAM_START 0x60000000
23#define SRAM_SIZE 0xc000
24#define SRAM_WIDTH 4
25
26#define BOOTROM_START 0x70000000
27#define BOOTROM_SIZE 0x80
28#define BOOTROM_WIDTH 4
18 29
19 30
20static struct mtd_info *flash_mtd; 31static struct mtd_info *flash_mtd;
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
index 0ecc3f6d735b..b4ed81611918 100644
--- a/drivers/mtd/maps/cfi_flagadm.c
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -88,7 +88,7 @@ struct mtd_partition flagadm_parts[] = {
88 88
89static struct mtd_info *mymtd; 89static struct mtd_info *mymtd;
90 90
91int __init init_flagadm(void) 91static int __init init_flagadm(void)
92{ 92{
93 printk(KERN_NOTICE "FlagaDM flash device: %x at %x\n", 93 printk(KERN_NOTICE "FlagaDM flash device: %x at %x\n",
94 FLASH_SIZE, FLASH_PHYS_ADDR); 94 FLASH_SIZE, FLASH_PHYS_ADDR);
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 1a6feb4474de..5f7a245ed132 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -263,8 +263,8 @@ static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
263 /* Trim the size if we are larger than the map */ 263 /* Trim the size if we are larger than the map */
264 if (map->mtd->size > map->map.size) { 264 if (map->mtd->size > map->map.size) {
265 printk(KERN_WARNING MOD_NAME 265 printk(KERN_WARNING MOD_NAME
266 " rom(%u) larger than window(%lu). fixing...\n", 266 " rom(%llu) larger than window(%lu). fixing...\n",
267 map->mtd->size, map->map.size); 267 (unsigned long long)map->mtd->size, map->map.size);
268 map->mtd->size = map->map.size; 268 map->mtd->size = map->map.size;
269 } 269 }
270 if (window->rsrc.parent) { 270 if (window->rsrc.parent) {
diff --git a/drivers/mtd/maps/dbox2-flash.c b/drivers/mtd/maps/dbox2-flash.c
index e115667bf1d0..cfacfa6f45dd 100644
--- a/drivers/mtd/maps/dbox2-flash.c
+++ b/drivers/mtd/maps/dbox2-flash.c
@@ -69,7 +69,7 @@ struct map_info dbox2_flash_map = {
69 .phys = WINDOW_ADDR, 69 .phys = WINDOW_ADDR,
70}; 70};
71 71
72int __init init_dbox2_flash(void) 72static int __init init_dbox2_flash(void)
73{ 73{
74 printk(KERN_NOTICE "D-Box 2 flash driver (size->0x%X mem->0x%X)\n", WINDOW_SIZE, WINDOW_ADDR); 74 printk(KERN_NOTICE "D-Box 2 flash driver (size->0x%X mem->0x%X)\n", WINDOW_SIZE, WINDOW_ADDR);
75 dbox2_flash_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE); 75 dbox2_flash_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index 3aa018c092f8..42969fe051b2 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -32,16 +32,15 @@ static struct mtd_info *dc21285_mtd;
32 */ 32 */
33static void nw_en_write(void) 33static void nw_en_write(void)
34{ 34{
35 extern spinlock_t gpio_lock;
36 unsigned long flags; 35 unsigned long flags;
37 36
38 /* 37 /*
39 * we want to write a bit pattern XXX1 to Xilinx to enable 38 * we want to write a bit pattern XXX1 to Xilinx to enable
40 * the write gate, which will be open for about the next 2ms. 39 * the write gate, which will be open for about the next 2ms.
41 */ 40 */
42 spin_lock_irqsave(&gpio_lock, flags); 41 spin_lock_irqsave(&nw_gpio_lock, flags);
43 cpld_modify(1, 1); 42 nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
44 spin_unlock_irqrestore(&gpio_lock, flags); 43 spin_unlock_irqrestore(&nw_gpio_lock, flags);
45 44
46 /* 45 /*
47 * let the ISA bus to catch on... 46 * let the ISA bus to catch on...
diff --git a/drivers/mtd/maps/edb7312.c b/drivers/mtd/maps/edb7312.c
index 9433738c1664..be9e90b44587 100644
--- a/drivers/mtd/maps/edb7312.c
+++ b/drivers/mtd/maps/edb7312.c
@@ -71,7 +71,7 @@ static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
71static int mtd_parts_nb = 0; 71static int mtd_parts_nb = 0;
72static struct mtd_partition *mtd_parts = 0; 72static struct mtd_partition *mtd_parts = 0;
73 73
74int __init init_edb7312nor(void) 74static int __init init_edb7312nor(void)
75{ 75{
76 static const char *rom_probe_types[] = PROBETYPES; 76 static const char *rom_probe_types[] = PROBETYPES;
77 const char **type; 77 const char **type;
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index bbbcdd4c8d13..11a2f57df9cf 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -324,8 +324,8 @@ static int __devinit esb2rom_init_one(struct pci_dev *pdev,
324 /* Trim the size if we are larger than the map */ 324 /* Trim the size if we are larger than the map */
325 if (map->mtd->size > map->map.size) { 325 if (map->mtd->size > map->map.size) {
326 printk(KERN_WARNING MOD_NAME 326 printk(KERN_WARNING MOD_NAME
327 " rom(%u) larger than window(%lu). fixing...\n", 327 " rom(%llu) larger than window(%lu). fixing...\n",
328 map->mtd->size, map->map.size); 328 (unsigned long long)map->mtd->size, map->map.size);
329 map->mtd->size = map->map.size; 329 map->mtd->size = map->map.size;
330 } 330 }
331 if (window->rsrc.parent) { 331 if (window->rsrc.parent) {
diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c
index a8e3fde4cbd5..1e43124d498b 100644
--- a/drivers/mtd/maps/fortunet.c
+++ b/drivers/mtd/maps/fortunet.c
@@ -181,7 +181,7 @@ __setup("MTD_Partition=", MTD_New_Partition);
181/* Backwards-spelling-compatibility */ 181/* Backwards-spelling-compatibility */
182__setup("MTD_Partion=", MTD_New_Partition); 182__setup("MTD_Partion=", MTD_New_Partition);
183 183
184int __init init_fortunet(void) 184static int __init init_fortunet(void)
185{ 185{
186 int ix,iy; 186 int ix,iy;
187 for(iy=ix=0;ix<MAX_NUM_REGIONS;ix++) 187 for(iy=ix=0;ix<MAX_NUM_REGIONS;ix++)
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
index 35fef655ccc4..72c724fa8c27 100644
--- a/drivers/mtd/maps/h720x-flash.c
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -24,8 +24,8 @@ static struct mtd_info *mymtd;
24static struct map_info h720x_map = { 24static struct map_info h720x_map = {
25 .name = "H720X", 25 .name = "H720X",
26 .bankwidth = 4, 26 .bankwidth = 4,
27 .size = FLASH_SIZE, 27 .size = H720X_FLASH_SIZE,
28 .phys = FLASH_PHYS, 28 .phys = H720X_FLASH_PHYS,
29}; 29};
30 30
31static struct mtd_partition h720x_partitions[] = { 31static struct mtd_partition h720x_partitions[] = {
@@ -65,12 +65,12 @@ static const char *probes[] = { "cmdlinepart", NULL };
65/* 65/*
66 * Initialize FLASH support 66 * Initialize FLASH support
67 */ 67 */
68int __init h720x_mtd_init(void) 68static int __init h720x_mtd_init(void)
69{ 69{
70 70
71 char *part_type = NULL; 71 char *part_type = NULL;
72 72
73 h720x_map.virt = ioremap(FLASH_PHYS, FLASH_SIZE); 73 h720x_map.virt = ioremap(h720x_map.phys, h720x_map.size);
74 74
75 if (!h720x_map.virt) { 75 if (!h720x_map.virt) {
76 printk(KERN_ERR "H720x-MTD: ioremap failed\n"); 76 printk(KERN_ERR "H720x-MTD: ioremap failed\n");
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index aeb6c916e23f..c32bc28920b3 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -258,8 +258,8 @@ static int __devinit ichxrom_init_one (struct pci_dev *pdev,
258 /* Trim the size if we are larger than the map */ 258 /* Trim the size if we are larger than the map */
259 if (map->mtd->size > map->map.size) { 259 if (map->mtd->size > map->map.size) {
260 printk(KERN_WARNING MOD_NAME 260 printk(KERN_WARNING MOD_NAME
261 " rom(%u) larger than window(%lu). fixing...\n", 261 " rom(%llu) larger than window(%lu). fixing...\n",
262 map->mtd->size, map->map.size); 262 (unsigned long long)map->mtd->size, map->map.size);
263 map->mtd->size = map->map.size; 263 map->mtd->size = map->map.size;
264 } 264 }
265 if (window->rsrc.parent) { 265 if (window->rsrc.parent) {
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index 2682ab51a367..998a27da97f3 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -70,7 +70,7 @@ static struct mtd_partition *mtd_parts[NUM_FLASHBANKS];
70 70
71static const char *probes[] = { "cmdlinepart", NULL }; 71static const char *probes[] = { "cmdlinepart", NULL };
72 72
73int __init init_impa7(void) 73static int __init init_impa7(void)
74{ 74{
75 static const char *rom_probe_types[] = PROBETYPES; 75 static const char *rom_probe_types[] = PROBETYPES;
76 const char **type; 76 const char **type;
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c
index 7100ee3c7b01..d2ec262666c7 100644
--- a/drivers/mtd/maps/integrator-flash.c
+++ b/drivers/mtd/maps/integrator-flash.c
@@ -105,7 +105,7 @@ static int armflash_probe(struct platform_device *dev)
105 info->map.bankwidth = plat->width; 105 info->map.bankwidth = plat->width;
106 info->map.phys = res->start; 106 info->map.phys = res->start;
107 info->map.virt = base; 107 info->map.virt = base;
108 info->map.name = dev->dev.bus_id; 108 info->map.name = dev_name(&dev->dev);
109 info->map.set_vpp = armflash_set_vpp; 109 info->map.set_vpp = armflash_set_vpp;
110 110
111 simple_map_init(&info->map); 111 simple_map_init(&info->map);
diff --git a/drivers/mtd/maps/ipaq-flash.c b/drivers/mtd/maps/ipaq-flash.c
index ed58f6a77bd9..748c85f635f1 100644
--- a/drivers/mtd/maps/ipaq-flash.c
+++ b/drivers/mtd/maps/ipaq-flash.c
@@ -202,7 +202,7 @@ static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
202 202
203static int __init h1900_special_case(void); 203static int __init h1900_special_case(void);
204 204
205int __init ipaq_mtd_init(void) 205static int __init ipaq_mtd_init(void)
206{ 206{
207 struct mtd_partition *parts = NULL; 207 struct mtd_partition *parts = NULL;
208 int nb_parts = 0; 208 int nb_parts = 0;
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index dcdb1f17577d..d4fb9a3ab4df 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -170,7 +170,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
170 err = -ENOMEM; 170 err = -ENOMEM;
171 goto Error; 171 goto Error;
172 } 172 }
173 memzero(info, sizeof(struct ixp2000_flash_info)); 173 memset(info, 0, sizeof(struct ixp2000_flash_info));
174 174
175 platform_set_drvdata(dev, info); 175 platform_set_drvdata(dev, info);
176 176
@@ -188,7 +188,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
188 */ 188 */
189 info->map.map_priv_2 = (unsigned long) ixp_data->bank_setup; 189 info->map.map_priv_2 = (unsigned long) ixp_data->bank_setup;
190 190
191 info->map.name = dev->dev.bus_id; 191 info->map.name = dev_name(&dev->dev);
192 info->map.read = ixp2000_flash_read8; 192 info->map.read = ixp2000_flash_read8;
193 info->map.write = ixp2000_flash_write8; 193 info->map.write = ixp2000_flash_write8;
194 info->map.copy_from = ixp2000_flash_copy_from; 194 info->map.copy_from = ixp2000_flash_copy_from;
@@ -196,7 +196,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
196 196
197 info->res = request_mem_region(dev->resource->start, 197 info->res = request_mem_region(dev->resource->start,
198 dev->resource->end - dev->resource->start + 1, 198 dev->resource->end - dev->resource->start + 1,
199 dev->dev.bus_id); 199 dev_name(&dev->dev));
200 if (!info->res) { 200 if (!info->res) {
201 dev_err(&dev->dev, "Could not reserve memory region\n"); 201 dev_err(&dev->dev, "Could not reserve memory region\n");
202 err = -ENOMEM; 202 err = -ENOMEM;
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 9c7a5fbd4e51..7214b876feba 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -201,7 +201,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
201 err = -ENOMEM; 201 err = -ENOMEM;
202 goto Error; 202 goto Error;
203 } 203 }
204 memzero(info, sizeof(struct ixp4xx_flash_info)); 204 memset(info, 0, sizeof(struct ixp4xx_flash_info));
205 205
206 platform_set_drvdata(dev, info); 206 platform_set_drvdata(dev, info);
207 207
@@ -218,7 +218,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
218 * handle that. 218 * handle that.
219 */ 219 */
220 info->map.bankwidth = 2; 220 info->map.bankwidth = 2;
221 info->map.name = dev->dev.bus_id; 221 info->map.name = dev_name(&dev->dev);
222 info->map.read = ixp4xx_read16, 222 info->map.read = ixp4xx_read16,
223 info->map.write = ixp4xx_probe_write16, 223 info->map.write = ixp4xx_probe_write16,
224 info->map.copy_from = ixp4xx_copy_from, 224 info->map.copy_from = ixp4xx_copy_from,
diff --git a/drivers/mtd/maps/mbx860.c b/drivers/mtd/maps/mbx860.c
index 706f67394b07..0eb5a7c85380 100644
--- a/drivers/mtd/maps/mbx860.c
+++ b/drivers/mtd/maps/mbx860.c
@@ -55,7 +55,7 @@ struct map_info mbx_map = {
55 .bankwidth = 4, 55 .bankwidth = 4,
56}; 56};
57 57
58int __init init_mbx(void) 58static int __init init_mbx(void)
59{ 59{
60 printk(KERN_NOTICE "Motorola MBX flash device: 0x%x at 0x%x\n", WINDOW_SIZE*4, WINDOW_ADDR); 60 printk(KERN_NOTICE "Motorola MBX flash device: 0x%x at 0x%x\n", WINDOW_SIZE*4, WINDOW_ADDR);
61 mbx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4); 61 mbx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index 965e6c6d6ab0..df682667604a 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -226,7 +226,7 @@ static int __init nettel_init(void)
226 226
227 if ((amd_mtd = do_map_probe("jedec_probe", &nettel_amd_map))) { 227 if ((amd_mtd = do_map_probe("jedec_probe", &nettel_amd_map))) {
228 printk(KERN_NOTICE "SNAPGEAR: AMD flash device size = %dK\n", 228 printk(KERN_NOTICE "SNAPGEAR: AMD flash device size = %dK\n",
229 amd_mtd->size>>10); 229 (int)(amd_mtd->size>>10));
230 230
231 amd_mtd->owner = THIS_MODULE; 231 amd_mtd->owner = THIS_MODULE;
232 232
@@ -362,8 +362,7 @@ static int __init nettel_init(void)
362 362
363 intel_mtd->owner = THIS_MODULE; 363 intel_mtd->owner = THIS_MODULE;
364 364
365 num_intel_partitions = sizeof(nettel_intel_partitions) / 365 num_intel_partitions = ARRAY_SIZE(nettel_intel_partitions);
366 sizeof(nettel_intel_partitions[0]);
367 366
368 if (intelboot) { 367 if (intelboot) {
369 /* 368 /*
diff --git a/drivers/mtd/maps/octagon-5066.c b/drivers/mtd/maps/octagon-5066.c
index 43e04c1d22a9..2b2e45093218 100644
--- a/drivers/mtd/maps/octagon-5066.c
+++ b/drivers/mtd/maps/octagon-5066.c
@@ -184,7 +184,7 @@ void cleanup_oct5066(void)
184 release_region(PAGE_IO, 1); 184 release_region(PAGE_IO, 1);
185} 185}
186 186
187int __init init_oct5066(void) 187static int __init init_oct5066(void)
188{ 188{
189 int i; 189 int i;
190 int ret = 0; 190 int ret = 0;
diff --git a/drivers/mtd/maps/omap_nor.c b/drivers/mtd/maps/omap_nor.c
index 05f276af15da..7e50e9b1b781 100644
--- a/drivers/mtd/maps/omap_nor.c
+++ b/drivers/mtd/maps/omap_nor.c
@@ -101,7 +101,7 @@ static int __init omapflash_probe(struct platform_device *pdev)
101 err = -ENOMEM; 101 err = -ENOMEM;
102 goto out_release_mem_region; 102 goto out_release_mem_region;
103 } 103 }
104 info->map.name = pdev->dev.bus_id; 104 info->map.name = dev_name(&pdev->dev);
105 info->map.phys = res->start; 105 info->map.phys = res->start;
106 info->map.size = size; 106 info->map.size = size;
107 info->map.bankwidth = pdata->width; 107 info->map.bankwidth = pdata->width;
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index dfbf3f270cea..87743661d48e 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -29,7 +29,6 @@ struct physmap_flash_info {
29 struct map_info map[MAX_RESOURCES]; 29 struct map_info map[MAX_RESOURCES];
30#ifdef CONFIG_MTD_PARTITIONS 30#ifdef CONFIG_MTD_PARTITIONS
31 int nr_parts; 31 int nr_parts;
32 struct mtd_partition *parts;
33#endif 32#endif
34}; 33};
35 34
@@ -56,14 +55,10 @@ static int physmap_flash_remove(struct platform_device *dev)
56 for (i = 0; i < MAX_RESOURCES; i++) { 55 for (i = 0; i < MAX_RESOURCES; i++) {
57 if (info->mtd[i] != NULL) { 56 if (info->mtd[i] != NULL) {
58#ifdef CONFIG_MTD_PARTITIONS 57#ifdef CONFIG_MTD_PARTITIONS
59 if (info->nr_parts) { 58 if (info->nr_parts || physmap_data->nr_parts)
60 del_mtd_partitions(info->mtd[i]); 59 del_mtd_partitions(info->mtd[i]);
61 kfree(info->parts); 60 else
62 } else if (physmap_data->nr_parts) {
63 del_mtd_partitions(info->mtd[i]);
64 } else {
65 del_mtd_device(info->mtd[i]); 61 del_mtd_device(info->mtd[i]);
66 }
67#else 62#else
68 del_mtd_device(info->mtd[i]); 63 del_mtd_device(info->mtd[i]);
69#endif 64#endif
@@ -73,7 +68,12 @@ static int physmap_flash_remove(struct platform_device *dev)
73 return 0; 68 return 0;
74} 69}
75 70
76static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", NULL }; 71static const char *rom_probe_types[] = {
72 "cfi_probe",
73 "jedec_probe",
74 "qinfo_probe",
75 "map_rom",
76 NULL };
77#ifdef CONFIG_MTD_PARTITIONS 77#ifdef CONFIG_MTD_PARTITIONS
78static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL }; 78static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
79#endif 79#endif
@@ -86,6 +86,9 @@ static int physmap_flash_probe(struct platform_device *dev)
86 int err = 0; 86 int err = 0;
87 int i; 87 int i;
88 int devices_found = 0; 88 int devices_found = 0;
89#ifdef CONFIG_MTD_PARTITIONS
90 struct mtd_partition *parts;
91#endif
89 92
90 physmap_data = dev->dev.platform_data; 93 physmap_data = dev->dev.platform_data;
91 if (physmap_data == NULL) 94 if (physmap_data == NULL)
@@ -108,17 +111,18 @@ static int physmap_flash_probe(struct platform_device *dev)
108 if (!devm_request_mem_region(&dev->dev, 111 if (!devm_request_mem_region(&dev->dev,
109 dev->resource[i].start, 112 dev->resource[i].start,
110 dev->resource[i].end - dev->resource[i].start + 1, 113 dev->resource[i].end - dev->resource[i].start + 1,
111 dev->dev.bus_id)) { 114 dev_name(&dev->dev))) {
112 dev_err(&dev->dev, "Could not reserve memory region\n"); 115 dev_err(&dev->dev, "Could not reserve memory region\n");
113 err = -ENOMEM; 116 err = -ENOMEM;
114 goto err_out; 117 goto err_out;
115 } 118 }
116 119
117 info->map[i].name = dev->dev.bus_id; 120 info->map[i].name = dev_name(&dev->dev);
118 info->map[i].phys = dev->resource[i].start; 121 info->map[i].phys = dev->resource[i].start;
119 info->map[i].size = dev->resource[i].end - dev->resource[i].start + 1; 122 info->map[i].size = dev->resource[i].end - dev->resource[i].start + 1;
120 info->map[i].bankwidth = physmap_data->width; 123 info->map[i].bankwidth = physmap_data->width;
121 info->map[i].set_vpp = physmap_data->set_vpp; 124 info->map[i].set_vpp = physmap_data->set_vpp;
125 info->map[i].pfow_base = physmap_data->pfow_base;
122 126
123 info->map[i].virt = devm_ioremap(&dev->dev, info->map[i].phys, 127 info->map[i].virt = devm_ioremap(&dev->dev, info->map[i].phys,
124 info->map[i].size); 128 info->map[i].size);
@@ -150,7 +154,7 @@ static int physmap_flash_probe(struct platform_device *dev)
150 * We detected multiple devices. Concatenate them together. 154 * We detected multiple devices. Concatenate them together.
151 */ 155 */
152#ifdef CONFIG_MTD_CONCAT 156#ifdef CONFIG_MTD_CONCAT
153 info->cmtd = mtd_concat_create(info->mtd, devices_found, dev->dev.bus_id); 157 info->cmtd = mtd_concat_create(info->mtd, devices_found, dev_name(&dev->dev));
154 if (info->cmtd == NULL) 158 if (info->cmtd == NULL)
155 err = -ENXIO; 159 err = -ENXIO;
156#else 160#else
@@ -163,9 +167,10 @@ static int physmap_flash_probe(struct platform_device *dev)
163 goto err_out; 167 goto err_out;
164 168
165#ifdef CONFIG_MTD_PARTITIONS 169#ifdef CONFIG_MTD_PARTITIONS
166 err = parse_mtd_partitions(info->cmtd, part_probe_types, &info->parts, 0); 170 err = parse_mtd_partitions(info->cmtd, part_probe_types, &parts, 0);
167 if (err > 0) { 171 if (err > 0) {
168 add_mtd_partitions(info->cmtd, info->parts, err); 172 add_mtd_partitions(info->cmtd, parts, err);
173 kfree(parts);
169 return 0; 174 return 0;
170 } 175 }
171 176
@@ -251,14 +256,7 @@ static struct platform_driver physmap_flash_driver = {
251}; 256};
252 257
253 258
254#ifdef CONFIG_MTD_PHYSMAP_LEN 259#ifdef CONFIG_MTD_PHYSMAP_COMPAT
255#if CONFIG_MTD_PHYSMAP_LEN != 0
256#warning using PHYSMAP compat code
257#define PHYSMAP_COMPAT
258#endif
259#endif
260
261#ifdef PHYSMAP_COMPAT
262static struct physmap_flash_data physmap_flash_data = { 260static struct physmap_flash_data physmap_flash_data = {
263 .width = CONFIG_MTD_PHYSMAP_BANKWIDTH, 261 .width = CONFIG_MTD_PHYSMAP_BANKWIDTH,
264}; 262};
@@ -302,7 +300,7 @@ static int __init physmap_init(void)
302 int err; 300 int err;
303 301
304 err = platform_driver_register(&physmap_flash_driver); 302 err = platform_driver_register(&physmap_flash_driver);
305#ifdef PHYSMAP_COMPAT 303#ifdef CONFIG_MTD_PHYSMAP_COMPAT
306 if (err == 0) 304 if (err == 0)
307 platform_device_register(&physmap_flash); 305 platform_device_register(&physmap_flash);
308#endif 306#endif
@@ -312,7 +310,7 @@ static int __init physmap_init(void)
312 310
313static void __exit physmap_exit(void) 311static void __exit physmap_exit(void)
314{ 312{
315#ifdef PHYSMAP_COMPAT 313#ifdef CONFIG_MTD_PHYSMAP_COMPAT
316 platform_device_unregister(&physmap_flash); 314 platform_device_unregister(&physmap_flash);
317#endif 315#endif
318 platform_driver_unregister(&physmap_flash_driver); 316 platform_driver_unregister(&physmap_flash_driver);
@@ -326,8 +324,7 @@ MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
326MODULE_DESCRIPTION("Generic configurable MTD map driver"); 324MODULE_DESCRIPTION("Generic configurable MTD map driver");
327 325
328/* legacy platform drivers can't hotplug or coldplg */ 326/* legacy platform drivers can't hotplug or coldplg */
329#ifndef PHYSMAP_COMPAT 327#ifndef CONFIG_MTD_PHYSMAP_COMPAT
330/* work with hotplug and coldplug */ 328/* work with hotplug and coldplug */
331MODULE_ALIAS("platform:physmap-flash"); 329MODULE_ALIAS("platform:physmap-flash");
332#endif 330#endif
333
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 5fcfec034a94..fbf0ca939d72 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -183,7 +183,7 @@ static int __devinit of_flash_probe(struct of_device *dev,
183 183
184 err = -EBUSY; 184 err = -EBUSY;
185 info->res = request_mem_region(res.start, res.end - res.start + 1, 185 info->res = request_mem_region(res.start, res.end - res.start + 1,
186 dev->dev.bus_id); 186 dev_name(&dev->dev));
187 if (!info->res) 187 if (!info->res)
188 goto err_out; 188 goto err_out;
189 189
@@ -194,7 +194,7 @@ static int __devinit of_flash_probe(struct of_device *dev,
194 goto err_out; 194 goto err_out;
195 } 195 }
196 196
197 info->map.name = dev->dev.bus_id; 197 info->map.name = dev_name(&dev->dev);
198 info->map.phys = res.start; 198 info->map.phys = res.start;
199 info->map.size = res.end - res.start + 1; 199 info->map.size = res.end - res.start + 1;
200 info->map.bankwidth = *width; 200 info->map.bankwidth = *width;
diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
index f43ba2815cbb..4768bd5459d6 100644
--- a/drivers/mtd/maps/pmcmsp-flash.c
+++ b/drivers/mtd/maps/pmcmsp-flash.c
@@ -48,7 +48,7 @@ static int fcnt;
48 48
49#define DEBUG_MARKER printk(KERN_NOTICE "%s[%d]\n", __func__, __LINE__) 49#define DEBUG_MARKER printk(KERN_NOTICE "%s[%d]\n", __func__, __LINE__)
50 50
51int __init init_msp_flash(void) 51static int __init init_msp_flash(void)
52{ 52{
53 int i, j; 53 int i, j;
54 int offset, coff; 54 int offset, coff;
diff --git a/drivers/mtd/maps/redwood.c b/drivers/mtd/maps/redwood.c
index de002eb1a7fe..933c0b63b016 100644
--- a/drivers/mtd/maps/redwood.c
+++ b/drivers/mtd/maps/redwood.c
@@ -122,7 +122,7 @@ struct map_info redwood_flash_map = {
122 122
123static struct mtd_info *redwood_mtd; 123static struct mtd_info *redwood_mtd;
124 124
125int __init init_redwood_flash(void) 125static int __init init_redwood_flash(void)
126{ 126{
127 int err; 127 int err;
128 128
diff --git a/drivers/mtd/maps/rpxlite.c b/drivers/mtd/maps/rpxlite.c
index 14d90edb4430..3e3ef53d4fd4 100644
--- a/drivers/mtd/maps/rpxlite.c
+++ b/drivers/mtd/maps/rpxlite.c
@@ -23,7 +23,7 @@ static struct map_info rpxlite_map = {
23 .phys = WINDOW_ADDR, 23 .phys = WINDOW_ADDR,
24}; 24};
25 25
26int __init init_rpxlite(void) 26static int __init init_rpxlite(void)
27{ 27{
28 printk(KERN_NOTICE "RPX Lite or CLLF flash device: %x at %x\n", WINDOW_SIZE*4, WINDOW_ADDR); 28 printk(KERN_NOTICE "RPX Lite or CLLF flash device: %x at %x\n", WINDOW_SIZE*4, WINDOW_ADDR);
29 rpxlite_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4); 29 rpxlite_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
diff --git a/drivers/mtd/maps/sbc8240.c b/drivers/mtd/maps/sbc8240.c
index 6e1e99cd2b59..d5374cdcb163 100644
--- a/drivers/mtd/maps/sbc8240.c
+++ b/drivers/mtd/maps/sbc8240.c
@@ -136,7 +136,7 @@ static struct mtd_part_def sbc8240_part_banks[NUM_FLASH_BANKS];
136#endif /* CONFIG_MTD_PARTITIONS */ 136#endif /* CONFIG_MTD_PARTITIONS */
137 137
138 138
139int __init init_sbc8240_mtd (void) 139static int __init init_sbc8240_mtd (void)
140{ 140{
141 static struct _cjs { 141 static struct _cjs {
142 u_long addr; 142 u_long addr;
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 21169e6d646c..7e329f09a548 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -118,7 +118,8 @@ scb2_fixup_mtd(struct mtd_info *mtd)
118 struct mtd_erase_region_info *region = &mtd->eraseregions[i]; 118 struct mtd_erase_region_info *region = &mtd->eraseregions[i];
119 119
120 if (region->numblocks * region->erasesize > mtd->size) { 120 if (region->numblocks * region->erasesize > mtd->size) {
121 region->numblocks = (mtd->size / region->erasesize); 121 region->numblocks = ((unsigned long)mtd->size /
122 region->erasesize);
122 done = 1; 123 done = 1;
123 } else { 124 } else {
124 region->numblocks = 0; 125 region->numblocks = 0;
@@ -187,8 +188,9 @@ scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
187 return -ENODEV; 188 return -ENODEV;
188 } 189 }
189 190
190 printk(KERN_NOTICE MODNAME ": chip size 0x%x at offset 0x%x\n", 191 printk(KERN_NOTICE MODNAME ": chip size 0x%llx at offset 0x%llx\n",
191 scb2_mtd->size, SCB2_WINDOW - scb2_mtd->size); 192 (unsigned long long)scb2_mtd->size,
193 (unsigned long long)(SCB2_WINDOW - scb2_mtd->size));
192 194
193 add_mtd_device(scb2_mtd); 195 add_mtd_device(scb2_mtd);
194 196
diff --git a/drivers/mtd/maps/sharpsl-flash.c b/drivers/mtd/maps/sharpsl-flash.c
index 026eab028189..b392f096c706 100644
--- a/drivers/mtd/maps/sharpsl-flash.c
+++ b/drivers/mtd/maps/sharpsl-flash.c
@@ -47,7 +47,7 @@ static struct mtd_partition sharpsl_partitions[1] = {
47 } 47 }
48}; 48};
49 49
50int __init init_sharpsl(void) 50static int __init init_sharpsl(void)
51{ 51{
52 struct mtd_partition *parts; 52 struct mtd_partition *parts;
53 int nb_parts = 0; 53 int nb_parts = 0;
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c
index a5d3d8531faa..60146984f4be 100644
--- a/drivers/mtd/maps/tqm8xxl.c
+++ b/drivers/mtd/maps/tqm8xxl.c
@@ -109,7 +109,7 @@ static struct mtd_partition tqm8xxl_fs_partitions[] = {
109}; 109};
110#endif 110#endif
111 111
112int __init init_tqm_mtd(void) 112static int __init init_tqm_mtd(void)
113{ 113{
114 int idx = 0, ret = 0; 114 int idx = 0, ret = 0;
115 unsigned long flash_addr, flash_size, mtd_size = 0; 115 unsigned long flash_addr, flash_size, mtd_size = 0;
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 0dc645f8152f..81756e397711 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -51,7 +51,7 @@ int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len,
51 51
52/****************************************************************************/ 52/****************************************************************************/
53 53
54int __init uclinux_mtd_init(void) 54static int __init uclinux_mtd_init(void)
55{ 55{
56 struct mtd_info *mtd; 56 struct mtd_info *mtd;
57 struct map_info *mapp; 57 struct map_info *mapp;
@@ -94,7 +94,7 @@ int __init uclinux_mtd_init(void)
94 94
95/****************************************************************************/ 95/****************************************************************************/
96 96
97void __exit uclinux_mtd_cleanup(void) 97static void __exit uclinux_mtd_cleanup(void)
98{ 98{
99 if (uclinux_ram_mtdinfo) { 99 if (uclinux_ram_mtdinfo) {
100 del_mtd_partitions(uclinux_ram_mtdinfo); 100 del_mtd_partitions(uclinux_ram_mtdinfo);
diff --git a/drivers/mtd/maps/vmax301.c b/drivers/mtd/maps/vmax301.c
index 5a0c9a353b0f..6d452dcdfe34 100644
--- a/drivers/mtd/maps/vmax301.c
+++ b/drivers/mtd/maps/vmax301.c
@@ -146,7 +146,7 @@ static void __exit cleanup_vmax301(void)
146 iounmap((void *)vmax_map[0].map_priv_1 - WINDOW_START); 146 iounmap((void *)vmax_map[0].map_priv_1 - WINDOW_START);
147} 147}
148 148
149int __init init_vmax301(void) 149static int __init init_vmax301(void)
150{ 150{
151 int i; 151 int i;
152 unsigned long iomapadr; 152 unsigned long iomapadr;
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
index 413b0cf9bbd2..933a2b6598b4 100644
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -74,7 +74,7 @@ do { \
74 } \ 74 } \
75} while (0); 75} while (0);
76 76
77int __init init_sbc82xx_flash(void) 77static int __init init_sbc82xx_flash(void)
78{ 78{
79 volatile memctl_cpm2_t *mc = &cpm2_immr->im_memctl; 79 volatile memctl_cpm2_t *mc = &cpm2_immr->im_memctl;
80 int bigflash; 80 int bigflash;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 681d5aca2af4..1409f01406f6 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -133,15 +133,12 @@ static void mtd_blktrans_request(struct request_queue *rq)
133} 133}
134 134
135 135
136static int blktrans_open(struct inode *i, struct file *f) 136static int blktrans_open(struct block_device *bdev, fmode_t mode)
137{ 137{
138 struct mtd_blktrans_dev *dev; 138 struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
139 struct mtd_blktrans_ops *tr; 139 struct mtd_blktrans_ops *tr = dev->tr;
140 int ret = -ENODEV; 140 int ret = -ENODEV;
141 141
142 dev = i->i_bdev->bd_disk->private_data;
143 tr = dev->tr;
144
145 if (!try_module_get(dev->mtd->owner)) 142 if (!try_module_get(dev->mtd->owner))
146 goto out; 143 goto out;
147 144
@@ -164,15 +161,12 @@ static int blktrans_open(struct inode *i, struct file *f)
164 return ret; 161 return ret;
165} 162}
166 163
167static int blktrans_release(struct inode *i, struct file *f) 164static int blktrans_release(struct gendisk *disk, fmode_t mode)
168{ 165{
169 struct mtd_blktrans_dev *dev; 166 struct mtd_blktrans_dev *dev = disk->private_data;
170 struct mtd_blktrans_ops *tr; 167 struct mtd_blktrans_ops *tr = dev->tr;
171 int ret = 0; 168 int ret = 0;
172 169
173 dev = i->i_bdev->bd_disk->private_data;
174 tr = dev->tr;
175
176 if (tr->release) 170 if (tr->release)
177 ret = tr->release(dev); 171 ret = tr->release(dev);
178 172
@@ -194,10 +188,10 @@ static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
194 return -ENOTTY; 188 return -ENOTTY;
195} 189}
196 190
197static int blktrans_ioctl(struct inode *inode, struct file *file, 191static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
198 unsigned int cmd, unsigned long arg) 192 unsigned int cmd, unsigned long arg)
199{ 193{
200 struct mtd_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data; 194 struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
201 struct mtd_blktrans_ops *tr = dev->tr; 195 struct mtd_blktrans_ops *tr = dev->tr;
202 196
203 switch (cmd) { 197 switch (cmd) {
@@ -215,7 +209,7 @@ static struct block_device_operations mtd_blktrans_ops = {
215 .owner = THIS_MODULE, 209 .owner = THIS_MODULE,
216 .open = blktrans_open, 210 .open = blktrans_open,
217 .release = blktrans_release, 211 .release = blktrans_release,
218 .ioctl = blktrans_ioctl, 212 .locked_ioctl = blktrans_ioctl,
219 .getgeo = blktrans_getgeo, 213 .getgeo = blktrans_getgeo,
220}; 214};
221 215
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 963840e9b5bf..e9ec59e9a566 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -96,7 +96,7 @@ static int mtd_open(struct inode *inode, struct file *file)
96 return -ENODEV; 96 return -ENODEV;
97 97
98 /* You can't open the RO devices RW */ 98 /* You can't open the RO devices RW */
99 if ((file->f_mode & 2) && (minor & 1)) 99 if ((file->f_mode & FMODE_WRITE) && (minor & 1))
100 return -EACCES; 100 return -EACCES;
101 101
102 lock_kernel(); 102 lock_kernel();
@@ -114,7 +114,7 @@ static int mtd_open(struct inode *inode, struct file *file)
114 } 114 }
115 115
116 /* You can't open it RW if it's not a writeable device */ 116 /* You can't open it RW if it's not a writeable device */
117 if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) { 117 if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
118 put_mtd_device(mtd); 118 put_mtd_device(mtd);
119 ret = -EACCES; 119 ret = -EACCES;
120 goto out; 120 goto out;
@@ -144,7 +144,7 @@ static int mtd_close(struct inode *inode, struct file *file)
144 DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n"); 144 DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");
145 145
146 /* Only sync if opened RW */ 146 /* Only sync if opened RW */
147 if ((file->f_mode & 2) && mtd->sync) 147 if ((file->f_mode & FMODE_WRITE) && mtd->sync)
148 mtd->sync(mtd); 148 mtd->sync(mtd);
149 149
150 put_mtd_device(mtd); 150 put_mtd_device(mtd);
@@ -443,23 +443,27 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
443 { 443 {
444 struct erase_info *erase; 444 struct erase_info *erase;
445 445
446 if(!(file->f_mode & 2)) 446 if(!(file->f_mode & FMODE_WRITE))
447 return -EPERM; 447 return -EPERM;
448 448
449 erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL); 449 erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
450 if (!erase) 450 if (!erase)
451 ret = -ENOMEM; 451 ret = -ENOMEM;
452 else { 452 else {
453 struct erase_info_user einfo;
454
453 wait_queue_head_t waitq; 455 wait_queue_head_t waitq;
454 DECLARE_WAITQUEUE(wait, current); 456 DECLARE_WAITQUEUE(wait, current);
455 457
456 init_waitqueue_head(&waitq); 458 init_waitqueue_head(&waitq);
457 459
458 if (copy_from_user(&erase->addr, argp, 460 if (copy_from_user(&einfo, argp,
459 sizeof(struct erase_info_user))) { 461 sizeof(struct erase_info_user))) {
460 kfree(erase); 462 kfree(erase);
461 return -EFAULT; 463 return -EFAULT;
462 } 464 }
465 erase->addr = einfo.start;
466 erase->len = einfo.length;
463 erase->mtd = mtd; 467 erase->mtd = mtd;
464 erase->callback = mtdchar_erase_callback; 468 erase->callback = mtdchar_erase_callback;
465 erase->priv = (unsigned long)&waitq; 469 erase->priv = (unsigned long)&waitq;
@@ -497,7 +501,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
497 struct mtd_oob_buf __user *user_buf = argp; 501 struct mtd_oob_buf __user *user_buf = argp;
498 uint32_t retlen; 502 uint32_t retlen;
499 503
500 if(!(file->f_mode & 2)) 504 if(!(file->f_mode & FMODE_WRITE))
501 return -EPERM; 505 return -EPERM;
502 506
503 if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf))) 507 if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 789842d0e6f2..3dbb1b38db66 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -197,7 +197,7 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
197 continue; 197 continue;
198 } 198 }
199 199
200 size = min(total_len, (size_t)(subdev->size - to)); 200 size = min_t(uint64_t, total_len, subdev->size - to);
201 wsize = size; /* store for future use */ 201 wsize = size; /* store for future use */
202 202
203 entry_high = entry_low; 203 entry_high = entry_low;
@@ -385,7 +385,7 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
385 struct mtd_concat *concat = CONCAT(mtd); 385 struct mtd_concat *concat = CONCAT(mtd);
386 struct mtd_info *subdev; 386 struct mtd_info *subdev;
387 int i, err; 387 int i, err;
388 u_int32_t length, offset = 0; 388 uint64_t length, offset = 0;
389 struct erase_info *erase; 389 struct erase_info *erase;
390 390
391 if (!(mtd->flags & MTD_WRITEABLE)) 391 if (!(mtd->flags & MTD_WRITEABLE))
@@ -518,7 +518,7 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
518 return 0; 518 return 0;
519} 519}
520 520
521static int concat_lock(struct mtd_info *mtd, loff_t ofs, size_t len) 521static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
522{ 522{
523 struct mtd_concat *concat = CONCAT(mtd); 523 struct mtd_concat *concat = CONCAT(mtd);
524 int i, err = -EINVAL; 524 int i, err = -EINVAL;
@@ -528,7 +528,7 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
528 528
529 for (i = 0; i < concat->num_subdev; i++) { 529 for (i = 0; i < concat->num_subdev; i++) {
530 struct mtd_info *subdev = concat->subdev[i]; 530 struct mtd_info *subdev = concat->subdev[i];
531 size_t size; 531 uint64_t size;
532 532
533 if (ofs >= subdev->size) { 533 if (ofs >= subdev->size) {
534 size = 0; 534 size = 0;
@@ -556,7 +556,7 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
556 return err; 556 return err;
557} 557}
558 558
559static int concat_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) 559static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
560{ 560{
561 struct mtd_concat *concat = CONCAT(mtd); 561 struct mtd_concat *concat = CONCAT(mtd);
562 int i, err = 0; 562 int i, err = 0;
@@ -566,7 +566,7 @@ static int concat_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
566 566
567 for (i = 0; i < concat->num_subdev; i++) { 567 for (i = 0; i < concat->num_subdev; i++) {
568 struct mtd_info *subdev = concat->subdev[i]; 568 struct mtd_info *subdev = concat->subdev[i];
569 size_t size; 569 uint64_t size;
570 570
571 if (ofs >= subdev->size) { 571 if (ofs >= subdev->size) {
572 size = 0; 572 size = 0;
@@ -691,12 +691,12 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
691 */ 691 */
692struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to concatenate */ 692struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to concatenate */
693 int num_devs, /* number of subdevices */ 693 int num_devs, /* number of subdevices */
694 char *name) 694 const char *name)
695{ /* name for the new device */ 695{ /* name for the new device */
696 int i; 696 int i;
697 size_t size; 697 size_t size;
698 struct mtd_concat *concat; 698 struct mtd_concat *concat;
699 u_int32_t max_erasesize, curr_erasesize; 699 uint32_t max_erasesize, curr_erasesize;
700 int num_erase_region; 700 int num_erase_region;
701 701
702 printk(KERN_NOTICE "Concatenating MTD devices:\n"); 702 printk(KERN_NOTICE "Concatenating MTD devices:\n");
@@ -842,12 +842,14 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
842 concat->mtd.erasesize = curr_erasesize; 842 concat->mtd.erasesize = curr_erasesize;
843 concat->mtd.numeraseregions = 0; 843 concat->mtd.numeraseregions = 0;
844 } else { 844 } else {
845 uint64_t tmp64;
846
845 /* 847 /*
846 * erase block size varies across the subdevices: allocate 848 * erase block size varies across the subdevices: allocate
847 * space to store the data describing the variable erase regions 849 * space to store the data describing the variable erase regions
848 */ 850 */
849 struct mtd_erase_region_info *erase_region_p; 851 struct mtd_erase_region_info *erase_region_p;
850 u_int32_t begin, position; 852 uint64_t begin, position;
851 853
852 concat->mtd.erasesize = max_erasesize; 854 concat->mtd.erasesize = max_erasesize;
853 concat->mtd.numeraseregions = num_erase_region; 855 concat->mtd.numeraseregions = num_erase_region;
@@ -879,8 +881,9 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
879 erase_region_p->offset = begin; 881 erase_region_p->offset = begin;
880 erase_region_p->erasesize = 882 erase_region_p->erasesize =
881 curr_erasesize; 883 curr_erasesize;
882 erase_region_p->numblocks = 884 tmp64 = position - begin;
883 (position - begin) / curr_erasesize; 885 do_div(tmp64, curr_erasesize);
886 erase_region_p->numblocks = tmp64;
884 begin = position; 887 begin = position;
885 888
886 curr_erasesize = subdev[i]->erasesize; 889 curr_erasesize = subdev[i]->erasesize;
@@ -897,9 +900,9 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
897 erase_region_p->offset = begin; 900 erase_region_p->offset = begin;
898 erase_region_p->erasesize = 901 erase_region_p->erasesize =
899 curr_erasesize; 902 curr_erasesize;
900 erase_region_p->numblocks = 903 tmp64 = position - begin;
901 (position - 904 do_div(tmp64, curr_erasesize);
902 begin) / curr_erasesize; 905 erase_region_p->numblocks = tmp64;
903 begin = position; 906 begin = position;
904 907
905 curr_erasesize = 908 curr_erasesize =
@@ -909,14 +912,16 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
909 } 912 }
910 position += 913 position +=
911 subdev[i]->eraseregions[j]. 914 subdev[i]->eraseregions[j].
912 numblocks * curr_erasesize; 915 numblocks * (uint64_t)curr_erasesize;
913 } 916 }
914 } 917 }
915 } 918 }
916 /* Now write the final entry */ 919 /* Now write the final entry */
917 erase_region_p->offset = begin; 920 erase_region_p->offset = begin;
918 erase_region_p->erasesize = curr_erasesize; 921 erase_region_p->erasesize = curr_erasesize;
919 erase_region_p->numblocks = (position - begin) / curr_erasesize; 922 tmp64 = position - begin;
923 do_div(tmp64, curr_erasesize);
924 erase_region_p->numblocks = tmp64;
920 } 925 }
921 926
922 return &concat->mtd; 927 return &concat->mtd;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index a9d246949820..76fe0a1e7a5e 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -57,6 +57,19 @@ int add_mtd_device(struct mtd_info *mtd)
57 mtd->index = i; 57 mtd->index = i;
58 mtd->usecount = 0; 58 mtd->usecount = 0;
59 59
60 if (is_power_of_2(mtd->erasesize))
61 mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
62 else
63 mtd->erasesize_shift = 0;
64
65 if (is_power_of_2(mtd->writesize))
66 mtd->writesize_shift = ffs(mtd->writesize) - 1;
67 else
68 mtd->writesize_shift = 0;
69
70 mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
71 mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
72
60 /* Some chips always power up locked. Unlock them now */ 73 /* Some chips always power up locked. Unlock them now */
61 if ((mtd->flags & MTD_WRITEABLE) 74 if ((mtd->flags & MTD_WRITEABLE)
62 && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) { 75 && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
@@ -344,7 +357,8 @@ static inline int mtd_proc_info (char *buf, int i)
344 if (!this) 357 if (!this)
345 return 0; 358 return 0;
346 359
347 return sprintf(buf, "mtd%d: %8.8x %8.8x \"%s\"\n", i, this->size, 360 return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", i,
361 (unsigned long long)this->size,
348 this->erasesize, this->name); 362 this->erasesize, this->name);
349} 363}
350 364
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index aebb3b27edbd..1a6b3beabe8d 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -80,9 +80,9 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
80 if (ret) { 80 if (ret) {
81 set_current_state(TASK_RUNNING); 81 set_current_state(TASK_RUNNING);
82 remove_wait_queue(&wait_q, &wait); 82 remove_wait_queue(&wait_q, &wait);
83 printk (KERN_WARNING "mtdoops: erase of region [0x%x, 0x%x] " 83 printk (KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] "
84 "on \"%s\" failed\n", 84 "on \"%s\" failed\n",
85 erase.addr, erase.len, mtd->name); 85 (unsigned long long)erase.addr, (unsigned long long)erase.len, mtd->name);
86 return ret; 86 return ret;
87 } 87 }
88 88
@@ -289,7 +289,10 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
289 } 289 }
290 290
291 cxt->mtd = mtd; 291 cxt->mtd = mtd;
292 cxt->oops_pages = mtd->size / OOPS_PAGE_SIZE; 292 if (mtd->size > INT_MAX)
293 cxt->oops_pages = INT_MAX / OOPS_PAGE_SIZE;
294 else
295 cxt->oops_pages = (int)mtd->size / OOPS_PAGE_SIZE;
293 296
294 find_next_position(cxt); 297 find_next_position(cxt);
295 298
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 3728913fa5fa..144e6b613a77 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -26,7 +26,7 @@ static LIST_HEAD(mtd_partitions);
26struct mtd_part { 26struct mtd_part {
27 struct mtd_info mtd; 27 struct mtd_info mtd;
28 struct mtd_info *master; 28 struct mtd_info *master;
29 u_int32_t offset; 29 uint64_t offset;
30 int index; 30 int index;
31 struct list_head list; 31 struct list_head list;
32 int registered; 32 int registered;
@@ -235,7 +235,7 @@ void mtd_erase_callback(struct erase_info *instr)
235} 235}
236EXPORT_SYMBOL_GPL(mtd_erase_callback); 236EXPORT_SYMBOL_GPL(mtd_erase_callback);
237 237
238static int part_lock(struct mtd_info *mtd, loff_t ofs, size_t len) 238static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
239{ 239{
240 struct mtd_part *part = PART(mtd); 240 struct mtd_part *part = PART(mtd);
241 if ((len + ofs) > mtd->size) 241 if ((len + ofs) > mtd->size)
@@ -243,7 +243,7 @@ static int part_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
243 return part->master->lock(part->master, ofs + part->offset, len); 243 return part->master->lock(part->master, ofs + part->offset, len);
244} 244}
245 245
246static int part_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) 246static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
247{ 247{
248 struct mtd_part *part = PART(mtd); 248 struct mtd_part *part = PART(mtd);
249 if ((len + ofs) > mtd->size) 249 if ((len + ofs) > mtd->size)
@@ -317,7 +317,7 @@ EXPORT_SYMBOL(del_mtd_partitions);
317 317
318static struct mtd_part *add_one_partition(struct mtd_info *master, 318static struct mtd_part *add_one_partition(struct mtd_info *master,
319 const struct mtd_partition *part, int partno, 319 const struct mtd_partition *part, int partno,
320 u_int32_t cur_offset) 320 uint64_t cur_offset)
321{ 321{
322 struct mtd_part *slave; 322 struct mtd_part *slave;
323 323
@@ -395,19 +395,19 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
395 slave->offset = cur_offset; 395 slave->offset = cur_offset;
396 if (slave->offset == MTDPART_OFS_NXTBLK) { 396 if (slave->offset == MTDPART_OFS_NXTBLK) {
397 slave->offset = cur_offset; 397 slave->offset = cur_offset;
398 if ((cur_offset % master->erasesize) != 0) { 398 if (mtd_mod_by_eb(cur_offset, master) != 0) {
399 /* Round up to next erasesize */ 399 /* Round up to next erasesize */
400 slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize; 400 slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
401 printk(KERN_NOTICE "Moving partition %d: " 401 printk(KERN_NOTICE "Moving partition %d: "
402 "0x%08x -> 0x%08x\n", partno, 402 "0x%012llx -> 0x%012llx\n", partno,
403 cur_offset, slave->offset); 403 (unsigned long long)cur_offset, (unsigned long long)slave->offset);
404 } 404 }
405 } 405 }
406 if (slave->mtd.size == MTDPART_SIZ_FULL) 406 if (slave->mtd.size == MTDPART_SIZ_FULL)
407 slave->mtd.size = master->size - slave->offset; 407 slave->mtd.size = master->size - slave->offset;
408 408
409 printk(KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset, 409 printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
410 slave->offset + slave->mtd.size, slave->mtd.name); 410 (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);
411 411
412 /* let's do some sanity checks */ 412 /* let's do some sanity checks */
413 if (slave->offset >= master->size) { 413 if (slave->offset >= master->size) {
@@ -420,13 +420,13 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
420 } 420 }
421 if (slave->offset + slave->mtd.size > master->size) { 421 if (slave->offset + slave->mtd.size > master->size) {
422 slave->mtd.size = master->size - slave->offset; 422 slave->mtd.size = master->size - slave->offset;
423 printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n", 423 printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
424 part->name, master->name, slave->mtd.size); 424 part->name, master->name, (unsigned long long)slave->mtd.size);
425 } 425 }
426 if (master->numeraseregions > 1) { 426 if (master->numeraseregions > 1) {
427 /* Deal with variable erase size stuff */ 427 /* Deal with variable erase size stuff */
428 int i, max = master->numeraseregions; 428 int i, max = master->numeraseregions;
429 u32 end = slave->offset + slave->mtd.size; 429 u64 end = slave->offset + slave->mtd.size;
430 struct mtd_erase_region_info *regions = master->eraseregions; 430 struct mtd_erase_region_info *regions = master->eraseregions;
431 431
432 /* Find the first erase regions which is part of this 432 /* Find the first erase regions which is part of this
@@ -449,7 +449,7 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
449 } 449 }
450 450
451 if ((slave->mtd.flags & MTD_WRITEABLE) && 451 if ((slave->mtd.flags & MTD_WRITEABLE) &&
452 (slave->offset % slave->mtd.erasesize)) { 452 mtd_mod_by_eb(slave->offset, &slave->mtd)) {
453 /* Doesn't start on a boundary of major erase size */ 453 /* Doesn't start on a boundary of major erase size */
454 /* FIXME: Let it be writable if it is on a boundary of 454 /* FIXME: Let it be writable if it is on a boundary of
455 * _minor_ erase size though */ 455 * _minor_ erase size though */
@@ -458,7 +458,7 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
458 part->name); 458 part->name);
459 } 459 }
460 if ((slave->mtd.flags & MTD_WRITEABLE) && 460 if ((slave->mtd.flags & MTD_WRITEABLE) &&
461 (slave->mtd.size % slave->mtd.erasesize)) { 461 mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
462 slave->mtd.flags &= ~MTD_WRITEABLE; 462 slave->mtd.flags &= ~MTD_WRITEABLE;
463 printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n", 463 printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
464 part->name); 464 part->name);
@@ -466,7 +466,7 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
466 466
467 slave->mtd.ecclayout = master->ecclayout; 467 slave->mtd.ecclayout = master->ecclayout;
468 if (master->block_isbad) { 468 if (master->block_isbad) {
469 uint32_t offs = 0; 469 uint64_t offs = 0;
470 470
471 while (offs < slave->mtd.size) { 471 while (offs < slave->mtd.size) {
472 if (master->block_isbad(master, 472 if (master->block_isbad(master,
@@ -501,7 +501,7 @@ int add_mtd_partitions(struct mtd_info *master,
501 int nbparts) 501 int nbparts)
502{ 502{
503 struct mtd_part *slave; 503 struct mtd_part *slave;
504 u_int32_t cur_offset = 0; 504 uint64_t cur_offset = 0;
505 int i; 505 int i;
506 506
507 printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name); 507 printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 1c2e9450d663..f8ae0400c49c 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -408,7 +408,7 @@ config MTD_NAND_FSL_UPM
408 408
409config MTD_NAND_MXC 409config MTD_NAND_MXC
410 tristate "MXC NAND support" 410 tristate "MXC NAND support"
411 depends on ARCH_MX2 411 depends on ARCH_MX2 || ARCH_MX3
412 help 412 help
413 This enables the driver for the NAND flash controller on the 413 This enables the driver for the NAND flash controller on the
414 MXC processors. 414 MXC processors.
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 962380394855..6d9649159a18 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -676,11 +676,11 @@ static int alauda_probe(struct usb_interface *interface,
676 goto error; 676 goto error;
677 677
678 al->write_out = usb_sndbulkpipe(al->dev, 678 al->write_out = usb_sndbulkpipe(al->dev,
679 ep_wr->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); 679 usb_endpoint_num(ep_wr));
680 al->bulk_in = usb_rcvbulkpipe(al->dev, 680 al->bulk_in = usb_rcvbulkpipe(al->dev,
681 ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); 681 usb_endpoint_num(ep_in));
682 al->bulk_out = usb_sndbulkpipe(al->dev, 682 al->bulk_out = usb_sndbulkpipe(al->dev,
683 ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); 683 usb_endpoint_num(ep_out));
684 684
685 /* second device is identical up to now */ 685 /* second device is identical up to now */
686 memcpy(al+1, al, sizeof(*al)); 686 memcpy(al+1, al, sizeof(*al));
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 4aa5bd6158da..65929db29446 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -777,7 +777,9 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
777 /* Fill in fsl_elbc_mtd structure */ 777 /* Fill in fsl_elbc_mtd structure */
778 priv->mtd.priv = chip; 778 priv->mtd.priv = chip;
779 priv->mtd.owner = THIS_MODULE; 779 priv->mtd.owner = THIS_MODULE;
780 priv->fmr = 0; /* rest filled in later */ 780
781 /* Set the ECCM according to the settings in bootloader.*/
782 priv->fmr = in_be32(&lbc->fmr) & FMR_ECCM;
781 783
782 /* fill in nand_chip structure */ 784 /* fill in nand_chip structure */
783 /* set up function call table */ 785 /* set up function call table */
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index a83192f80eba..7815a404a632 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -222,7 +222,7 @@ static int __devinit fun_probe(struct of_device *ofdev,
222 222
223 fun->rnb_gpio = of_get_gpio(ofdev->node, 0); 223 fun->rnb_gpio = of_get_gpio(ofdev->node, 0);
224 if (fun->rnb_gpio >= 0) { 224 if (fun->rnb_gpio >= 0) {
225 ret = gpio_request(fun->rnb_gpio, ofdev->dev.bus_id); 225 ret = gpio_request(fun->rnb_gpio, dev_name(&ofdev->dev));
226 if (ret) { 226 if (ret) {
227 dev_err(&ofdev->dev, "can't request RNB gpio\n"); 227 dev_err(&ofdev->dev, "can't request RNB gpio\n");
228 goto err2; 228 goto err2;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 0a9c9cd33f96..0c3afccde8a2 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2014,13 +2014,14 @@ static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
2014int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, 2014int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2015 int allowbbt) 2015 int allowbbt)
2016{ 2016{
2017 int page, len, status, pages_per_block, ret, chipnr; 2017 int page, status, pages_per_block, ret, chipnr;
2018 struct nand_chip *chip = mtd->priv; 2018 struct nand_chip *chip = mtd->priv;
2019 int rewrite_bbt[NAND_MAX_CHIPS]={0}; 2019 loff_t rewrite_bbt[NAND_MAX_CHIPS]={0};
2020 unsigned int bbt_masked_page = 0xffffffff; 2020 unsigned int bbt_masked_page = 0xffffffff;
2021 loff_t len;
2021 2022
2022 DEBUG(MTD_DEBUG_LEVEL3, "nand_erase: start = 0x%08x, len = %i\n", 2023 DEBUG(MTD_DEBUG_LEVEL3, "nand_erase: start = 0x%012llx, len = %llu\n",
2023 (unsigned int)instr->addr, (unsigned int)instr->len); 2024 (unsigned long long)instr->addr, (unsigned long long)instr->len);
2024 2025
2025 /* Start address must align on block boundary */ 2026 /* Start address must align on block boundary */
2026 if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) { 2027 if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) {
@@ -2116,7 +2117,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2116 DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: " 2117 DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: "
2117 "Failed erase, page 0x%08x\n", page); 2118 "Failed erase, page 0x%08x\n", page);
2118 instr->state = MTD_ERASE_FAILED; 2119 instr->state = MTD_ERASE_FAILED;
2119 instr->fail_addr = (page << chip->page_shift); 2120 instr->fail_addr =
2121 ((loff_t)page << chip->page_shift);
2120 goto erase_exit; 2122 goto erase_exit;
2121 } 2123 }
2122 2124
@@ -2126,7 +2128,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2126 */ 2128 */
2127 if (bbt_masked_page != 0xffffffff && 2129 if (bbt_masked_page != 0xffffffff &&
2128 (page & BBT_PAGE_MASK) == bbt_masked_page) 2130 (page & BBT_PAGE_MASK) == bbt_masked_page)
2129 rewrite_bbt[chipnr] = (page << chip->page_shift); 2131 rewrite_bbt[chipnr] =
2132 ((loff_t)page << chip->page_shift);
2130 2133
2131 /* Increment page address and decrement length */ 2134 /* Increment page address and decrement length */
2132 len -= (1 << chip->phys_erase_shift); 2135 len -= (1 << chip->phys_erase_shift);
@@ -2173,7 +2176,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2173 continue; 2176 continue;
2174 /* update the BBT for chip */ 2177 /* update the BBT for chip */
2175 DEBUG(MTD_DEBUG_LEVEL0, "nand_erase_nand: nand_update_bbt " 2178 DEBUG(MTD_DEBUG_LEVEL0, "nand_erase_nand: nand_update_bbt "
2176 "(%d:0x%0x 0x%0x)\n", chipnr, rewrite_bbt[chipnr], 2179 "(%d:0x%0llx 0x%0x)\n", chipnr, rewrite_bbt[chipnr],
2177 chip->bbt_td->pages[chipnr]); 2180 chip->bbt_td->pages[chipnr]);
2178 nand_update_bbt(mtd, rewrite_bbt[chipnr]); 2181 nand_update_bbt(mtd, rewrite_bbt[chipnr]);
2179 } 2182 }
@@ -2365,7 +2368,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2365 if (!mtd->name) 2368 if (!mtd->name)
2366 mtd->name = type->name; 2369 mtd->name = type->name;
2367 2370
2368 chip->chipsize = type->chipsize << 20; 2371 chip->chipsize = (uint64_t)type->chipsize << 20;
2369 2372
2370 /* Newer devices have all the information in additional id bytes */ 2373 /* Newer devices have all the information in additional id bytes */
2371 if (!type->pagesize) { 2374 if (!type->pagesize) {
@@ -2423,7 +2426,10 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2423 2426
2424 chip->bbt_erase_shift = chip->phys_erase_shift = 2427 chip->bbt_erase_shift = chip->phys_erase_shift =
2425 ffs(mtd->erasesize) - 1; 2428 ffs(mtd->erasesize) - 1;
2426 chip->chip_shift = ffs(chip->chipsize) - 1; 2429 if (chip->chipsize & 0xffffffff)
2430 chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
2431 else
2432 chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1;
2427 2433
2428 /* Set the bad block position */ 2434 /* Set the bad block position */
2429 chip->badblockpos = mtd->writesize > 512 ? 2435 chip->badblockpos = mtd->writesize > 512 ?
@@ -2517,7 +2523,6 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips)
2517/** 2523/**
2518 * nand_scan_tail - [NAND Interface] Scan for the NAND device 2524 * nand_scan_tail - [NAND Interface] Scan for the NAND device
2519 * @mtd: MTD device structure 2525 * @mtd: MTD device structure
2520 * @maxchips: Number of chips to scan for
2521 * 2526 *
2522 * This is the second phase of the normal nand_scan() function. It 2527 * This is the second phase of the normal nand_scan() function. It
2523 * fills out all the uninitialized function pointers with the defaults 2528 * fills out all the uninitialized function pointers with the defaults
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 0b1c48595f12..55c23e5cd210 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -171,16 +171,16 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
171 if (tmp == msk) 171 if (tmp == msk)
172 continue; 172 continue;
173 if (reserved_block_code && (tmp == reserved_block_code)) { 173 if (reserved_block_code && (tmp == reserved_block_code)) {
174 printk(KERN_DEBUG "nand_read_bbt: Reserved block at 0x%08x\n", 174 printk(KERN_DEBUG "nand_read_bbt: Reserved block at 0x%012llx\n",
175 ((offs << 2) + (act >> 1)) << this->bbt_erase_shift); 175 (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
176 this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06); 176 this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06);
177 mtd->ecc_stats.bbtblocks++; 177 mtd->ecc_stats.bbtblocks++;
178 continue; 178 continue;
179 } 179 }
180 /* Leave it for now, if its matured we can move this 180 /* Leave it for now, if its matured we can move this
181 * message to MTD_DEBUG_LEVEL0 */ 181 * message to MTD_DEBUG_LEVEL0 */
182 printk(KERN_DEBUG "nand_read_bbt: Bad block at 0x%08x\n", 182 printk(KERN_DEBUG "nand_read_bbt: Bad block at 0x%012llx\n",
183 ((offs << 2) + (act >> 1)) << this->bbt_erase_shift); 183 (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
184 /* Factory marked bad or worn out ? */ 184 /* Factory marked bad or worn out ? */
185 if (tmp == 0) 185 if (tmp == 0)
186 this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06); 186 this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06);
@@ -284,7 +284,7 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
284 284
285 /* Read the primary version, if available */ 285 /* Read the primary version, if available */
286 if (td->options & NAND_BBT_VERSION) { 286 if (td->options & NAND_BBT_VERSION) {
287 scan_read_raw(mtd, buf, td->pages[0] << this->page_shift, 287 scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
288 mtd->writesize); 288 mtd->writesize);
289 td->version[0] = buf[mtd->writesize + td->veroffs]; 289 td->version[0] = buf[mtd->writesize + td->veroffs];
290 printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", 290 printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
@@ -293,7 +293,7 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
293 293
294 /* Read the mirror version, if available */ 294 /* Read the mirror version, if available */
295 if (md && (md->options & NAND_BBT_VERSION)) { 295 if (md && (md->options & NAND_BBT_VERSION)) {
296 scan_read_raw(mtd, buf, md->pages[0] << this->page_shift, 296 scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
297 mtd->writesize); 297 mtd->writesize);
298 md->version[0] = buf[mtd->writesize + md->veroffs]; 298 md->version[0] = buf[mtd->writesize + md->veroffs];
299 printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", 299 printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
@@ -411,7 +411,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
411 numblocks = this->chipsize >> (this->bbt_erase_shift - 1); 411 numblocks = this->chipsize >> (this->bbt_erase_shift - 1);
412 startblock = chip * numblocks; 412 startblock = chip * numblocks;
413 numblocks += startblock; 413 numblocks += startblock;
414 from = startblock << (this->bbt_erase_shift - 1); 414 from = (loff_t)startblock << (this->bbt_erase_shift - 1);
415 } 415 }
416 416
417 for (i = startblock; i < numblocks;) { 417 for (i = startblock; i < numblocks;) {
@@ -428,8 +428,8 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
428 428
429 if (ret) { 429 if (ret) {
430 this->bbt[i >> 3] |= 0x03 << (i & 0x6); 430 this->bbt[i >> 3] |= 0x03 << (i & 0x6);
431 printk(KERN_WARNING "Bad eraseblock %d at 0x%08x\n", 431 printk(KERN_WARNING "Bad eraseblock %d at 0x%012llx\n",
432 i >> 1, (unsigned int)from); 432 i >> 1, (unsigned long long)from);
433 mtd->ecc_stats.badblocks++; 433 mtd->ecc_stats.badblocks++;
434 } 434 }
435 435
@@ -495,7 +495,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
495 for (block = 0; block < td->maxblocks; block++) { 495 for (block = 0; block < td->maxblocks; block++) {
496 496
497 int actblock = startblock + dir * block; 497 int actblock = startblock + dir * block;
498 loff_t offs = actblock << this->bbt_erase_shift; 498 loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
499 499
500 /* Read first page */ 500 /* Read first page */
501 scan_read_raw(mtd, buf, offs, mtd->writesize); 501 scan_read_raw(mtd, buf, offs, mtd->writesize);
@@ -719,7 +719,7 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
719 719
720 memset(&einfo, 0, sizeof(einfo)); 720 memset(&einfo, 0, sizeof(einfo));
721 einfo.mtd = mtd; 721 einfo.mtd = mtd;
722 einfo.addr = (unsigned long)to; 722 einfo.addr = to;
723 einfo.len = 1 << this->bbt_erase_shift; 723 einfo.len = 1 << this->bbt_erase_shift;
724 res = nand_erase_nand(mtd, &einfo, 1); 724 res = nand_erase_nand(mtd, &einfo, 1);
725 if (res < 0) 725 if (res < 0)
@@ -729,8 +729,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
729 if (res < 0) 729 if (res < 0)
730 goto outerr; 730 goto outerr;
731 731
732 printk(KERN_DEBUG "Bad block table written to 0x%08x, version " 732 printk(KERN_DEBUG "Bad block table written to 0x%012llx, version "
733 "0x%02X\n", (unsigned int)to, td->version[chip]); 733 "0x%02X\n", (unsigned long long)to, td->version[chip]);
734 734
735 /* Mark it as used */ 735 /* Mark it as used */
736 td->pages[chip] = page; 736 td->pages[chip] = page;
@@ -910,7 +910,7 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
910 newval = oldval | (0x2 << (block & 0x06)); 910 newval = oldval | (0x2 << (block & 0x06));
911 this->bbt[(block >> 3)] = newval; 911 this->bbt[(block >> 3)] = newval;
912 if ((oldval != newval) && td->reserved_block_code) 912 if ((oldval != newval) && td->reserved_block_code)
913 nand_update_bbt(mtd, block << (this->bbt_erase_shift - 1)); 913 nand_update_bbt(mtd, (loff_t)block << (this->bbt_erase_shift - 1));
914 continue; 914 continue;
915 } 915 }
916 update = 0; 916 update = 0;
@@ -931,7 +931,7 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
931 new ones have been marked, then we need to update the stored 931 new ones have been marked, then we need to update the stored
932 bbts. This should only happen once. */ 932 bbts. This should only happen once. */
933 if (update && td->reserved_block_code) 933 if (update && td->reserved_block_code)
934 nand_update_bbt(mtd, (block - 2) << (this->bbt_erase_shift - 1)); 934 nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1));
935 } 935 }
936} 936}
937 937
@@ -1027,7 +1027,6 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
1027 if (!this->bbt || !td) 1027 if (!this->bbt || !td)
1028 return -EINVAL; 1028 return -EINVAL;
1029 1029
1030 len = mtd->size >> (this->bbt_erase_shift + 2);
1031 /* Allocate a temporary buffer for one eraseblock incl. oob */ 1030 /* Allocate a temporary buffer for one eraseblock incl. oob */
1032 len = (1 << this->bbt_erase_shift); 1031 len = (1 << this->bbt_erase_shift);
1033 len += (len >> this->page_shift) * mtd->oobsize; 1032 len += (len >> this->page_shift) * mtd->oobsize;
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index ae7c57781a68..cd0711b83ac4 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -38,6 +38,9 @@
38#include <linux/delay.h> 38#include <linux/delay.h>
39#include <linux/list.h> 39#include <linux/list.h>
40#include <linux/random.h> 40#include <linux/random.h>
41#include <linux/sched.h>
42#include <linux/fs.h>
43#include <linux/pagemap.h>
41 44
42/* Default simulator parameters values */ 45/* Default simulator parameters values */
43#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \ 46#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
@@ -100,6 +103,7 @@ static unsigned int bitflips = 0;
100static char *gravepages = NULL; 103static char *gravepages = NULL;
101static unsigned int rptwear = 0; 104static unsigned int rptwear = 0;
102static unsigned int overridesize = 0; 105static unsigned int overridesize = 0;
106static char *cache_file = NULL;
103 107
104module_param(first_id_byte, uint, 0400); 108module_param(first_id_byte, uint, 0400);
105module_param(second_id_byte, uint, 0400); 109module_param(second_id_byte, uint, 0400);
@@ -122,12 +126,13 @@ module_param(bitflips, uint, 0400);
122module_param(gravepages, charp, 0400); 126module_param(gravepages, charp, 0400);
123module_param(rptwear, uint, 0400); 127module_param(rptwear, uint, 0400);
124module_param(overridesize, uint, 0400); 128module_param(overridesize, uint, 0400);
129module_param(cache_file, charp, 0400);
125 130
126MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)"); 131MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)");
127MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)"); 132MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)");
128MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command"); 133MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command");
129MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command"); 134MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command");
130MODULE_PARM_DESC(access_delay, "Initial page access delay (microiseconds)"); 135MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
131MODULE_PARM_DESC(programm_delay, "Page programm delay (microseconds"); 136MODULE_PARM_DESC(programm_delay, "Page programm delay (microseconds");
132MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)"); 137MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
133MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanodeconds)"); 138MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanodeconds)");
@@ -153,6 +158,7 @@ MODULE_PARM_DESC(rptwear, "Number of erases inbetween reporting wear, if
153MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. " 158MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. "
154 "The size is specified in erase blocks and as the exponent of a power of two" 159 "The size is specified in erase blocks and as the exponent of a power of two"
155 " e.g. 5 means a size of 32 erase blocks"); 160 " e.g. 5 means a size of 32 erase blocks");
161MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory");
156 162
157/* The largest possible page size */ 163/* The largest possible page size */
158#define NS_LARGEST_PAGE_SIZE 2048 164#define NS_LARGEST_PAGE_SIZE 2048
@@ -266,6 +272,9 @@ MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the I
266 */ 272 */
267#define NS_MAX_PREVSTATES 1 273#define NS_MAX_PREVSTATES 1
268 274
275/* Maximum page cache pages needed to read or write a NAND page to the cache_file */
276#define NS_MAX_HELD_PAGES 16
277
269/* 278/*
270 * A union to represent flash memory contents and flash buffer. 279 * A union to represent flash memory contents and flash buffer.
271 */ 280 */
@@ -295,6 +304,9 @@ struct nandsim {
295 /* The simulated NAND flash pages array */ 304 /* The simulated NAND flash pages array */
296 union ns_mem *pages; 305 union ns_mem *pages;
297 306
307 /* Slab allocator for nand pages */
308 struct kmem_cache *nand_pages_slab;
309
298 /* Internal buffer of page + OOB size bytes */ 310 /* Internal buffer of page + OOB size bytes */
299 union ns_mem buf; 311 union ns_mem buf;
300 312
@@ -335,6 +347,13 @@ struct nandsim {
335 int ale; /* address Latch Enable */ 347 int ale; /* address Latch Enable */
336 int wp; /* write Protect */ 348 int wp; /* write Protect */
337 } lines; 349 } lines;
350
351 /* Fields needed when using a cache file */
352 struct file *cfile; /* Open file */
353 unsigned char *pages_written; /* Which pages have been written */
354 void *file_buf;
355 struct page *held_pages[NS_MAX_HELD_PAGES];
356 int held_cnt;
338}; 357};
339 358
340/* 359/*
@@ -420,25 +439,69 @@ static struct mtd_info *nsmtd;
420static u_char ns_verify_buf[NS_LARGEST_PAGE_SIZE]; 439static u_char ns_verify_buf[NS_LARGEST_PAGE_SIZE];
421 440
422/* 441/*
423 * Allocate array of page pointers and initialize the array to NULL 442 * Allocate array of page pointers, create slab allocation for an array
424 * pointers. 443 * and initialize the array by NULL pointers.
425 * 444 *
426 * RETURNS: 0 if success, -ENOMEM if memory alloc fails. 445 * RETURNS: 0 if success, -ENOMEM if memory alloc fails.
427 */ 446 */
428static int alloc_device(struct nandsim *ns) 447static int alloc_device(struct nandsim *ns)
429{ 448{
430 int i; 449 struct file *cfile;
450 int i, err;
451
452 if (cache_file) {
453 cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
454 if (IS_ERR(cfile))
455 return PTR_ERR(cfile);
456 if (!cfile->f_op || (!cfile->f_op->read && !cfile->f_op->aio_read)) {
457 NS_ERR("alloc_device: cache file not readable\n");
458 err = -EINVAL;
459 goto err_close;
460 }
461 if (!cfile->f_op->write && !cfile->f_op->aio_write) {
462 NS_ERR("alloc_device: cache file not writeable\n");
463 err = -EINVAL;
464 goto err_close;
465 }
466 ns->pages_written = vmalloc(ns->geom.pgnum);
467 if (!ns->pages_written) {
468 NS_ERR("alloc_device: unable to allocate pages written array\n");
469 err = -ENOMEM;
470 goto err_close;
471 }
472 ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
473 if (!ns->file_buf) {
474 NS_ERR("alloc_device: unable to allocate file buf\n");
475 err = -ENOMEM;
476 goto err_free;
477 }
478 ns->cfile = cfile;
479 memset(ns->pages_written, 0, ns->geom.pgnum);
480 return 0;
481 }
431 482
432 ns->pages = vmalloc(ns->geom.pgnum * sizeof(union ns_mem)); 483 ns->pages = vmalloc(ns->geom.pgnum * sizeof(union ns_mem));
433 if (!ns->pages) { 484 if (!ns->pages) {
434 NS_ERR("alloc_map: unable to allocate page array\n"); 485 NS_ERR("alloc_device: unable to allocate page array\n");
435 return -ENOMEM; 486 return -ENOMEM;
436 } 487 }
437 for (i = 0; i < ns->geom.pgnum; i++) { 488 for (i = 0; i < ns->geom.pgnum; i++) {
438 ns->pages[i].byte = NULL; 489 ns->pages[i].byte = NULL;
439 } 490 }
491 ns->nand_pages_slab = kmem_cache_create("nandsim",
492 ns->geom.pgszoob, 0, 0, NULL);
493 if (!ns->nand_pages_slab) {
494 NS_ERR("cache_create: unable to create kmem_cache\n");
495 return -ENOMEM;
496 }
440 497
441 return 0; 498 return 0;
499
500err_free:
501 vfree(ns->pages_written);
502err_close:
503 filp_close(cfile, NULL);
504 return err;
442} 505}
443 506
444/* 507/*
@@ -448,11 +511,20 @@ static void free_device(struct nandsim *ns)
448{ 511{
449 int i; 512 int i;
450 513
514 if (ns->cfile) {
515 kfree(ns->file_buf);
516 vfree(ns->pages_written);
517 filp_close(ns->cfile, NULL);
518 return;
519 }
520
451 if (ns->pages) { 521 if (ns->pages) {
452 for (i = 0; i < ns->geom.pgnum; i++) { 522 for (i = 0; i < ns->geom.pgnum; i++) {
453 if (ns->pages[i].byte) 523 if (ns->pages[i].byte)
454 kfree(ns->pages[i].byte); 524 kmem_cache_free(ns->nand_pages_slab,
525 ns->pages[i].byte);
455 } 526 }
527 kmem_cache_destroy(ns->nand_pages_slab);
456 vfree(ns->pages); 528 vfree(ns->pages);
457 } 529 }
458} 530}
@@ -464,7 +536,7 @@ static char *get_partition_name(int i)
464 return kstrdup(buf, GFP_KERNEL); 536 return kstrdup(buf, GFP_KERNEL);
465} 537}
466 538
467static u_int64_t divide(u_int64_t n, u_int32_t d) 539static uint64_t divide(uint64_t n, uint32_t d)
468{ 540{
469 do_div(n, d); 541 do_div(n, d);
470 return n; 542 return n;
@@ -480,8 +552,8 @@ static int init_nandsim(struct mtd_info *mtd)
480 struct nand_chip *chip = (struct nand_chip *)mtd->priv; 552 struct nand_chip *chip = (struct nand_chip *)mtd->priv;
481 struct nandsim *ns = (struct nandsim *)(chip->priv); 553 struct nandsim *ns = (struct nandsim *)(chip->priv);
482 int i, ret = 0; 554 int i, ret = 0;
483 u_int64_t remains; 555 uint64_t remains;
484 u_int64_t next_offset; 556 uint64_t next_offset;
485 557
486 if (NS_IS_INITIALIZED(ns)) { 558 if (NS_IS_INITIALIZED(ns)) {
487 NS_ERR("init_nandsim: nandsim is already initialized\n"); 559 NS_ERR("init_nandsim: nandsim is already initialized\n");
@@ -548,7 +620,7 @@ static int init_nandsim(struct mtd_info *mtd)
548 remains = ns->geom.totsz; 620 remains = ns->geom.totsz;
549 next_offset = 0; 621 next_offset = 0;
550 for (i = 0; i < parts_num; ++i) { 622 for (i = 0; i < parts_num; ++i) {
551 u_int64_t part_sz = (u_int64_t)parts[i] * ns->geom.secsz; 623 uint64_t part_sz = (uint64_t)parts[i] * ns->geom.secsz;
552 624
553 if (!part_sz || part_sz > remains) { 625 if (!part_sz || part_sz > remains) {
554 NS_ERR("bad partition size.\n"); 626 NS_ERR("bad partition size.\n");
@@ -1211,6 +1283,97 @@ static int find_operation(struct nandsim *ns, uint32_t flag)
1211 return -1; 1283 return -1;
1212} 1284}
1213 1285
1286static void put_pages(struct nandsim *ns)
1287{
1288 int i;
1289
1290 for (i = 0; i < ns->held_cnt; i++)
1291 page_cache_release(ns->held_pages[i]);
1292}
1293
1294/* Get page cache pages in advance to provide NOFS memory allocation */
1295static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t pos)
1296{
1297 pgoff_t index, start_index, end_index;
1298 struct page *page;
1299 struct address_space *mapping = file->f_mapping;
1300
1301 start_index = pos >> PAGE_CACHE_SHIFT;
1302 end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT;
1303 if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
1304 return -EINVAL;
1305 ns->held_cnt = 0;
1306 for (index = start_index; index <= end_index; index++) {
1307 page = find_get_page(mapping, index);
1308 if (page == NULL) {
1309 page = find_or_create_page(mapping, index, GFP_NOFS);
1310 if (page == NULL) {
1311 write_inode_now(mapping->host, 1);
1312 page = find_or_create_page(mapping, index, GFP_NOFS);
1313 }
1314 if (page == NULL) {
1315 put_pages(ns);
1316 return -ENOMEM;
1317 }
1318 unlock_page(page);
1319 }
1320 ns->held_pages[ns->held_cnt++] = page;
1321 }
1322 return 0;
1323}
1324
1325static int set_memalloc(void)
1326{
1327 if (current->flags & PF_MEMALLOC)
1328 return 0;
1329 current->flags |= PF_MEMALLOC;
1330 return 1;
1331}
1332
1333static void clear_memalloc(int memalloc)
1334{
1335 if (memalloc)
1336 current->flags &= ~PF_MEMALLOC;
1337}
1338
1339static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
1340{
1341 mm_segment_t old_fs;
1342 ssize_t tx;
1343 int err, memalloc;
1344
1345 err = get_pages(ns, file, count, *pos);
1346 if (err)
1347 return err;
1348 old_fs = get_fs();
1349 set_fs(get_ds());
1350 memalloc = set_memalloc();
1351 tx = vfs_read(file, (char __user *)buf, count, pos);
1352 clear_memalloc(memalloc);
1353 set_fs(old_fs);
1354 put_pages(ns);
1355 return tx;
1356}
1357
1358static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
1359{
1360 mm_segment_t old_fs;
1361 ssize_t tx;
1362 int err, memalloc;
1363
1364 err = get_pages(ns, file, count, *pos);
1365 if (err)
1366 return err;
1367 old_fs = get_fs();
1368 set_fs(get_ds());
1369 memalloc = set_memalloc();
1370 tx = vfs_write(file, (char __user *)buf, count, pos);
1371 clear_memalloc(memalloc);
1372 set_fs(old_fs);
1373 put_pages(ns);
1374 return tx;
1375}
1376
1214/* 1377/*
1215 * Returns a pointer to the current page. 1378 * Returns a pointer to the current page.
1216 */ 1379 */
@@ -1227,6 +1390,38 @@ static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
1227 return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off; 1390 return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
1228} 1391}
1229 1392
1393int do_read_error(struct nandsim *ns, int num)
1394{
1395 unsigned int page_no = ns->regs.row;
1396
1397 if (read_error(page_no)) {
1398 int i;
1399 memset(ns->buf.byte, 0xFF, num);
1400 for (i = 0; i < num; ++i)
1401 ns->buf.byte[i] = random32();
1402 NS_WARN("simulating read error in page %u\n", page_no);
1403 return 1;
1404 }
1405 return 0;
1406}
1407
1408void do_bit_flips(struct nandsim *ns, int num)
1409{
1410 if (bitflips && random32() < (1 << 22)) {
1411 int flips = 1;
1412 if (bitflips > 1)
1413 flips = (random32() % (int) bitflips) + 1;
1414 while (flips--) {
1415 int pos = random32() % (num * 8);
1416 ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
1417 NS_WARN("read_page: flipping bit %d in page %d "
1418 "reading from %d ecc: corrected=%u failed=%u\n",
1419 pos, ns->regs.row, ns->regs.column + ns->regs.off,
1420 nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
1421 }
1422 }
1423}
1424
1230/* 1425/*
1231 * Fill the NAND buffer with data read from the specified page. 1426 * Fill the NAND buffer with data read from the specified page.
1232 */ 1427 */
@@ -1234,36 +1429,40 @@ static void read_page(struct nandsim *ns, int num)
1234{ 1429{
1235 union ns_mem *mypage; 1430 union ns_mem *mypage;
1236 1431
1432 if (ns->cfile) {
1433 if (!ns->pages_written[ns->regs.row]) {
1434 NS_DBG("read_page: page %d not written\n", ns->regs.row);
1435 memset(ns->buf.byte, 0xFF, num);
1436 } else {
1437 loff_t pos;
1438 ssize_t tx;
1439
1440 NS_DBG("read_page: page %d written, reading from %d\n",
1441 ns->regs.row, ns->regs.column + ns->regs.off);
1442 if (do_read_error(ns, num))
1443 return;
1444 pos = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
1445 tx = read_file(ns, ns->cfile, ns->buf.byte, num, &pos);
1446 if (tx != num) {
1447 NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
1448 return;
1449 }
1450 do_bit_flips(ns, num);
1451 }
1452 return;
1453 }
1454
1237 mypage = NS_GET_PAGE(ns); 1455 mypage = NS_GET_PAGE(ns);
1238 if (mypage->byte == NULL) { 1456 if (mypage->byte == NULL) {
1239 NS_DBG("read_page: page %d not allocated\n", ns->regs.row); 1457 NS_DBG("read_page: page %d not allocated\n", ns->regs.row);
1240 memset(ns->buf.byte, 0xFF, num); 1458 memset(ns->buf.byte, 0xFF, num);
1241 } else { 1459 } else {
1242 unsigned int page_no = ns->regs.row;
1243 NS_DBG("read_page: page %d allocated, reading from %d\n", 1460 NS_DBG("read_page: page %d allocated, reading from %d\n",
1244 ns->regs.row, ns->regs.column + ns->regs.off); 1461 ns->regs.row, ns->regs.column + ns->regs.off);
1245 if (read_error(page_no)) { 1462 if (do_read_error(ns, num))
1246 int i;
1247 memset(ns->buf.byte, 0xFF, num);
1248 for (i = 0; i < num; ++i)
1249 ns->buf.byte[i] = random32();
1250 NS_WARN("simulating read error in page %u\n", page_no);
1251 return; 1463 return;
1252 }
1253 memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num); 1464 memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
1254 if (bitflips && random32() < (1 << 22)) { 1465 do_bit_flips(ns, num);
1255 int flips = 1;
1256 if (bitflips > 1)
1257 flips = (random32() % (int) bitflips) + 1;
1258 while (flips--) {
1259 int pos = random32() % (num * 8);
1260 ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
1261 NS_WARN("read_page: flipping bit %d in page %d "
1262 "reading from %d ecc: corrected=%u failed=%u\n",
1263 pos, ns->regs.row, ns->regs.column + ns->regs.off,
1264 nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
1265 }
1266 }
1267 } 1466 }
1268} 1467}
1269 1468
@@ -1275,11 +1474,20 @@ static void erase_sector(struct nandsim *ns)
1275 union ns_mem *mypage; 1474 union ns_mem *mypage;
1276 int i; 1475 int i;
1277 1476
1477 if (ns->cfile) {
1478 for (i = 0; i < ns->geom.pgsec; i++)
1479 if (ns->pages_written[ns->regs.row + i]) {
1480 NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i);
1481 ns->pages_written[ns->regs.row + i] = 0;
1482 }
1483 return;
1484 }
1485
1278 mypage = NS_GET_PAGE(ns); 1486 mypage = NS_GET_PAGE(ns);
1279 for (i = 0; i < ns->geom.pgsec; i++) { 1487 for (i = 0; i < ns->geom.pgsec; i++) {
1280 if (mypage->byte != NULL) { 1488 if (mypage->byte != NULL) {
1281 NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i); 1489 NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i);
1282 kfree(mypage->byte); 1490 kmem_cache_free(ns->nand_pages_slab, mypage->byte);
1283 mypage->byte = NULL; 1491 mypage->byte = NULL;
1284 } 1492 }
1285 mypage++; 1493 mypage++;
@@ -1295,16 +1503,57 @@ static int prog_page(struct nandsim *ns, int num)
1295 union ns_mem *mypage; 1503 union ns_mem *mypage;
1296 u_char *pg_off; 1504 u_char *pg_off;
1297 1505
1506 if (ns->cfile) {
1507 loff_t off, pos;
1508 ssize_t tx;
1509 int all;
1510
1511 NS_DBG("prog_page: writing page %d\n", ns->regs.row);
1512 pg_off = ns->file_buf + ns->regs.column + ns->regs.off;
1513 off = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
1514 if (!ns->pages_written[ns->regs.row]) {
1515 all = 1;
1516 memset(ns->file_buf, 0xff, ns->geom.pgszoob);
1517 } else {
1518 all = 0;
1519 pos = off;
1520 tx = read_file(ns, ns->cfile, pg_off, num, &pos);
1521 if (tx != num) {
1522 NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
1523 return -1;
1524 }
1525 }
1526 for (i = 0; i < num; i++)
1527 pg_off[i] &= ns->buf.byte[i];
1528 if (all) {
1529 pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
1530 tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, &pos);
1531 if (tx != ns->geom.pgszoob) {
1532 NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
1533 return -1;
1534 }
1535 ns->pages_written[ns->regs.row] = 1;
1536 } else {
1537 pos = off;
1538 tx = write_file(ns, ns->cfile, pg_off, num, &pos);
1539 if (tx != num) {
1540 NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
1541 return -1;
1542 }
1543 }
1544 return 0;
1545 }
1546
1298 mypage = NS_GET_PAGE(ns); 1547 mypage = NS_GET_PAGE(ns);
1299 if (mypage->byte == NULL) { 1548 if (mypage->byte == NULL) {
1300 NS_DBG("prog_page: allocating page %d\n", ns->regs.row); 1549 NS_DBG("prog_page: allocating page %d\n", ns->regs.row);
1301 /* 1550 /*
1302 * We allocate memory with GFP_NOFS because a flash FS may 1551 * We allocate memory with GFP_NOFS because a flash FS may
1303 * utilize this. If it is holding an FS lock, then gets here, 1552 * utilize this. If it is holding an FS lock, then gets here,
1304 * then kmalloc runs writeback which goes to the FS again 1553 * then kernel memory alloc runs writeback which goes to the FS
1305 * and deadlocks. This was seen in practice. 1554 * again and deadlocks. This was seen in practice.
1306 */ 1555 */
1307 mypage->byte = kmalloc(ns->geom.pgszoob, GFP_NOFS); 1556 mypage->byte = kmem_cache_alloc(ns->nand_pages_slab, GFP_NOFS);
1308 if (mypage->byte == NULL) { 1557 if (mypage->byte == NULL) {
1309 NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row); 1558 NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row);
1310 return -1; 1559 return -1;
@@ -1736,13 +1985,17 @@ static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
1736 1985
1737 /* Check if chip is expecting command */ 1986 /* Check if chip is expecting command */
1738 if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) { 1987 if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) {
1739 /* 1988 /* Do not warn if only 2 id bytes are read */
1740 * We are in situation when something else (not command) 1989 if (!(ns->regs.command == NAND_CMD_READID &&
1741 * was expected but command was input. In this case ignore 1990 NS_STATE(ns->state) == STATE_DATAOUT_ID && ns->regs.count == 2)) {
1742 * previous command(s)/state(s) and accept the last one. 1991 /*
1743 */ 1992 * We are in situation when something else (not command)
1744 NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, " 1993 * was expected but command was input. In this case ignore
1745 "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate)); 1994 * previous command(s)/state(s) and accept the last one.
1995 */
1996 NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, "
1997 "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate));
1998 }
1746 switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); 1999 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1747 } 2000 }
1748 2001
@@ -2044,7 +2297,7 @@ static int __init ns_init_module(void)
2044 } 2297 }
2045 2298
2046 if (overridesize) { 2299 if (overridesize) {
2047 u_int64_t new_size = (u_int64_t)nsmtd->erasesize << overridesize; 2300 uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
2048 if (new_size >> overridesize != nsmtd->erasesize) { 2301 if (new_size >> overridesize != nsmtd->erasesize) {
2049 NS_ERR("overridesize is too big\n"); 2302 NS_ERR("overridesize is too big\n");
2050 goto err_exit; 2303 goto err_exit;
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index f674c5427b17..75f9f4874ecf 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -54,7 +54,7 @@ static int __init plat_nand_probe(struct platform_device *pdev)
54 data->chip.priv = &data; 54 data->chip.priv = &data;
55 data->mtd.priv = &data->chip; 55 data->mtd.priv = &data->chip;
56 data->mtd.owner = THIS_MODULE; 56 data->mtd.owner = THIS_MODULE;
57 data->mtd.name = pdev->dev.bus_id; 57 data->mtd.name = dev_name(&pdev->dev);
58 58
59 data->chip.IO_ADDR_R = data->io_base; 59 data->chip.IO_ADDR_R = data->io_base;
60 data->chip.IO_ADDR_W = data->io_base; 60 data->chip.IO_ADDR_W = data->io_base;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 15f0a26730ae..cc55cbc2b308 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -20,8 +20,8 @@
20#include <linux/mtd/partitions.h> 20#include <linux/mtd/partitions.h>
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/irq.h> 22#include <linux/irq.h>
23#include <asm/dma.h>
24 23
24#include <mach/dma.h>
25#include <mach/pxa-regs.h> 25#include <mach/pxa-regs.h>
26#include <mach/pxa3xx_nand.h> 26#include <mach/pxa3xx_nand.h>
27 27
@@ -298,7 +298,7 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = {
298#define NDTR1_tAR(c) (min((c), 15) << 0) 298#define NDTR1_tAR(c) (min((c), 15) << 0)
299 299
300/* convert nano-seconds to nand flash controller clock cycles */ 300/* convert nano-seconds to nand flash controller clock cycles */
301#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) + 1) 301#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) - 1)
302 302
303static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info, 303static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
304 const struct pxa3xx_nand_timing *t) 304 const struct pxa3xx_nand_timing *t)
@@ -368,14 +368,14 @@ static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info,
368 /* large block, 2 cycles for column address 368 /* large block, 2 cycles for column address
369 * row address starts from 3rd cycle 369 * row address starts from 3rd cycle
370 */ 370 */
371 info->ndcb1 |= (page_addr << 16) | (column & 0xffff); 371 info->ndcb1 |= page_addr << 16;
372 if (info->row_addr_cycles == 3) 372 if (info->row_addr_cycles == 3)
373 info->ndcb2 = (page_addr >> 16) & 0xff; 373 info->ndcb2 = (page_addr >> 16) & 0xff;
374 } else 374 } else
375 /* small block, 1 cycles for column address 375 /* small block, 1 cycles for column address
376 * row address starts from 2nd cycle 376 * row address starts from 2nd cycle
377 */ 377 */
378 info->ndcb1 = (page_addr << 8) | (column & 0xff); 378 info->ndcb1 = page_addr << 8;
379 379
380 if (cmd == cmdset->program) 380 if (cmd == cmdset->program)
381 info->ndcb0 |= NDCB0_CMD_TYPE(1) | NDCB0_AUTO_RS; 381 info->ndcb0 |= NDCB0_CMD_TYPE(1) | NDCB0_AUTO_RS;
@@ -1080,7 +1080,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1080 this = &info->nand_chip; 1080 this = &info->nand_chip;
1081 mtd->priv = info; 1081 mtd->priv = info;
1082 1082
1083 info->clk = clk_get(&pdev->dev, "NANDCLK"); 1083 info->clk = clk_get(&pdev->dev, NULL);
1084 if (IS_ERR(info->clk)) { 1084 if (IS_ERR(info->clk)) {
1085 dev_err(&pdev->dev, "failed to get nand clock\n"); 1085 dev_err(&pdev->dev, "failed to get nand clock\n");
1086 ret = PTR_ERR(info->clk); 1086 ret = PTR_ERR(info->clk);
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 556139ed1fdf..8e375d5fe231 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -45,8 +45,8 @@
45 45
46#include <asm/io.h> 46#include <asm/io.h>
47 47
48#include <asm/plat-s3c/regs-nand.h> 48#include <plat/regs-nand.h>
49#include <asm/plat-s3c/nand.h> 49#include <plat/nand.h>
50 50
51#ifdef CONFIG_MTD_NAND_S3C2410_HWECC 51#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
52static int hardware_ecc = 1; 52static int hardware_ecc = 1;
@@ -818,7 +818,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
818 goto exit_error; 818 goto exit_error;
819 } 819 }
820 820
821 memzero(info, sizeof(*info)); 821 memset(info, 0, sizeof(*info));
822 platform_set_drvdata(pdev, info); 822 platform_set_drvdata(pdev, info);
823 823
824 spin_lock_init(&info->controller.lock); 824 spin_lock_init(&info->controller.lock);
@@ -883,7 +883,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
883 goto exit_error; 883 goto exit_error;
884 } 884 }
885 885
886 memzero(info->mtds, size); 886 memset(info->mtds, 0, size);
887 887
888 /* initialise all possible chips */ 888 /* initialise all possible chips */
889 889
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 30a518e211bd..54ec7542a7b7 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -2,6 +2,7 @@
2 * drivers/mtd/nand/sharpsl.c 2 * drivers/mtd/nand/sharpsl.c
3 * 3 *
4 * Copyright (C) 2004 Richard Purdie 4 * Copyright (C) 2004 Richard Purdie
5 * Copyright (C) 2008 Dmitry Baryshkov
5 * 6 *
6 * Based on Sharp's NAND driver sharp_sl.c 7 * Based on Sharp's NAND driver sharp_sl.c
7 * 8 *
@@ -19,22 +20,31 @@
19#include <linux/mtd/nand.h> 20#include <linux/mtd/nand.h>
20#include <linux/mtd/nand_ecc.h> 21#include <linux/mtd/nand_ecc.h>
21#include <linux/mtd/partitions.h> 22#include <linux/mtd/partitions.h>
23#include <linux/mtd/sharpsl.h>
22#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/platform_device.h>
26
23#include <asm/io.h> 27#include <asm/io.h>
24#include <mach/hardware.h> 28#include <mach/hardware.h>
25#include <asm/mach-types.h> 29#include <asm/mach-types.h>
26 30
27static void __iomem *sharpsl_io_base; 31struct sharpsl_nand {
28static int sharpsl_phys_base = 0x0C000000; 32 struct mtd_info mtd;
33 struct nand_chip chip;
34
35 void __iomem *io;
36};
37
38#define mtd_to_sharpsl(_mtd) container_of(_mtd, struct sharpsl_nand, mtd)
29 39
30/* register offset */ 40/* register offset */
31#define ECCLPLB sharpsl_io_base+0x00 /* line parity 7 - 0 bit */ 41#define ECCLPLB 0x00 /* line parity 7 - 0 bit */
32#define ECCLPUB sharpsl_io_base+0x04 /* line parity 15 - 8 bit */ 42#define ECCLPUB 0x04 /* line parity 15 - 8 bit */
33#define ECCCP sharpsl_io_base+0x08 /* column parity 5 - 0 bit */ 43#define ECCCP 0x08 /* column parity 5 - 0 bit */
34#define ECCCNTR sharpsl_io_base+0x0C /* ECC byte counter */ 44#define ECCCNTR 0x0C /* ECC byte counter */
35#define ECCCLRR sharpsl_io_base+0x10 /* cleare ECC */ 45#define ECCCLRR 0x10 /* cleare ECC */
36#define FLASHIO sharpsl_io_base+0x14 /* Flash I/O */ 46#define FLASHIO 0x14 /* Flash I/O */
37#define FLASHCTL sharpsl_io_base+0x18 /* Flash Control */ 47#define FLASHCTL 0x18 /* Flash Control */
38 48
39/* Flash control bit */ 49/* Flash control bit */
40#define FLRYBY (1 << 5) 50#define FLRYBY (1 << 5)
@@ -45,35 +55,6 @@ static int sharpsl_phys_base = 0x0C000000;
45#define FLCE0 (1 << 0) 55#define FLCE0 (1 << 0)
46 56
47/* 57/*
48 * MTD structure for SharpSL
49 */
50static struct mtd_info *sharpsl_mtd = NULL;
51
52/*
53 * Define partitions for flash device
54 */
55#define DEFAULT_NUM_PARTITIONS 3
56
57static int nr_partitions;
58static struct mtd_partition sharpsl_nand_default_partition_info[] = {
59 {
60 .name = "System Area",
61 .offset = 0,
62 .size = 7 * 1024 * 1024,
63 },
64 {
65 .name = "Root Filesystem",
66 .offset = 7 * 1024 * 1024,
67 .size = 30 * 1024 * 1024,
68 },
69 {
70 .name = "Home Filesystem",
71 .offset = MTDPART_OFS_APPEND,
72 .size = MTDPART_SIZ_FULL,
73 },
74};
75
76/*
77 * hardware specific access to control-lines 58 * hardware specific access to control-lines
78 * ctrl: 59 * ctrl:
79 * NAND_CNE: bit 0 -> ! bit 0 & 4 60 * NAND_CNE: bit 0 -> ! bit 0 & 4
@@ -84,6 +65,7 @@ static struct mtd_partition sharpsl_nand_default_partition_info[] = {
84static void sharpsl_nand_hwcontrol(struct mtd_info *mtd, int cmd, 65static void sharpsl_nand_hwcontrol(struct mtd_info *mtd, int cmd,
85 unsigned int ctrl) 66 unsigned int ctrl)
86{ 67{
68 struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
87 struct nand_chip *chip = mtd->priv; 69 struct nand_chip *chip = mtd->priv;
88 70
89 if (ctrl & NAND_CTRL_CHANGE) { 71 if (ctrl & NAND_CTRL_CHANGE) {
@@ -93,103 +75,97 @@ static void sharpsl_nand_hwcontrol(struct mtd_info *mtd, int cmd,
93 75
94 bits ^= 0x11; 76 bits ^= 0x11;
95 77
96 writeb((readb(FLASHCTL) & ~0x17) | bits, FLASHCTL); 78 writeb((readb(sharpsl->io + FLASHCTL) & ~0x17) | bits, sharpsl->io + FLASHCTL);
97 } 79 }
98 80
99 if (cmd != NAND_CMD_NONE) 81 if (cmd != NAND_CMD_NONE)
100 writeb(cmd, chip->IO_ADDR_W); 82 writeb(cmd, chip->IO_ADDR_W);
101} 83}
102 84
103static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
104
105static struct nand_bbt_descr sharpsl_bbt = {
106 .options = 0,
107 .offs = 4,
108 .len = 2,
109 .pattern = scan_ff_pattern
110};
111
112static struct nand_bbt_descr sharpsl_akita_bbt = {
113 .options = 0,
114 .offs = 4,
115 .len = 1,
116 .pattern = scan_ff_pattern
117};
118
119static struct nand_ecclayout akita_oobinfo = {
120 .eccbytes = 24,
121 .eccpos = {
122 0x5, 0x1, 0x2, 0x3, 0x6, 0x7, 0x15, 0x11,
123 0x12, 0x13, 0x16, 0x17, 0x25, 0x21, 0x22, 0x23,
124 0x26, 0x27, 0x35, 0x31, 0x32, 0x33, 0x36, 0x37},
125 .oobfree = {{0x08, 0x09}}
126};
127
128static int sharpsl_nand_dev_ready(struct mtd_info *mtd) 85static int sharpsl_nand_dev_ready(struct mtd_info *mtd)
129{ 86{
130 return !((readb(FLASHCTL) & FLRYBY) == 0); 87 struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
88 return !((readb(sharpsl->io + FLASHCTL) & FLRYBY) == 0);
131} 89}
132 90
133static void sharpsl_nand_enable_hwecc(struct mtd_info *mtd, int mode) 91static void sharpsl_nand_enable_hwecc(struct mtd_info *mtd, int mode)
134{ 92{
135 writeb(0, ECCCLRR); 93 struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
94 writeb(0, sharpsl->io + ECCCLRR);
136} 95}
137 96
138static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat, u_char * ecc_code) 97static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat, u_char * ecc_code)
139{ 98{
140 ecc_code[0] = ~readb(ECCLPUB); 99 struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
141 ecc_code[1] = ~readb(ECCLPLB); 100 ecc_code[0] = ~readb(sharpsl->io + ECCLPUB);
142 ecc_code[2] = (~readb(ECCCP) << 2) | 0x03; 101 ecc_code[1] = ~readb(sharpsl->io + ECCLPLB);
143 return readb(ECCCNTR) != 0; 102 ecc_code[2] = (~readb(sharpsl->io + ECCCP) << 2) | 0x03;
103 return readb(sharpsl->io + ECCCNTR) != 0;
144} 104}
145 105
146#ifdef CONFIG_MTD_PARTITIONS 106#ifdef CONFIG_MTD_PARTITIONS
147const char *part_probes[] = { "cmdlinepart", NULL }; 107static const char *part_probes[] = { "cmdlinepart", NULL };
148#endif 108#endif
149 109
150/* 110/*
151 * Main initialization routine 111 * Main initialization routine
152 */ 112 */
153static int __init sharpsl_nand_init(void) 113static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
154{ 114{
155 struct nand_chip *this; 115 struct nand_chip *this;
116#ifdef CONFIG_MTD_PARTITIONS
156 struct mtd_partition *sharpsl_partition_info; 117 struct mtd_partition *sharpsl_partition_info;
118 int nr_partitions;
119#endif
120 struct resource *r;
157 int err = 0; 121 int err = 0;
122 struct sharpsl_nand *sharpsl;
123 struct sharpsl_nand_platform_data *data = pdev->dev.platform_data;
124
125 if (!data) {
126 dev_err(&pdev->dev, "no platform data!\n");
127 return -EINVAL;
128 }
158 129
159 /* Allocate memory for MTD device structure and private data */ 130 /* Allocate memory for MTD device structure and private data */
160 sharpsl_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); 131 sharpsl = kzalloc(sizeof(struct sharpsl_nand), GFP_KERNEL);
161 if (!sharpsl_mtd) { 132 if (!sharpsl) {
162 printk("Unable to allocate SharpSL NAND MTD device structure.\n"); 133 printk("Unable to allocate SharpSL NAND MTD device structure.\n");
163 return -ENOMEM; 134 return -ENOMEM;
164 } 135 }
165 136
137 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
138 if (!r) {
139 dev_err(&pdev->dev, "no io memory resource defined!\n");
140 err = -ENODEV;
141 goto err_get_res;
142 }
143
166 /* map physical address */ 144 /* map physical address */
167 sharpsl_io_base = ioremap(sharpsl_phys_base, 0x1000); 145 sharpsl->io = ioremap(r->start, resource_size(r));
168 if (!sharpsl_io_base) { 146 if (!sharpsl->io) {
169 printk("ioremap to access Sharp SL NAND chip failed\n"); 147 printk("ioremap to access Sharp SL NAND chip failed\n");
170 kfree(sharpsl_mtd); 148 err = -EIO;
171 return -EIO; 149 goto err_ioremap;
172 } 150 }
173 151
174 /* Get pointer to private data */ 152 /* Get pointer to private data */
175 this = (struct nand_chip *)(&sharpsl_mtd[1]); 153 this = (struct nand_chip *)(&sharpsl->chip);
176
177 /* Initialize structures */
178 memset(sharpsl_mtd, 0, sizeof(struct mtd_info));
179 memset(this, 0, sizeof(struct nand_chip));
180 154
181 /* Link the private data with the MTD structure */ 155 /* Link the private data with the MTD structure */
182 sharpsl_mtd->priv = this; 156 sharpsl->mtd.priv = this;
183 sharpsl_mtd->owner = THIS_MODULE; 157 sharpsl->mtd.owner = THIS_MODULE;
158
159 platform_set_drvdata(pdev, sharpsl);
184 160
185 /* 161 /*
186 * PXA initialize 162 * PXA initialize
187 */ 163 */
188 writeb(readb(FLASHCTL) | FLWP, FLASHCTL); 164 writeb(readb(sharpsl->io + FLASHCTL) | FLWP, sharpsl->io + FLASHCTL);
189 165
190 /* Set address of NAND IO lines */ 166 /* Set address of NAND IO lines */
191 this->IO_ADDR_R = FLASHIO; 167 this->IO_ADDR_R = sharpsl->io + FLASHIO;
192 this->IO_ADDR_W = FLASHIO; 168 this->IO_ADDR_W = sharpsl->io + FLASHIO;
193 /* Set address of hardware control function */ 169 /* Set address of hardware control function */
194 this->cmd_ctrl = sharpsl_nand_hwcontrol; 170 this->cmd_ctrl = sharpsl_nand_hwcontrol;
195 this->dev_ready = sharpsl_nand_dev_ready; 171 this->dev_ready = sharpsl_nand_dev_ready;
@@ -199,68 +175,89 @@ static int __init sharpsl_nand_init(void)
199 this->ecc.mode = NAND_ECC_HW; 175 this->ecc.mode = NAND_ECC_HW;
200 this->ecc.size = 256; 176 this->ecc.size = 256;
201 this->ecc.bytes = 3; 177 this->ecc.bytes = 3;
202 this->badblock_pattern = &sharpsl_bbt; 178 this->badblock_pattern = data->badblock_pattern;
203 if (machine_is_akita() || machine_is_borzoi()) { 179 this->ecc.layout = data->ecc_layout;
204 this->badblock_pattern = &sharpsl_akita_bbt;
205 this->ecc.layout = &akita_oobinfo;
206 }
207 this->ecc.hwctl = sharpsl_nand_enable_hwecc; 180 this->ecc.hwctl = sharpsl_nand_enable_hwecc;
208 this->ecc.calculate = sharpsl_nand_calculate_ecc; 181 this->ecc.calculate = sharpsl_nand_calculate_ecc;
209 this->ecc.correct = nand_correct_data; 182 this->ecc.correct = nand_correct_data;
210 183
211 /* Scan to find existence of the device */ 184 /* Scan to find existence of the device */
212 err = nand_scan(sharpsl_mtd, 1); 185 err = nand_scan(&sharpsl->mtd, 1);
213 if (err) { 186 if (err)
214 iounmap(sharpsl_io_base); 187 goto err_scan;
215 kfree(sharpsl_mtd);
216 return err;
217 }
218 188
219 /* Register the partitions */ 189 /* Register the partitions */
220 sharpsl_mtd->name = "sharpsl-nand"; 190 sharpsl->mtd.name = "sharpsl-nand";
221 nr_partitions = parse_mtd_partitions(sharpsl_mtd, part_probes, &sharpsl_partition_info, 0); 191#ifdef CONFIG_MTD_PARTITIONS
222 192 nr_partitions = parse_mtd_partitions(&sharpsl->mtd, part_probes, &sharpsl_partition_info, 0);
223 if (nr_partitions <= 0) { 193 if (nr_partitions <= 0) {
224 nr_partitions = DEFAULT_NUM_PARTITIONS; 194 nr_partitions = data->nr_partitions;
225 sharpsl_partition_info = sharpsl_nand_default_partition_info; 195 sharpsl_partition_info = data->partitions;
226 if (machine_is_poodle()) {
227 sharpsl_partition_info[1].size = 22 * 1024 * 1024;
228 } else if (machine_is_corgi() || machine_is_shepherd()) {
229 sharpsl_partition_info[1].size = 25 * 1024 * 1024;
230 } else if (machine_is_husky()) {
231 sharpsl_partition_info[1].size = 53 * 1024 * 1024;
232 } else if (machine_is_spitz()) {
233 sharpsl_partition_info[1].size = 5 * 1024 * 1024;
234 } else if (machine_is_akita()) {
235 sharpsl_partition_info[1].size = 58 * 1024 * 1024;
236 } else if (machine_is_borzoi()) {
237 sharpsl_partition_info[1].size = 32 * 1024 * 1024;
238 }
239 } 196 }
240 197
241 add_mtd_partitions(sharpsl_mtd, sharpsl_partition_info, nr_partitions); 198 if (nr_partitions > 0)
199 err = add_mtd_partitions(&sharpsl->mtd, sharpsl_partition_info, nr_partitions);
200 else
201#endif
202 err = add_mtd_device(&sharpsl->mtd);
203 if (err)
204 goto err_add;
242 205
243 /* Return happy */ 206 /* Return happy */
244 return 0; 207 return 0;
245}
246 208
247module_init(sharpsl_nand_init); 209err_add:
210 nand_release(&sharpsl->mtd);
211
212err_scan:
213 platform_set_drvdata(pdev, NULL);
214 iounmap(sharpsl->io);
215err_ioremap:
216err_get_res:
217 kfree(sharpsl);
218 return err;
219}
248 220
249/* 221/*
250 * Clean up routine 222 * Clean up routine
251 */ 223 */
252static void __exit sharpsl_nand_cleanup(void) 224static int __devexit sharpsl_nand_remove(struct platform_device *pdev)
253{ 225{
226 struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev);
227
254 /* Release resources, unregister device */ 228 /* Release resources, unregister device */
255 nand_release(sharpsl_mtd); 229 nand_release(&sharpsl->mtd);
256 230
257 iounmap(sharpsl_io_base); 231 platform_set_drvdata(pdev, NULL);
232
233 iounmap(sharpsl->io);
258 234
259 /* Free the MTD device structure */ 235 /* Free the MTD device structure */
260 kfree(sharpsl_mtd); 236 kfree(sharpsl);
237
238 return 0;
239}
240
241static struct platform_driver sharpsl_nand_driver = {
242 .driver = {
243 .name = "sharpsl-nand",
244 .owner = THIS_MODULE,
245 },
246 .probe = sharpsl_nand_probe,
247 .remove = __devexit_p(sharpsl_nand_remove),
248};
249
250static int __init sharpsl_nand_init(void)
251{
252 return platform_driver_register(&sharpsl_nand_driver);
261} 253}
254module_init(sharpsl_nand_init);
262 255
263module_exit(sharpsl_nand_cleanup); 256static void __exit sharpsl_nand_exit(void)
257{
258 platform_driver_unregister(&sharpsl_nand_driver);
259}
260module_exit(sharpsl_nand_exit);
264 261
265MODULE_LICENSE("GPL"); 262MODULE_LICENSE("GPL");
266MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>"); 263MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index edb1e322113d..daa6a4c3b8ce 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -433,7 +433,7 @@ static int tmio_probe(struct platform_device *dev)
433 nand_chip->chip_delay = 15; 433 nand_chip->chip_delay = 15;
434 434
435 retval = request_irq(irq, &tmio_irq, 435 retval = request_irq(irq, &tmio_irq,
436 IRQF_DISABLED, dev->dev.bus_id, tmio); 436 IRQF_DISABLED, dev_name(&dev->dev), tmio);
437 if (retval) { 437 if (retval) {
438 dev_err(&dev->dev, "request_irq error %d\n", retval); 438 dev_err(&dev->dev, "request_irq error %d\n", retval);
439 goto err_irq; 439 goto err_irq;
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index 320b929abe79..d1c4546513f7 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -39,7 +39,7 @@ static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
39 struct NFTLrecord *nftl; 39 struct NFTLrecord *nftl;
40 unsigned long temp; 40 unsigned long temp;
41 41
42 if (mtd->type != MTD_NANDFLASH) 42 if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX)
43 return; 43 return;
44 /* OK, this is moderately ugly. But probably safe. Alternatives? */ 44 /* OK, this is moderately ugly. But probably safe. Alternatives? */
45 if (memcmp(mtd->name, "DiskOnChip", 10)) 45 if (memcmp(mtd->name, "DiskOnChip", 10))
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index ccc4f209fbb5..8b22b1836e9f 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -51,7 +51,7 @@ static int find_boot_record(struct NFTLrecord *nftl)
51 the mtd device accordingly. We could even get rid of 51 the mtd device accordingly. We could even get rid of
52 nftl->EraseSize if there were any point in doing so. */ 52 nftl->EraseSize if there were any point in doing so. */
53 nftl->EraseSize = nftl->mbd.mtd->erasesize; 53 nftl->EraseSize = nftl->mbd.mtd->erasesize;
54 nftl->nb_blocks = nftl->mbd.mtd->size / nftl->EraseSize; 54 nftl->nb_blocks = (u32)nftl->mbd.mtd->size / nftl->EraseSize;
55 55
56 nftl->MediaUnit = BLOCK_NIL; 56 nftl->MediaUnit = BLOCK_NIL;
57 nftl->SpareMediaUnit = BLOCK_NIL; 57 nftl->SpareMediaUnit = BLOCK_NIL;
@@ -168,7 +168,7 @@ device is already correct.
168 printk(KERN_NOTICE "WARNING: Support for NFTL with UnitSizeFactor 0x%02x is experimental\n", 168 printk(KERN_NOTICE "WARNING: Support for NFTL with UnitSizeFactor 0x%02x is experimental\n",
169 mh->UnitSizeFactor); 169 mh->UnitSizeFactor);
170 nftl->EraseSize = nftl->mbd.mtd->erasesize << (0xff - mh->UnitSizeFactor); 170 nftl->EraseSize = nftl->mbd.mtd->erasesize << (0xff - mh->UnitSizeFactor);
171 nftl->nb_blocks = nftl->mbd.mtd->size / nftl->EraseSize; 171 nftl->nb_blocks = (u32)nftl->mbd.mtd->size / nftl->EraseSize;
172 } 172 }
173#endif 173#endif
174 nftl->nb_boot_blocks = le16_to_cpu(mh->FirstPhysicalEUN); 174 nftl->nb_boot_blocks = le16_to_cpu(mh->FirstPhysicalEUN);
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index ad81ab8e95e2..5b69e7773c6c 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -63,7 +63,7 @@ static int __devinit generic_onenand_probe(struct device *dev)
63 info->onenand.mmcontrol = pdata->mmcontrol; 63 info->onenand.mmcontrol = pdata->mmcontrol;
64 info->onenand.irq = platform_get_irq(pdev, 0); 64 info->onenand.irq = platform_get_irq(pdev, 0);
65 65
66 info->mtd.name = pdev->dev.bus_id; 66 info->mtd.name = dev_name(&pdev->dev);
67 info->mtd.priv = &info->onenand; 67 info->mtd.priv = &info->onenand;
68 info->mtd.owner = THIS_MODULE; 68 info->mtd.owner = THIS_MODULE;
69 69
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index a7e4d985f5ef..96ecc1766fa8 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -149,7 +149,7 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
149 149
150 INIT_COMPLETION(c->irq_done); 150 INIT_COMPLETION(c->irq_done);
151 if (c->gpio_irq) { 151 if (c->gpio_irq) {
152 result = omap_get_gpio_datain(c->gpio_irq); 152 result = gpio_get_value(c->gpio_irq);
153 if (result == -1) { 153 if (result == -1) {
154 ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS); 154 ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
155 intr = read_reg(c, ONENAND_REG_INTERRUPT); 155 intr = read_reg(c, ONENAND_REG_INTERRUPT);
@@ -634,9 +634,9 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
634 "OneNAND\n", c->gpio_irq); 634 "OneNAND\n", c->gpio_irq);
635 goto err_iounmap; 635 goto err_iounmap;
636 } 636 }
637 omap_set_gpio_direction(c->gpio_irq, 1); 637 gpio_direction_input(c->gpio_irq);
638 638
639 if ((r = request_irq(OMAP_GPIO_IRQ(c->gpio_irq), 639 if ((r = request_irq(gpio_to_irq(c->gpio_irq),
640 omap2_onenand_interrupt, IRQF_TRIGGER_RISING, 640 omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
641 pdev->dev.driver->name, c)) < 0) 641 pdev->dev.driver->name, c)) < 0)
642 goto err_release_gpio; 642 goto err_release_gpio;
@@ -668,7 +668,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
668 c->onenand.base); 668 c->onenand.base);
669 669
670 c->pdev = pdev; 670 c->pdev = pdev;
671 c->mtd.name = pdev->dev.bus_id; 671 c->mtd.name = dev_name(&pdev->dev);
672 c->mtd.priv = &c->onenand; 672 c->mtd.priv = &c->onenand;
673 c->mtd.owner = THIS_MODULE; 673 c->mtd.owner = THIS_MODULE;
674 674
@@ -723,7 +723,7 @@ err_release_dma:
723 if (c->dma_channel != -1) 723 if (c->dma_channel != -1)
724 omap_free_dma(c->dma_channel); 724 omap_free_dma(c->dma_channel);
725 if (c->gpio_irq) 725 if (c->gpio_irq)
726 free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c); 726 free_irq(gpio_to_irq(c->gpio_irq), c);
727err_release_gpio: 727err_release_gpio:
728 if (c->gpio_irq) 728 if (c->gpio_irq)
729 omap_free_gpio(c->gpio_irq); 729 omap_free_gpio(c->gpio_irq);
@@ -760,7 +760,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
760 omap2_onenand_shutdown(pdev); 760 omap2_onenand_shutdown(pdev);
761 platform_set_drvdata(pdev, NULL); 761 platform_set_drvdata(pdev, NULL);
762 if (c->gpio_irq) { 762 if (c->gpio_irq) {
763 free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c); 763 free_irq(gpio_to_irq(c->gpio_irq), c);
764 omap_free_gpio(c->gpio_irq); 764 omap_free_gpio(c->gpio_irq);
765 } 765 }
766 iounmap(c->onenand.base); 766 iounmap(c->onenand.base);
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 90ed319f26e6..529af271db17 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1772,7 +1772,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
1772 int len; 1772 int len;
1773 int ret = 0; 1773 int ret = 0;
1774 1774
1775 DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%08x, len = %i\n", (unsigned int) instr->addr, (unsigned int) instr->len); 1775 DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%012llx, len = %llu\n", (unsigned long long) instr->addr, (unsigned long long) instr->len);
1776 1776
1777 block_size = (1 << this->erase_shift); 1777 block_size = (1 << this->erase_shift);
1778 1778
@@ -1810,7 +1810,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
1810 1810
1811 /* Check if we have a bad block, we do not erase bad blocks */ 1811 /* Check if we have a bad block, we do not erase bad blocks */
1812 if (onenand_block_isbad_nolock(mtd, addr, 0)) { 1812 if (onenand_block_isbad_nolock(mtd, addr, 0)) {
1813 printk (KERN_WARNING "onenand_erase: attempt to erase a bad block at addr 0x%08x\n", (unsigned int) addr); 1813 printk (KERN_WARNING "onenand_erase: attempt to erase a bad block at addr 0x%012llx\n", (unsigned long long) addr);
1814 instr->state = MTD_ERASE_FAILED; 1814 instr->state = MTD_ERASE_FAILED;
1815 goto erase_exit; 1815 goto erase_exit;
1816 } 1816 }
@@ -2029,7 +2029,7 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
2029 * 2029 *
2030 * Lock one or more blocks 2030 * Lock one or more blocks
2031 */ 2031 */
2032static int onenand_lock(struct mtd_info *mtd, loff_t ofs, size_t len) 2032static int onenand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2033{ 2033{
2034 int ret; 2034 int ret;
2035 2035
@@ -2047,7 +2047,7 @@ static int onenand_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
2047 * 2047 *
2048 * Unlock one or more blocks 2048 * Unlock one or more blocks
2049 */ 2049 */
2050static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) 2050static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2051{ 2051{
2052 int ret; 2052 int ret;
2053 2053
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index e538c0a72abb..d2aa9c46530f 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -21,8 +21,6 @@
21 21
22#include <asm/types.h> 22#include <asm/types.h>
23 23
24#define const_cpu_to_le16 __constant_cpu_to_le16
25
26static int block_size = 0; 24static int block_size = 0;
27module_param(block_size, int, 0); 25module_param(block_size, int, 0);
28MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size"); 26MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");
@@ -156,7 +154,7 @@ static int scan_header(struct partition *part)
156 size_t retlen; 154 size_t retlen;
157 155
158 sectors_per_block = part->block_size / SECTOR_SIZE; 156 sectors_per_block = part->block_size / SECTOR_SIZE;
159 part->total_blocks = part->mbd.mtd->size / part->block_size; 157 part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;
160 158
161 if (part->total_blocks < 2) 159 if (part->total_blocks < 2)
162 return -ENOENT; 160 return -ENOENT;
@@ -276,16 +274,17 @@ static void erase_callback(struct erase_info *erase)
276 274
277 part = (struct partition*)erase->priv; 275 part = (struct partition*)erase->priv;
278 276
279 i = erase->addr / part->block_size; 277 i = (u32)erase->addr / part->block_size;
280 if (i >= part->total_blocks || part->blocks[i].offset != erase->addr) { 278 if (i >= part->total_blocks || part->blocks[i].offset != erase->addr ||
281 printk(KERN_ERR PREFIX "erase callback for unknown offset %x " 279 erase->addr > UINT_MAX) {
282 "on '%s'\n", erase->addr, part->mbd.mtd->name); 280 printk(KERN_ERR PREFIX "erase callback for unknown offset %llx "
281 "on '%s'\n", (unsigned long long)erase->addr, part->mbd.mtd->name);
283 return; 282 return;
284 } 283 }
285 284
286 if (erase->state != MTD_ERASE_DONE) { 285 if (erase->state != MTD_ERASE_DONE) {
287 printk(KERN_WARNING PREFIX "erase failed at 0x%x on '%s', " 286 printk(KERN_WARNING PREFIX "erase failed at 0x%llx on '%s', "
288 "state %d\n", erase->addr, 287 "state %d\n", (unsigned long long)erase->addr,
289 part->mbd.mtd->name, erase->state); 288 part->mbd.mtd->name, erase->state);
290 289
291 part->blocks[i].state = BLOCK_FAILED; 290 part->blocks[i].state = BLOCK_FAILED;
@@ -297,7 +296,7 @@ static void erase_callback(struct erase_info *erase)
297 return; 296 return;
298 } 297 }
299 298
300 magic = const_cpu_to_le16(RFD_MAGIC); 299 magic = cpu_to_le16(RFD_MAGIC);
301 300
302 part->blocks[i].state = BLOCK_ERASED; 301 part->blocks[i].state = BLOCK_ERASED;
303 part->blocks[i].free_sectors = part->data_sectors_per_block; 302 part->blocks[i].free_sectors = part->data_sectors_per_block;
@@ -345,9 +344,9 @@ static int erase_block(struct partition *part, int block)
345 rc = part->mbd.mtd->erase(part->mbd.mtd, erase); 344 rc = part->mbd.mtd->erase(part->mbd.mtd, erase);
346 345
347 if (rc) { 346 if (rc) {
348 printk(KERN_ERR PREFIX "erase of region %x,%x on '%s' " 347 printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
349 "failed\n", erase->addr, erase->len, 348 "failed\n", (unsigned long long)erase->addr,
350 part->mbd.mtd->name); 349 (unsigned long long)erase->len, part->mbd.mtd->name);
351 kfree(erase); 350 kfree(erase);
352 } 351 }
353 352
@@ -587,7 +586,7 @@ static int mark_sector_deleted(struct partition *part, u_long old_addr)
587 int block, offset, rc; 586 int block, offset, rc;
588 u_long addr; 587 u_long addr;
589 size_t retlen; 588 size_t retlen;
590 u16 del = const_cpu_to_le16(SECTOR_DELETED); 589 u16 del = cpu_to_le16(SECTOR_DELETED);
591 590
592 block = old_addr / part->block_size; 591 block = old_addr / part->block_size;
593 offset = (old_addr % part->block_size) / SECTOR_SIZE - 592 offset = (old_addr % part->block_size) / SECTOR_SIZE -
@@ -763,7 +762,7 @@ static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
763{ 762{
764 struct partition *part; 763 struct partition *part;
765 764
766 if (mtd->type != MTD_NORFLASH) 765 if (mtd->type != MTD_NORFLASH || mtd->size > UINT_MAX)
767 return; 766 return;
768 767
769 part = kzalloc(sizeof(struct partition), GFP_KERNEL); 768 part = kzalloc(sizeof(struct partition), GFP_KERNEL);
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 33a5d6ed6f18..3f67e00d98e0 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -294,7 +294,8 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
294 int cis_sector; 294 int cis_sector;
295 295
296 /* Check for small page NAND flash */ 296 /* Check for small page NAND flash */
297 if (mtd->type != MTD_NANDFLASH || mtd->oobsize != OOB_SIZE) 297 if (mtd->type != MTD_NANDFLASH || mtd->oobsize != OOB_SIZE ||
298 mtd->size > UINT_MAX)
298 return; 299 return;
299 300
300 /* Check for SSDFC format by reading CIS/IDI sector */ 301 /* Check for SSDFC format by reading CIS/IDI sector */
@@ -316,7 +317,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
316 317
317 ssfdc->cis_block = cis_sector / (mtd->erasesize >> SECTOR_SHIFT); 318 ssfdc->cis_block = cis_sector / (mtd->erasesize >> SECTOR_SHIFT);
318 ssfdc->erase_size = mtd->erasesize; 319 ssfdc->erase_size = mtd->erasesize;
319 ssfdc->map_len = mtd->size / mtd->erasesize; 320 ssfdc->map_len = (u32)mtd->size / mtd->erasesize;
320 321
321 DEBUG(MTD_DEBUG_LEVEL1, 322 DEBUG(MTD_DEBUG_LEVEL1,
322 "SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n", 323 "SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n",
@@ -327,7 +328,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
327 ssfdc->heads = 16; 328 ssfdc->heads = 16;
328 ssfdc->sectors = 32; 329 ssfdc->sectors = 32;
329 get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors); 330 get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors);
330 ssfdc->cylinders = (unsigned short)((mtd->size >> SECTOR_SHIFT) / 331 ssfdc->cylinders = (unsigned short)(((u32)mtd->size >> SECTOR_SHIFT) /
331 ((long)ssfdc->sectors * (long)ssfdc->heads)); 332 ((long)ssfdc->sectors * (long)ssfdc->heads));
332 333
333 DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n", 334 DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n",
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index c7630a228310..9082768cc6c3 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -280,7 +280,7 @@ static int ubi_sysfs_init(struct ubi_device *ubi)
280 ubi->dev.release = dev_release; 280 ubi->dev.release = dev_release;
281 ubi->dev.devt = ubi->cdev.dev; 281 ubi->dev.devt = ubi->cdev.dev;
282 ubi->dev.class = ubi_class; 282 ubi->dev.class = ubi_class;
283 sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num); 283 dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
284 err = device_register(&ubi->dev); 284 err = device_register(&ubi->dev);
285 if (err) 285 if (err)
286 return err; 286 return err;
@@ -561,7 +561,7 @@ static int io_init(struct ubi_device *ubi)
561 */ 561 */
562 562
563 ubi->peb_size = ubi->mtd->erasesize; 563 ubi->peb_size = ubi->mtd->erasesize;
564 ubi->peb_count = ubi->mtd->size / ubi->mtd->erasesize; 564 ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
565 ubi->flash_size = ubi->mtd->size; 565 ubi->flash_size = ubi->mtd->size;
566 566
567 if (ubi->mtd->block_isbad && ubi->mtd->block_markbad) 567 if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
@@ -815,19 +815,20 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
815 if (err) 815 if (err)
816 goto out_free; 816 goto out_free;
817 817
818 err = -ENOMEM;
818 ubi->peb_buf1 = vmalloc(ubi->peb_size); 819 ubi->peb_buf1 = vmalloc(ubi->peb_size);
819 if (!ubi->peb_buf1) 820 if (!ubi->peb_buf1)
820 goto out_free; 821 goto out_free;
821 822
822 ubi->peb_buf2 = vmalloc(ubi->peb_size); 823 ubi->peb_buf2 = vmalloc(ubi->peb_size);
823 if (!ubi->peb_buf2) 824 if (!ubi->peb_buf2)
824 goto out_free; 825 goto out_free;
825 826
826#ifdef CONFIG_MTD_UBI_DEBUG 827#ifdef CONFIG_MTD_UBI_DEBUG
827 mutex_init(&ubi->dbg_buf_mutex); 828 mutex_init(&ubi->dbg_buf_mutex);
828 ubi->dbg_peb_buf = vmalloc(ubi->peb_size); 829 ubi->dbg_peb_buf = vmalloc(ubi->peb_size);
829 if (!ubi->dbg_peb_buf) 830 if (!ubi->dbg_peb_buf)
830 goto out_free; 831 goto out_free;
831#endif 832#endif
832 833
833 err = attach_by_scanning(ubi); 834 err = attach_by_scanning(ubi);
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index b30a0b83d7f1..98cf31ed0814 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -721,7 +721,8 @@ static int rename_volumes(struct ubi_device *ubi,
721 * It seems we need to remove volume with name @re->new_name, 721 * It seems we need to remove volume with name @re->new_name,
722 * if it exists. 722 * if it exists.
723 */ 723 */
724 desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name, UBI_EXCLUSIVE); 724 desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name,
725 UBI_EXCLUSIVE);
725 if (IS_ERR(desc)) { 726 if (IS_ERR(desc)) {
726 err = PTR_ERR(desc); 727 err = PTR_ERR(desc);
727 if (err == -ENODEV) 728 if (err == -ENODEV)
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 78e914d23ece..13777e5beac9 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -27,11 +27,11 @@
27#define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__) 27#define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__)
28 28
29#define ubi_assert(expr) do { \ 29#define ubi_assert(expr) do { \
30 if (unlikely(!(expr))) { \ 30 if (unlikely(!(expr))) { \
31 printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \ 31 printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \
32 __func__, __LINE__, current->pid); \ 32 __func__, __LINE__, current->pid); \
33 ubi_dbg_dump_stack(); \ 33 ubi_dbg_dump_stack(); \
34 } \ 34 } \
35} while (0) 35} while (0)
36 36
37#define dbg_msg(fmt, ...) \ 37#define dbg_msg(fmt, ...) \
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index e04bcf1dff87..048a606cebde 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -504,12 +504,9 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
504 if (!vid_hdr) 504 if (!vid_hdr)
505 return -ENOMEM; 505 return -ENOMEM;
506 506
507 mutex_lock(&ubi->buf_mutex);
508
509retry: 507retry:
510 new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN); 508 new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
511 if (new_pnum < 0) { 509 if (new_pnum < 0) {
512 mutex_unlock(&ubi->buf_mutex);
513 ubi_free_vid_hdr(ubi, vid_hdr); 510 ubi_free_vid_hdr(ubi, vid_hdr);
514 return new_pnum; 511 return new_pnum;
515 } 512 }
@@ -529,20 +526,23 @@ retry:
529 goto write_error; 526 goto write_error;
530 527
531 data_size = offset + len; 528 data_size = offset + len;
529 mutex_lock(&ubi->buf_mutex);
532 memset(ubi->peb_buf1 + offset, 0xFF, len); 530 memset(ubi->peb_buf1 + offset, 0xFF, len);
533 531
534 /* Read everything before the area where the write failure happened */ 532 /* Read everything before the area where the write failure happened */
535 if (offset > 0) { 533 if (offset > 0) {
536 err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset); 534 err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset);
537 if (err && err != UBI_IO_BITFLIPS) 535 if (err && err != UBI_IO_BITFLIPS)
538 goto out_put; 536 goto out_unlock;
539 } 537 }
540 538
541 memcpy(ubi->peb_buf1 + offset, buf, len); 539 memcpy(ubi->peb_buf1 + offset, buf, len);
542 540
543 err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size); 541 err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size);
544 if (err) 542 if (err) {
543 mutex_unlock(&ubi->buf_mutex);
545 goto write_error; 544 goto write_error;
545 }
546 546
547 mutex_unlock(&ubi->buf_mutex); 547 mutex_unlock(&ubi->buf_mutex);
548 ubi_free_vid_hdr(ubi, vid_hdr); 548 ubi_free_vid_hdr(ubi, vid_hdr);
@@ -553,8 +553,9 @@ retry:
553 ubi_msg("data was successfully recovered"); 553 ubi_msg("data was successfully recovered");
554 return 0; 554 return 0;
555 555
556out_put: 556out_unlock:
557 mutex_unlock(&ubi->buf_mutex); 557 mutex_unlock(&ubi->buf_mutex);
558out_put:
558 ubi_wl_put_peb(ubi, new_pnum, 1); 559 ubi_wl_put_peb(ubi, new_pnum, 1);
559 ubi_free_vid_hdr(ubi, vid_hdr); 560 ubi_free_vid_hdr(ubi, vid_hdr);
560 return err; 561 return err;
@@ -567,7 +568,6 @@ write_error:
567 ubi_warn("failed to write to PEB %d", new_pnum); 568 ubi_warn("failed to write to PEB %d", new_pnum);
568 ubi_wl_put_peb(ubi, new_pnum, 1); 569 ubi_wl_put_peb(ubi, new_pnum, 1);
569 if (++tries > UBI_IO_RETRIES) { 570 if (++tries > UBI_IO_RETRIES) {
570 mutex_unlock(&ubi->buf_mutex);
571 ubi_free_vid_hdr(ubi, vid_hdr); 571 ubi_free_vid_hdr(ubi, vid_hdr);
572 return err; 572 return err;
573 } 573 }
@@ -949,10 +949,14 @@ write_error:
949 * This function copies logical eraseblock from physical eraseblock @from to 949 * This function copies logical eraseblock from physical eraseblock @from to
950 * physical eraseblock @to. The @vid_hdr buffer may be changed by this 950 * physical eraseblock @to. The @vid_hdr buffer may be changed by this
951 * function. Returns: 951 * function. Returns:
952 * o %0 in case of success; 952 * o %0 in case of success;
953 * o %1 if the operation was canceled and should be tried later (e.g., 953 * o %1 if the operation was canceled because the volume is being deleted
954 * because a bit-flip was detected at the target PEB); 954 * or because the PEB was put meanwhile;
955 * o %2 if the volume is being deleted and this LEB should not be moved. 955 * o %2 if the operation was canceled because there was a write error to the
956 * target PEB;
957 * o %-EAGAIN if the operation was canceled because a bit-flip was detected
958 * in the target PEB;
959 * o a negative error code in case of failure.
956 */ 960 */
957int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 961int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
958 struct ubi_vid_hdr *vid_hdr) 962 struct ubi_vid_hdr *vid_hdr)
@@ -978,7 +982,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
978 /* 982 /*
979 * Note, we may race with volume deletion, which means that the volume 983 * Note, we may race with volume deletion, which means that the volume
980 * this logical eraseblock belongs to might be being deleted. Since the 984 * this logical eraseblock belongs to might be being deleted. Since the
981 * volume deletion unmaps all the volume's logical eraseblocks, it will 985 * volume deletion un-maps all the volume's logical eraseblocks, it will
982 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish. 986 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
983 */ 987 */
984 vol = ubi->volumes[idx]; 988 vol = ubi->volumes[idx];
@@ -986,7 +990,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
986 /* No need to do further work, cancel */ 990 /* No need to do further work, cancel */
987 dbg_eba("volume %d is being removed, cancel", vol_id); 991 dbg_eba("volume %d is being removed, cancel", vol_id);
988 spin_unlock(&ubi->volumes_lock); 992 spin_unlock(&ubi->volumes_lock);
989 return 2; 993 return 1;
990 } 994 }
991 spin_unlock(&ubi->volumes_lock); 995 spin_unlock(&ubi->volumes_lock);
992 996
@@ -1022,8 +1026,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1022 } 1026 }
1023 1027
1024 /* 1028 /*
1025 * OK, now the LEB is locked and we can safely start moving iy. Since 1029 * OK, now the LEB is locked and we can safely start moving it. Since
1026 * this function utilizes thie @ubi->peb1_buf buffer which is shared 1030 * this function utilizes the @ubi->peb1_buf buffer which is shared
1027 * with some other functions, so lock the buffer by taking the 1031 * with some other functions, so lock the buffer by taking the
1028 * @ubi->buf_mutex. 1032 * @ubi->buf_mutex.
1029 */ 1033 */
@@ -1068,8 +1072,11 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1068 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); 1072 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
1069 1073
1070 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); 1074 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
1071 if (err) 1075 if (err) {
1076 if (err == -EIO)
1077 err = 2;
1072 goto out_unlock_buf; 1078 goto out_unlock_buf;
1079 }
1073 1080
1074 cond_resched(); 1081 cond_resched();
1075 1082
@@ -1079,14 +1086,17 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1079 if (err != UBI_IO_BITFLIPS) 1086 if (err != UBI_IO_BITFLIPS)
1080 ubi_warn("cannot read VID header back from PEB %d", to); 1087 ubi_warn("cannot read VID header back from PEB %d", to);
1081 else 1088 else
1082 err = 1; 1089 err = -EAGAIN;
1083 goto out_unlock_buf; 1090 goto out_unlock_buf;
1084 } 1091 }
1085 1092
1086 if (data_size > 0) { 1093 if (data_size > 0) {
1087 err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size); 1094 err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
1088 if (err) 1095 if (err) {
1096 if (err == -EIO)
1097 err = 2;
1089 goto out_unlock_buf; 1098 goto out_unlock_buf;
1099 }
1090 1100
1091 cond_resched(); 1101 cond_resched();
1092 1102
@@ -1101,15 +1111,16 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1101 ubi_warn("cannot read data back from PEB %d", 1111 ubi_warn("cannot read data back from PEB %d",
1102 to); 1112 to);
1103 else 1113 else
1104 err = 1; 1114 err = -EAGAIN;
1105 goto out_unlock_buf; 1115 goto out_unlock_buf;
1106 } 1116 }
1107 1117
1108 cond_resched(); 1118 cond_resched();
1109 1119
1110 if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) { 1120 if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
1111 ubi_warn("read data back from PEB %d - it is different", 1121 ubi_warn("read data back from PEB %d and it is "
1112 to); 1122 "different", to);
1123 err = -EINVAL;
1113 goto out_unlock_buf; 1124 goto out_unlock_buf;
1114 } 1125 }
1115 } 1126 }
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 605812bb0b1a..6dd4f5e77f82 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -215,7 +215,8 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
215 struct ubi_volume *vol; 215 struct ubi_volume *vol;
216 struct ubi_device *ubi; 216 struct ubi_device *ubi;
217 217
218 dbg_gen("erase %u bytes at offset %u", instr->len, instr->addr); 218 dbg_gen("erase %llu bytes at offset %llu", (unsigned long long)instr->len,
219 (unsigned long long)instr->addr);
219 220
220 if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize) 221 if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize)
221 return -EINVAL; 222 return -EINVAL;
@@ -223,11 +224,11 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
223 if (instr->len < 0 || instr->addr + instr->len > mtd->size) 224 if (instr->len < 0 || instr->addr + instr->len > mtd->size)
224 return -EINVAL; 225 return -EINVAL;
225 226
226 if (instr->addr % mtd->writesize || instr->len % mtd->writesize) 227 if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd))
227 return -EINVAL; 228 return -EINVAL;
228 229
229 lnum = instr->addr / mtd->erasesize; 230 lnum = mtd_div_by_eb(instr->addr, mtd);
230 count = instr->len / mtd->erasesize; 231 count = mtd_div_by_eb(instr->len, mtd);
231 232
232 vol = container_of(mtd, struct ubi_volume, gluebi_mtd); 233 vol = container_of(mtd, struct ubi_volume, gluebi_mtd);
233 ubi = vol->ubi; 234 ubi = vol->ubi;
@@ -255,7 +256,7 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
255 256
256out_err: 257out_err:
257 instr->state = MTD_ERASE_FAILED; 258 instr->state = MTD_ERASE_FAILED;
258 instr->fail_addr = lnum * mtd->erasesize; 259 instr->fail_addr = (long long)lnum * mtd->erasesize;
259 return err; 260 return err;
260} 261}
261 262
@@ -294,7 +295,7 @@ int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol)
294 * bytes. 295 * bytes.
295 */ 296 */
296 if (vol->vol_type == UBI_DYNAMIC_VOLUME) 297 if (vol->vol_type == UBI_DYNAMIC_VOLUME)
297 mtd->size = vol->usable_leb_size * vol->reserved_pebs; 298 mtd->size = (long long)vol->usable_leb_size * vol->reserved_pebs;
298 else 299 else
299 mtd->size = vol->used_bytes; 300 mtd->size = vol->used_bytes;
300 301
@@ -304,8 +305,8 @@ int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol)
304 return -ENFILE; 305 return -ENFILE;
305 } 306 }
306 307
307 dbg_gen("added mtd%d (\"%s\"), size %u, EB size %u", 308 dbg_gen("added mtd%d (\"%s\"), size %llu, EB size %u",
308 mtd->index, mtd->name, mtd->size, mtd->erasesize); 309 mtd->index, mtd->name, (unsigned long long)mtd->size, mtd->erasesize);
309 return 0; 310 return 0;
310} 311}
311 312
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 2fb64be44f1b..a74118c05745 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -637,8 +637,6 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
637 637
638 dbg_io("read EC header from PEB %d", pnum); 638 dbg_io("read EC header from PEB %d", pnum);
639 ubi_assert(pnum >= 0 && pnum < ubi->peb_count); 639 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
640 if (UBI_IO_DEBUG)
641 verbose = 1;
642 640
643 err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); 641 err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
644 if (err) { 642 if (err) {
@@ -685,6 +683,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
685 if (verbose) 683 if (verbose)
686 ubi_warn("no EC header found at PEB %d, " 684 ubi_warn("no EC header found at PEB %d, "
687 "only 0xFF bytes", pnum); 685 "only 0xFF bytes", pnum);
686 else if (UBI_IO_DEBUG)
687 dbg_msg("no EC header found at PEB %d, "
688 "only 0xFF bytes", pnum);
688 return UBI_IO_PEB_EMPTY; 689 return UBI_IO_PEB_EMPTY;
689 } 690 }
690 691
@@ -696,7 +697,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
696 ubi_warn("bad magic number at PEB %d: %08x instead of " 697 ubi_warn("bad magic number at PEB %d: %08x instead of "
697 "%08x", pnum, magic, UBI_EC_HDR_MAGIC); 698 "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
698 ubi_dbg_dump_ec_hdr(ec_hdr); 699 ubi_dbg_dump_ec_hdr(ec_hdr);
699 } 700 } else if (UBI_IO_DEBUG)
701 dbg_msg("bad magic number at PEB %d: %08x instead of "
702 "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
700 return UBI_IO_BAD_EC_HDR; 703 return UBI_IO_BAD_EC_HDR;
701 } 704 }
702 705
@@ -708,7 +711,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
708 ubi_warn("bad EC header CRC at PEB %d, calculated " 711 ubi_warn("bad EC header CRC at PEB %d, calculated "
709 "%#08x, read %#08x", pnum, crc, hdr_crc); 712 "%#08x, read %#08x", pnum, crc, hdr_crc);
710 ubi_dbg_dump_ec_hdr(ec_hdr); 713 ubi_dbg_dump_ec_hdr(ec_hdr);
711 } 714 } else if (UBI_IO_DEBUG)
715 dbg_msg("bad EC header CRC at PEB %d, calculated "
716 "%#08x, read %#08x", pnum, crc, hdr_crc);
712 return UBI_IO_BAD_EC_HDR; 717 return UBI_IO_BAD_EC_HDR;
713 } 718 }
714 719
@@ -912,8 +917,6 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
912 917
913 dbg_io("read VID header from PEB %d", pnum); 918 dbg_io("read VID header from PEB %d", pnum);
914 ubi_assert(pnum >= 0 && pnum < ubi->peb_count); 919 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
915 if (UBI_IO_DEBUG)
916 verbose = 1;
917 920
918 p = (char *)vid_hdr - ubi->vid_hdr_shift; 921 p = (char *)vid_hdr - ubi->vid_hdr_shift;
919 err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, 922 err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
@@ -960,6 +963,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
960 if (verbose) 963 if (verbose)
961 ubi_warn("no VID header found at PEB %d, " 964 ubi_warn("no VID header found at PEB %d, "
962 "only 0xFF bytes", pnum); 965 "only 0xFF bytes", pnum);
966 else if (UBI_IO_DEBUG)
967 dbg_msg("no VID header found at PEB %d, "
968 "only 0xFF bytes", pnum);
963 return UBI_IO_PEB_FREE; 969 return UBI_IO_PEB_FREE;
964 } 970 }
965 971
@@ -971,7 +977,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
971 ubi_warn("bad magic number at PEB %d: %08x instead of " 977 ubi_warn("bad magic number at PEB %d: %08x instead of "
972 "%08x", pnum, magic, UBI_VID_HDR_MAGIC); 978 "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
973 ubi_dbg_dump_vid_hdr(vid_hdr); 979 ubi_dbg_dump_vid_hdr(vid_hdr);
974 } 980 } else if (UBI_IO_DEBUG)
981 dbg_msg("bad magic number at PEB %d: %08x instead of "
982 "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
975 return UBI_IO_BAD_VID_HDR; 983 return UBI_IO_BAD_VID_HDR;
976 } 984 }
977 985
@@ -983,7 +991,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
983 ubi_warn("bad CRC at PEB %d, calculated %#08x, " 991 ubi_warn("bad CRC at PEB %d, calculated %#08x, "
984 "read %#08x", pnum, crc, hdr_crc); 992 "read %#08x", pnum, crc, hdr_crc);
985 ubi_dbg_dump_vid_hdr(vid_hdr); 993 ubi_dbg_dump_vid_hdr(vid_hdr);
986 } 994 } else if (UBI_IO_DEBUG)
995 dbg_msg("bad CRC at PEB %d, calculated %#08x, "
996 "read %#08x", pnum, crc, hdr_crc);
987 return UBI_IO_BAD_VID_HDR; 997 return UBI_IO_BAD_VID_HDR;
988 } 998 }
989 999
@@ -1024,7 +1034,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
1024 1034
1025 err = paranoid_check_peb_ec_hdr(ubi, pnum); 1035 err = paranoid_check_peb_ec_hdr(ubi, pnum);
1026 if (err) 1036 if (err)
1027 return err > 0 ? -EINVAL: err; 1037 return err > 0 ? -EINVAL : err;
1028 1038
1029 vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC); 1039 vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
1030 vid_hdr->version = UBI_VERSION; 1040 vid_hdr->version = UBI_VERSION;
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 4f2daa5bbecf..41d47e1cf15c 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -320,7 +320,7 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
320 } 320 }
321 321
322 err = ubi_io_read_data(ubi, buf, pnum, 0, len); 322 err = ubi_io_read_data(ubi, buf, pnum, 0, len);
323 if (err && err != UBI_IO_BITFLIPS) 323 if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
324 goto out_free_buf; 324 goto out_free_buf;
325 325
326 data_crc = be32_to_cpu(vid_hdr->data_crc); 326 data_crc = be32_to_cpu(vid_hdr->data_crc);
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 1c3fa18c26a7..4a8ec485c91d 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -74,6 +74,13 @@
74#define UBI_IO_RETRIES 3 74#define UBI_IO_RETRIES 3
75 75
76/* 76/*
77 * Length of the protection queue. The length is effectively equivalent to the
78 * number of (global) erase cycles PEBs are protected from the wear-leveling
79 * worker.
80 */
81#define UBI_PROT_QUEUE_LEN 10
82
83/*
77 * Error codes returned by the I/O sub-system. 84 * Error codes returned by the I/O sub-system.
78 * 85 *
79 * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only 86 * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only
@@ -95,7 +102,8 @@ enum {
95 102
96/** 103/**
97 * struct ubi_wl_entry - wear-leveling entry. 104 * struct ubi_wl_entry - wear-leveling entry.
98 * @rb: link in the corresponding RB-tree 105 * @u.rb: link in the corresponding (free/used) RB-tree
106 * @u.list: link in the protection queue
99 * @ec: erase counter 107 * @ec: erase counter
100 * @pnum: physical eraseblock number 108 * @pnum: physical eraseblock number
101 * 109 *
@@ -104,7 +112,10 @@ enum {
104 * RB-trees. See WL sub-system for details. 112 * RB-trees. See WL sub-system for details.
105 */ 113 */
106struct ubi_wl_entry { 114struct ubi_wl_entry {
107 struct rb_node rb; 115 union {
116 struct rb_node rb;
117 struct list_head list;
118 } u;
108 int ec; 119 int ec;
109 int pnum; 120 int pnum;
110}; 121};
@@ -288,7 +299,7 @@ struct ubi_wl_entry;
288 * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling 299 * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling
289 * 300 *
290 * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end 301 * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end
291 * of UBI ititializetion 302 * of UBI initialization
292 * @vtbl_slots: how many slots are available in the volume table 303 * @vtbl_slots: how many slots are available in the volume table
293 * @vtbl_size: size of the volume table in bytes 304 * @vtbl_size: size of the volume table in bytes
294 * @vtbl: in-RAM volume table copy 305 * @vtbl: in-RAM volume table copy
@@ -306,18 +317,17 @@ struct ubi_wl_entry;
306 * @used: RB-tree of used physical eraseblocks 317 * @used: RB-tree of used physical eraseblocks
307 * @free: RB-tree of free physical eraseblocks 318 * @free: RB-tree of free physical eraseblocks
308 * @scrub: RB-tree of physical eraseblocks which need scrubbing 319 * @scrub: RB-tree of physical eraseblocks which need scrubbing
309 * @prot: protection trees 320 * @pq: protection queue (contain physical eraseblocks which are temporarily
310 * @prot.pnum: protection tree indexed by physical eraseblock numbers 321 * protected from the wear-leveling worker)
311 * @prot.aec: protection tree indexed by absolute erase counter value 322 * @pq_head: protection queue head
312 * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from, 323 * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
313 * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works 324 * @move_to, @move_to_put @erase_pending, @wl_scheduled and @works
314 * fields 325 * fields
315 * @move_mutex: serializes eraseblock moves 326 * @move_mutex: serializes eraseblock moves
316 * @work_sem: sycnhronizes the WL worker with use tasks 327 * @work_sem: synchronizes the WL worker with use tasks
317 * @wl_scheduled: non-zero if the wear-leveling was scheduled 328 * @wl_scheduled: non-zero if the wear-leveling was scheduled
318 * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any 329 * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
319 * physical eraseblock 330 * physical eraseblock
320 * @abs_ec: absolute erase counter
321 * @move_from: physical eraseblock from where the data is being moved 331 * @move_from: physical eraseblock from where the data is being moved
322 * @move_to: physical eraseblock where the data is being moved to 332 * @move_to: physical eraseblock where the data is being moved to
323 * @move_to_put: if the "to" PEB was put 333 * @move_to_put: if the "to" PEB was put
@@ -351,11 +361,11 @@ struct ubi_wl_entry;
351 * 361 *
352 * @peb_buf1: a buffer of PEB size used for different purposes 362 * @peb_buf1: a buffer of PEB size used for different purposes
353 * @peb_buf2: another buffer of PEB size used for different purposes 363 * @peb_buf2: another buffer of PEB size used for different purposes
354 * @buf_mutex: proptects @peb_buf1 and @peb_buf2 364 * @buf_mutex: protects @peb_buf1 and @peb_buf2
355 * @ckvol_mutex: serializes static volume checking when opening 365 * @ckvol_mutex: serializes static volume checking when opening
356 * @mult_mutex: serializes operations on multiple volumes, like re-nameing 366 * @mult_mutex: serializes operations on multiple volumes, like re-naming
357 * @dbg_peb_buf: buffer of PEB size used for debugging 367 * @dbg_peb_buf: buffer of PEB size used for debugging
358 * @dbg_buf_mutex: proptects @dbg_peb_buf 368 * @dbg_buf_mutex: protects @dbg_peb_buf
359 */ 369 */
360struct ubi_device { 370struct ubi_device {
361 struct cdev cdev; 371 struct cdev cdev;
@@ -392,16 +402,13 @@ struct ubi_device {
392 struct rb_root used; 402 struct rb_root used;
393 struct rb_root free; 403 struct rb_root free;
394 struct rb_root scrub; 404 struct rb_root scrub;
395 struct { 405 struct list_head pq[UBI_PROT_QUEUE_LEN];
396 struct rb_root pnum; 406 int pq_head;
397 struct rb_root aec;
398 } prot;
399 spinlock_t wl_lock; 407 spinlock_t wl_lock;
400 struct mutex move_mutex; 408 struct mutex move_mutex;
401 struct rw_semaphore work_sem; 409 struct rw_semaphore work_sem;
402 int wl_scheduled; 410 int wl_scheduled;
403 struct ubi_wl_entry **lookuptbl; 411 struct ubi_wl_entry **lookuptbl;
404 unsigned long long abs_ec;
405 struct ubi_wl_entry *move_from; 412 struct ubi_wl_entry *move_from;
406 struct ubi_wl_entry *move_to; 413 struct ubi_wl_entry *move_to;
407 int move_to_put; 414 int move_to_put;
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 3531ca9a1e24..22e1d7398fce 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -329,7 +329,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
329 vol->dev.devt = dev; 329 vol->dev.devt = dev;
330 vol->dev.class = ubi_class; 330 vol->dev.class = ubi_class;
331 331
332 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); 332 dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
333 err = device_register(&vol->dev); 333 err = device_register(&vol->dev);
334 if (err) { 334 if (err) {
335 ubi_err("cannot register device"); 335 ubi_err("cannot register device");
@@ -678,7 +678,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
678 vol->dev.parent = &ubi->dev; 678 vol->dev.parent = &ubi->dev;
679 vol->dev.devt = dev; 679 vol->dev.devt = dev;
680 vol->dev.class = ubi_class; 680 vol->dev.class = ubi_class;
681 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); 681 dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
682 err = device_register(&vol->dev); 682 err = device_register(&vol->dev);
683 if (err) 683 if (err)
684 goto out_gluebi; 684 goto out_gluebi;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 05d70937b543..14901cb82c18 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -22,7 +22,7 @@
22 * UBI wear-leveling sub-system. 22 * UBI wear-leveling sub-system.
23 * 23 *
24 * This sub-system is responsible for wear-leveling. It works in terms of 24 * This sub-system is responsible for wear-leveling. It works in terms of
25 * physical* eraseblocks and erase counters and knows nothing about logical 25 * physical eraseblocks and erase counters and knows nothing about logical
26 * eraseblocks, volumes, etc. From this sub-system's perspective all physical 26 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
27 * eraseblocks are of two types - used and free. Used physical eraseblocks are 27 * eraseblocks are of two types - used and free. Used physical eraseblocks are
28 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical 28 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
@@ -55,8 +55,39 @@
55 * 55 *
56 * As it was said, for the UBI sub-system all physical eraseblocks are either 56 * As it was said, for the UBI sub-system all physical eraseblocks are either
57 * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while 57 * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while
58 * used eraseblocks are kept in a set of different RB-trees: @wl->used, 58 * used eraseblocks are kept in @wl->used or @wl->scrub RB-trees, or
59 * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub. 59 * (temporarily) in the @wl->pq queue.
60 *
61 * When the WL sub-system returns a physical eraseblock, the physical
62 * eraseblock is protected from being moved for some "time". For this reason,
63 * the physical eraseblock is not directly moved from the @wl->free tree to the
64 * @wl->used tree. There is a protection queue in between where this
65 * physical eraseblock is temporarily stored (@wl->pq).
66 *
67 * All this protection stuff is needed because:
68 * o we don't want to move physical eraseblocks just after we have given them
69 * to the user; instead, we first want to let users fill them up with data;
70 *
71 * o there is a chance that the user will put the physical eraseblock very
72 * soon, so it makes sense not to move it for some time, but wait; this is
73 * especially important in case of "short term" physical eraseblocks.
74 *
75 * Physical eraseblocks stay protected only for limited time. But the "time" is
76 * measured in erase cycles in this case. This is implemented with help of the
77 * protection queue. Eraseblocks are put to the tail of this queue when they
78 * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
79 * head of the queue on each erase operation (for any eraseblock). So the
80 * length of the queue defines how may (global) erase cycles PEBs are protected.
81 *
82 * To put it differently, each physical eraseblock has 2 main states: free and
83 * used. The former state corresponds to the @wl->free tree. The latter state
84 * is split up on several sub-states:
85 * o the WL movement is allowed (@wl->used tree);
86 * o the WL movement is temporarily prohibited (@wl->pq queue);
87 * o scrubbing is needed (@wl->scrub tree).
88 *
89 * Depending on the sub-state, wear-leveling entries of the used physical
90 * eraseblocks may be kept in one of those structures.
60 * 91 *
61 * Note, in this implementation, we keep a small in-RAM object for each physical 92 * Note, in this implementation, we keep a small in-RAM object for each physical
62 * eraseblock. This is surely not a scalable solution. But it appears to be good 93 * eraseblock. This is surely not a scalable solution. But it appears to be good
@@ -70,9 +101,6 @@
70 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we 101 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
71 * pick target PEB with an average EC if our PEB is not very "old". This is a 102 * pick target PEB with an average EC if our PEB is not very "old". This is a
72 * room for future re-works of the WL sub-system. 103 * room for future re-works of the WL sub-system.
73 *
74 * Note: the stuff with protection trees looks too complex and is difficult to
75 * understand. Should be fixed.
76 */ 104 */
77 105
78#include <linux/slab.h> 106#include <linux/slab.h>
@@ -85,14 +113,6 @@
85#define WL_RESERVED_PEBS 1 113#define WL_RESERVED_PEBS 1
86 114
87/* 115/*
88 * How many erase cycles are short term, unknown, and long term physical
89 * eraseblocks protected.
90 */
91#define ST_PROTECTION 16
92#define U_PROTECTION 10
93#define LT_PROTECTION 4
94
95/*
96 * Maximum difference between two erase counters. If this threshold is 116 * Maximum difference between two erase counters. If this threshold is
97 * exceeded, the WL sub-system starts moving data from used physical 117 * exceeded, the WL sub-system starts moving data from used physical
98 * eraseblocks with low erase counter to free physical eraseblocks with high 118 * eraseblocks with low erase counter to free physical eraseblocks with high
@@ -120,64 +140,9 @@
120#define WL_MAX_FAILURES 32 140#define WL_MAX_FAILURES 32
121 141
122/** 142/**
123 * struct ubi_wl_prot_entry - PEB protection entry.
124 * @rb_pnum: link in the @wl->prot.pnum RB-tree
125 * @rb_aec: link in the @wl->prot.aec RB-tree
126 * @abs_ec: the absolute erase counter value when the protection ends
127 * @e: the wear-leveling entry of the physical eraseblock under protection
128 *
129 * When the WL sub-system returns a physical eraseblock, the physical
130 * eraseblock is protected from being moved for some "time". For this reason,
131 * the physical eraseblock is not directly moved from the @wl->free tree to the
132 * @wl->used tree. There is one more tree in between where this physical
133 * eraseblock is temporarily stored (@wl->prot).
134 *
135 * All this protection stuff is needed because:
136 * o we don't want to move physical eraseblocks just after we have given them
137 * to the user; instead, we first want to let users fill them up with data;
138 *
139 * o there is a chance that the user will put the physical eraseblock very
140 * soon, so it makes sense not to move it for some time, but wait; this is
141 * especially important in case of "short term" physical eraseblocks.
142 *
143 * Physical eraseblocks stay protected only for limited time. But the "time" is
144 * measured in erase cycles in this case. This is implemented with help of the
145 * absolute erase counter (@wl->abs_ec). When it reaches certain value, the
146 * physical eraseblocks are moved from the protection trees (@wl->prot.*) to
147 * the @wl->used tree.
148 *
149 * Protected physical eraseblocks are searched by physical eraseblock number
150 * (when they are put) and by the absolute erase counter (to check if it is
151 * time to move them to the @wl->used tree). So there are actually 2 RB-trees
152 * storing the protected physical eraseblocks: @wl->prot.pnum and
153 * @wl->prot.aec. They are referred to as the "protection" trees. The
154 * first one is indexed by the physical eraseblock number. The second one is
155 * indexed by the absolute erase counter. Both trees store
156 * &struct ubi_wl_prot_entry objects.
157 *
158 * Each physical eraseblock has 2 main states: free and used. The former state
159 * corresponds to the @wl->free tree. The latter state is split up on several
160 * sub-states:
161 * o the WL movement is allowed (@wl->used tree);
162 * o the WL movement is temporarily prohibited (@wl->prot.pnum and
163 * @wl->prot.aec trees);
164 * o scrubbing is needed (@wl->scrub tree).
165 *
166 * Depending on the sub-state, wear-leveling entries of the used physical
167 * eraseblocks may be kept in one of those trees.
168 */
169struct ubi_wl_prot_entry {
170 struct rb_node rb_pnum;
171 struct rb_node rb_aec;
172 unsigned long long abs_ec;
173 struct ubi_wl_entry *e;
174};
175
176/**
177 * struct ubi_work - UBI work description data structure. 143 * struct ubi_work - UBI work description data structure.
178 * @list: a link in the list of pending works 144 * @list: a link in the list of pending works
179 * @func: worker function 145 * @func: worker function
180 * @priv: private data of the worker function
181 * @e: physical eraseblock to erase 146 * @e: physical eraseblock to erase
182 * @torture: if the physical eraseblock has to be tortured 147 * @torture: if the physical eraseblock has to be tortured
183 * 148 *
@@ -198,9 +163,11 @@ struct ubi_work {
198static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec); 163static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
199static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e, 164static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
200 struct rb_root *root); 165 struct rb_root *root);
166static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e);
201#else 167#else
202#define paranoid_check_ec(ubi, pnum, ec) 0 168#define paranoid_check_ec(ubi, pnum, ec) 0
203#define paranoid_check_in_wl_tree(e, root) 169#define paranoid_check_in_wl_tree(e, root)
170#define paranoid_check_in_pq(ubi, e) 0
204#endif 171#endif
205 172
206/** 173/**
@@ -220,7 +187,7 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
220 struct ubi_wl_entry *e1; 187 struct ubi_wl_entry *e1;
221 188
222 parent = *p; 189 parent = *p;
223 e1 = rb_entry(parent, struct ubi_wl_entry, rb); 190 e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
224 191
225 if (e->ec < e1->ec) 192 if (e->ec < e1->ec)
226 p = &(*p)->rb_left; 193 p = &(*p)->rb_left;
@@ -235,8 +202,8 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
235 } 202 }
236 } 203 }
237 204
238 rb_link_node(&e->rb, parent, p); 205 rb_link_node(&e->u.rb, parent, p);
239 rb_insert_color(&e->rb, root); 206 rb_insert_color(&e->u.rb, root);
240} 207}
241 208
242/** 209/**
@@ -331,7 +298,7 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
331 while (p) { 298 while (p) {
332 struct ubi_wl_entry *e1; 299 struct ubi_wl_entry *e1;
333 300
334 e1 = rb_entry(p, struct ubi_wl_entry, rb); 301 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
335 302
336 if (e->pnum == e1->pnum) { 303 if (e->pnum == e1->pnum) {
337 ubi_assert(e == e1); 304 ubi_assert(e == e1);
@@ -355,50 +322,24 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
355} 322}
356 323
357/** 324/**
358 * prot_tree_add - add physical eraseblock to protection trees. 325 * prot_queue_add - add physical eraseblock to the protection queue.
359 * @ubi: UBI device description object 326 * @ubi: UBI device description object
360 * @e: the physical eraseblock to add 327 * @e: the physical eraseblock to add
361 * @pe: protection entry object to use
362 * @abs_ec: absolute erase counter value when this physical eraseblock has
363 * to be removed from the protection trees.
364 * 328 *
365 * @wl->lock has to be locked. 329 * This function adds @e to the tail of the protection queue @ubi->pq, where
330 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
331 * temporarily protected from the wear-leveling worker. Note, @wl->lock has to
332 * be locked.
366 */ 333 */
367static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e, 334static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
368 struct ubi_wl_prot_entry *pe, int abs_ec)
369{ 335{
370 struct rb_node **p, *parent = NULL; 336 int pq_tail = ubi->pq_head - 1;
371 struct ubi_wl_prot_entry *pe1;
372
373 pe->e = e;
374 pe->abs_ec = ubi->abs_ec + abs_ec;
375
376 p = &ubi->prot.pnum.rb_node;
377 while (*p) {
378 parent = *p;
379 pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum);
380
381 if (e->pnum < pe1->e->pnum)
382 p = &(*p)->rb_left;
383 else
384 p = &(*p)->rb_right;
385 }
386 rb_link_node(&pe->rb_pnum, parent, p);
387 rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);
388
389 p = &ubi->prot.aec.rb_node;
390 parent = NULL;
391 while (*p) {
392 parent = *p;
393 pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec);
394 337
395 if (pe->abs_ec < pe1->abs_ec) 338 if (pq_tail < 0)
396 p = &(*p)->rb_left; 339 pq_tail = UBI_PROT_QUEUE_LEN - 1;
397 else 340 ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
398 p = &(*p)->rb_right; 341 list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
399 } 342 dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
400 rb_link_node(&pe->rb_aec, parent, p);
401 rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
402} 343}
403 344
404/** 345/**
@@ -414,14 +355,14 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
414 struct rb_node *p; 355 struct rb_node *p;
415 struct ubi_wl_entry *e; 356 struct ubi_wl_entry *e;
416 357
417 e = rb_entry(rb_first(root), struct ubi_wl_entry, rb); 358 e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
418 max += e->ec; 359 max += e->ec;
419 360
420 p = root->rb_node; 361 p = root->rb_node;
421 while (p) { 362 while (p) {
422 struct ubi_wl_entry *e1; 363 struct ubi_wl_entry *e1;
423 364
424 e1 = rb_entry(p, struct ubi_wl_entry, rb); 365 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
425 if (e1->ec >= max) 366 if (e1->ec >= max)
426 p = p->rb_left; 367 p = p->rb_left;
427 else { 368 else {
@@ -443,17 +384,12 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
443 */ 384 */
444int ubi_wl_get_peb(struct ubi_device *ubi, int dtype) 385int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
445{ 386{
446 int err, protect, medium_ec; 387 int err, medium_ec;
447 struct ubi_wl_entry *e, *first, *last; 388 struct ubi_wl_entry *e, *first, *last;
448 struct ubi_wl_prot_entry *pe;
449 389
450 ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM || 390 ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
451 dtype == UBI_UNKNOWN); 391 dtype == UBI_UNKNOWN);
452 392
453 pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
454 if (!pe)
455 return -ENOMEM;
456
457retry: 393retry:
458 spin_lock(&ubi->wl_lock); 394 spin_lock(&ubi->wl_lock);
459 if (!ubi->free.rb_node) { 395 if (!ubi->free.rb_node) {
@@ -461,16 +397,13 @@ retry:
461 ubi_assert(list_empty(&ubi->works)); 397 ubi_assert(list_empty(&ubi->works));
462 ubi_err("no free eraseblocks"); 398 ubi_err("no free eraseblocks");
463 spin_unlock(&ubi->wl_lock); 399 spin_unlock(&ubi->wl_lock);
464 kfree(pe);
465 return -ENOSPC; 400 return -ENOSPC;
466 } 401 }
467 spin_unlock(&ubi->wl_lock); 402 spin_unlock(&ubi->wl_lock);
468 403
469 err = produce_free_peb(ubi); 404 err = produce_free_peb(ubi);
470 if (err < 0) { 405 if (err < 0)
471 kfree(pe);
472 return err; 406 return err;
473 }
474 goto retry; 407 goto retry;
475 } 408 }
476 409
@@ -483,7 +416,6 @@ retry:
483 * %WL_FREE_MAX_DIFF. 416 * %WL_FREE_MAX_DIFF.
484 */ 417 */
485 e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 418 e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
486 protect = LT_PROTECTION;
487 break; 419 break;
488 case UBI_UNKNOWN: 420 case UBI_UNKNOWN:
489 /* 421 /*
@@ -492,81 +424,63 @@ retry:
492 * eraseblock with erase counter greater or equivalent than the 424 * eraseblock with erase counter greater or equivalent than the
493 * lowest erase counter plus %WL_FREE_MAX_DIFF. 425 * lowest erase counter plus %WL_FREE_MAX_DIFF.
494 */ 426 */
495 first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb); 427 first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
496 last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, rb); 428 u.rb);
429 last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);
497 430
498 if (last->ec - first->ec < WL_FREE_MAX_DIFF) 431 if (last->ec - first->ec < WL_FREE_MAX_DIFF)
499 e = rb_entry(ubi->free.rb_node, 432 e = rb_entry(ubi->free.rb_node,
500 struct ubi_wl_entry, rb); 433 struct ubi_wl_entry, u.rb);
501 else { 434 else {
502 medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2; 435 medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
503 e = find_wl_entry(&ubi->free, medium_ec); 436 e = find_wl_entry(&ubi->free, medium_ec);
504 } 437 }
505 protect = U_PROTECTION;
506 break; 438 break;
507 case UBI_SHORTTERM: 439 case UBI_SHORTTERM:
508 /* 440 /*
509 * For short term data we pick a physical eraseblock with the 441 * For short term data we pick a physical eraseblock with the
510 * lowest erase counter as we expect it will be erased soon. 442 * lowest erase counter as we expect it will be erased soon.
511 */ 443 */
512 e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb); 444 e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
513 protect = ST_PROTECTION;
514 break; 445 break;
515 default: 446 default:
516 protect = 0;
517 e = NULL;
518 BUG(); 447 BUG();
519 } 448 }
520 449
450 paranoid_check_in_wl_tree(e, &ubi->free);
451
521 /* 452 /*
522 * Move the physical eraseblock to the protection trees where it will 453 * Move the physical eraseblock to the protection queue where it will
523 * be protected from being moved for some time. 454 * be protected from being moved for some time.
524 */ 455 */
525 paranoid_check_in_wl_tree(e, &ubi->free); 456 rb_erase(&e->u.rb, &ubi->free);
526 rb_erase(&e->rb, &ubi->free); 457 dbg_wl("PEB %d EC %d", e->pnum, e->ec);
527 prot_tree_add(ubi, e, pe, protect); 458 prot_queue_add(ubi, e);
528
529 dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
530 spin_unlock(&ubi->wl_lock); 459 spin_unlock(&ubi->wl_lock);
531
532 return e->pnum; 460 return e->pnum;
533} 461}
534 462
535/** 463/**
536 * prot_tree_del - remove a physical eraseblock from the protection trees 464 * prot_queue_del - remove a physical eraseblock from the protection queue.
537 * @ubi: UBI device description object 465 * @ubi: UBI device description object
538 * @pnum: the physical eraseblock to remove 466 * @pnum: the physical eraseblock to remove
539 * 467 *
540 * This function returns PEB @pnum from the protection trees and returns zero 468 * This function deletes PEB @pnum from the protection queue and returns zero
541 * in case of success and %-ENODEV if the PEB was not found in the protection 469 * in case of success and %-ENODEV if the PEB was not found.
542 * trees.
543 */ 470 */
544static int prot_tree_del(struct ubi_device *ubi, int pnum) 471static int prot_queue_del(struct ubi_device *ubi, int pnum)
545{ 472{
546 struct rb_node *p; 473 struct ubi_wl_entry *e;
547 struct ubi_wl_prot_entry *pe = NULL;
548
549 p = ubi->prot.pnum.rb_node;
550 while (p) {
551
552 pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);
553
554 if (pnum == pe->e->pnum)
555 goto found;
556 474
557 if (pnum < pe->e->pnum) 475 e = ubi->lookuptbl[pnum];
558 p = p->rb_left; 476 if (!e)
559 else 477 return -ENODEV;
560 p = p->rb_right;
561 }
562 478
563 return -ENODEV; 479 if (paranoid_check_in_pq(ubi, e))
480 return -ENODEV;
564 481
565found: 482 list_del(&e->u.list);
566 ubi_assert(pe->e->pnum == pnum); 483 dbg_wl("deleted PEB %d from the protection queue", e->pnum);
567 rb_erase(&pe->rb_aec, &ubi->prot.aec);
568 rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
569 kfree(pe);
570 return 0; 484 return 0;
571} 485}
572 486
@@ -632,47 +546,47 @@ out_free:
632} 546}
633 547
634/** 548/**
635 * check_protection_over - check if it is time to stop protecting some PEBs. 549 * serve_prot_queue - check if it is time to stop protecting PEBs.
636 * @ubi: UBI device description object 550 * @ubi: UBI device description object
637 * 551 *
638 * This function is called after each erase operation, when the absolute erase 552 * This function is called after each erase operation and removes PEBs from the
639 * counter is incremented, to check if some physical eraseblock have not to be 553 * tail of the protection queue. These PEBs have been protected for long enough
640 * protected any longer. These physical eraseblocks are moved from the 554 * and should be moved to the used tree.
641 * protection trees to the used tree.
642 */ 555 */
643static void check_protection_over(struct ubi_device *ubi) 556static void serve_prot_queue(struct ubi_device *ubi)
644{ 557{
645 struct ubi_wl_prot_entry *pe; 558 struct ubi_wl_entry *e, *tmp;
559 int count;
646 560
647 /* 561 /*
648 * There may be several protected physical eraseblock to remove, 562 * There may be several protected physical eraseblock to remove,
649 * process them all. 563 * process them all.
650 */ 564 */
651 while (1) { 565repeat:
652 spin_lock(&ubi->wl_lock); 566 count = 0;
653 if (!ubi->prot.aec.rb_node) { 567 spin_lock(&ubi->wl_lock);
654 spin_unlock(&ubi->wl_lock); 568 list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
655 break; 569 dbg_wl("PEB %d EC %d protection over, move to used tree",
656 } 570 e->pnum, e->ec);
657
658 pe = rb_entry(rb_first(&ubi->prot.aec),
659 struct ubi_wl_prot_entry, rb_aec);
660 571
661 if (pe->abs_ec > ubi->abs_ec) { 572 list_del(&e->u.list);
573 wl_tree_add(e, &ubi->used);
574 if (count++ > 32) {
575 /*
576 * Let's be nice and avoid holding the spinlock for
577 * too long.
578 */
662 spin_unlock(&ubi->wl_lock); 579 spin_unlock(&ubi->wl_lock);
663 break; 580 cond_resched();
581 goto repeat;
664 } 582 }
665
666 dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
667 pe->e->pnum, ubi->abs_ec, pe->abs_ec);
668 rb_erase(&pe->rb_aec, &ubi->prot.aec);
669 rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
670 wl_tree_add(pe->e, &ubi->used);
671 spin_unlock(&ubi->wl_lock);
672
673 kfree(pe);
674 cond_resched();
675 } 583 }
584
585 ubi->pq_head += 1;
586 if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
587 ubi->pq_head = 0;
588 ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
589 spin_unlock(&ubi->wl_lock);
676} 590}
677 591
678/** 592/**
@@ -680,8 +594,8 @@ static void check_protection_over(struct ubi_device *ubi)
680 * @ubi: UBI device description object 594 * @ubi: UBI device description object
681 * @wrk: the work to schedule 595 * @wrk: the work to schedule
682 * 596 *
683 * This function enqueues a work defined by @wrk to the tail of the pending 597 * This function adds a work defined by @wrk to the tail of the pending works
684 * works list. 598 * list.
685 */ 599 */
686static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) 600static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
687{ 601{
@@ -739,13 +653,11 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
739static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, 653static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
740 int cancel) 654 int cancel)
741{ 655{
742 int err, put = 0, scrubbing = 0, protect = 0; 656 int err, scrubbing = 0, torture = 0;
743 struct ubi_wl_prot_entry *uninitialized_var(pe);
744 struct ubi_wl_entry *e1, *e2; 657 struct ubi_wl_entry *e1, *e2;
745 struct ubi_vid_hdr *vid_hdr; 658 struct ubi_vid_hdr *vid_hdr;
746 659
747 kfree(wrk); 660 kfree(wrk);
748
749 if (cancel) 661 if (cancel)
750 return 0; 662 return 0;
751 663
@@ -781,7 +693,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
781 * highly worn-out free physical eraseblock. If the erase 693 * highly worn-out free physical eraseblock. If the erase
782 * counters differ much enough, start wear-leveling. 694 * counters differ much enough, start wear-leveling.
783 */ 695 */
784 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb); 696 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
785 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 697 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
786 698
787 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { 699 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
@@ -790,21 +702,21 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
790 goto out_cancel; 702 goto out_cancel;
791 } 703 }
792 paranoid_check_in_wl_tree(e1, &ubi->used); 704 paranoid_check_in_wl_tree(e1, &ubi->used);
793 rb_erase(&e1->rb, &ubi->used); 705 rb_erase(&e1->u.rb, &ubi->used);
794 dbg_wl("move PEB %d EC %d to PEB %d EC %d", 706 dbg_wl("move PEB %d EC %d to PEB %d EC %d",
795 e1->pnum, e1->ec, e2->pnum, e2->ec); 707 e1->pnum, e1->ec, e2->pnum, e2->ec);
796 } else { 708 } else {
797 /* Perform scrubbing */ 709 /* Perform scrubbing */
798 scrubbing = 1; 710 scrubbing = 1;
799 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb); 711 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
800 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 712 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
801 paranoid_check_in_wl_tree(e1, &ubi->scrub); 713 paranoid_check_in_wl_tree(e1, &ubi->scrub);
802 rb_erase(&e1->rb, &ubi->scrub); 714 rb_erase(&e1->u.rb, &ubi->scrub);
803 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); 715 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
804 } 716 }
805 717
806 paranoid_check_in_wl_tree(e2, &ubi->free); 718 paranoid_check_in_wl_tree(e2, &ubi->free);
807 rb_erase(&e2->rb, &ubi->free); 719 rb_erase(&e2->u.rb, &ubi->free);
808 ubi->move_from = e1; 720 ubi->move_from = e1;
809 ubi->move_to = e2; 721 ubi->move_to = e2;
810 spin_unlock(&ubi->wl_lock); 722 spin_unlock(&ubi->wl_lock);
@@ -844,46 +756,67 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
844 756
845 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); 757 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
846 if (err) { 758 if (err) {
847 759 if (err == -EAGAIN)
760 goto out_not_moved;
848 if (err < 0) 761 if (err < 0)
849 goto out_error; 762 goto out_error;
850 if (err == 1) 763 if (err == 2) {
764 /* Target PEB write error, torture it */
765 torture = 1;
851 goto out_not_moved; 766 goto out_not_moved;
767 }
852 768
853 /* 769 /*
854 * For some reason the LEB was not moved - it might be because 770 * The LEB has not been moved because the volume is being
855 * the volume is being deleted. We should prevent this PEB from 771 * deleted or the PEB has been put meanwhile. We should prevent
856 * being selected for wear-levelling movement for some "time", 772 * this PEB from being selected for wear-leveling movement
857 * so put it to the protection tree. 773 * again, so put it to the protection queue.
858 */ 774 */
859 775
860 dbg_wl("cancelled moving PEB %d", e1->pnum); 776 dbg_wl("canceled moving PEB %d", e1->pnum);
861 pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS); 777 ubi_assert(err == 1);
862 if (!pe) { 778
863 err = -ENOMEM; 779 ubi_free_vid_hdr(ubi, vid_hdr);
864 goto out_error; 780 vid_hdr = NULL;
865 } 781
782 spin_lock(&ubi->wl_lock);
783 prot_queue_add(ubi, e1);
784 ubi_assert(!ubi->move_to_put);
785 ubi->move_from = ubi->move_to = NULL;
786 ubi->wl_scheduled = 0;
787 spin_unlock(&ubi->wl_lock);
866 788
867 protect = 1; 789 e1 = NULL;
790 err = schedule_erase(ubi, e2, 0);
791 if (err)
792 goto out_error;
793 mutex_unlock(&ubi->move_mutex);
794 return 0;
868 } 795 }
869 796
797 /* The PEB has been successfully moved */
870 ubi_free_vid_hdr(ubi, vid_hdr); 798 ubi_free_vid_hdr(ubi, vid_hdr);
871 if (scrubbing && !protect) 799 vid_hdr = NULL;
800 if (scrubbing)
872 ubi_msg("scrubbed PEB %d, data moved to PEB %d", 801 ubi_msg("scrubbed PEB %d, data moved to PEB %d",
873 e1->pnum, e2->pnum); 802 e1->pnum, e2->pnum);
874 803
875 spin_lock(&ubi->wl_lock); 804 spin_lock(&ubi->wl_lock);
876 if (protect) 805 if (!ubi->move_to_put) {
877 prot_tree_add(ubi, e1, pe, protect);
878 if (!ubi->move_to_put)
879 wl_tree_add(e2, &ubi->used); 806 wl_tree_add(e2, &ubi->used);
880 else 807 e2 = NULL;
881 put = 1; 808 }
882 ubi->move_from = ubi->move_to = NULL; 809 ubi->move_from = ubi->move_to = NULL;
883 ubi->move_to_put = ubi->wl_scheduled = 0; 810 ubi->move_to_put = ubi->wl_scheduled = 0;
884 spin_unlock(&ubi->wl_lock); 811 spin_unlock(&ubi->wl_lock);
885 812
886 if (put) { 813 err = schedule_erase(ubi, e1, 0);
814 if (err) {
815 e1 = NULL;
816 goto out_error;
817 }
818
819 if (e2) {
887 /* 820 /*
888 * Well, the target PEB was put meanwhile, schedule it for 821 * Well, the target PEB was put meanwhile, schedule it for
889 * erasure. 822 * erasure.
@@ -894,13 +827,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
894 goto out_error; 827 goto out_error;
895 } 828 }
896 829
897 if (!protect) {
898 err = schedule_erase(ubi, e1, 0);
899 if (err)
900 goto out_error;
901 }
902
903
904 dbg_wl("done"); 830 dbg_wl("done");
905 mutex_unlock(&ubi->move_mutex); 831 mutex_unlock(&ubi->move_mutex);
906 return 0; 832 return 0;
@@ -908,20 +834,24 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
908 /* 834 /*
909 * For some reasons the LEB was not moved, might be an error, might be 835 * For some reasons the LEB was not moved, might be an error, might be
910 * something else. @e1 was not changed, so return it back. @e2 might 836 * something else. @e1 was not changed, so return it back. @e2 might
911 * be changed, schedule it for erasure. 837 * have been changed, schedule it for erasure.
912 */ 838 */
913out_not_moved: 839out_not_moved:
840 dbg_wl("canceled moving PEB %d", e1->pnum);
914 ubi_free_vid_hdr(ubi, vid_hdr); 841 ubi_free_vid_hdr(ubi, vid_hdr);
842 vid_hdr = NULL;
915 spin_lock(&ubi->wl_lock); 843 spin_lock(&ubi->wl_lock);
916 if (scrubbing) 844 if (scrubbing)
917 wl_tree_add(e1, &ubi->scrub); 845 wl_tree_add(e1, &ubi->scrub);
918 else 846 else
919 wl_tree_add(e1, &ubi->used); 847 wl_tree_add(e1, &ubi->used);
848 ubi_assert(!ubi->move_to_put);
920 ubi->move_from = ubi->move_to = NULL; 849 ubi->move_from = ubi->move_to = NULL;
921 ubi->move_to_put = ubi->wl_scheduled = 0; 850 ubi->wl_scheduled = 0;
922 spin_unlock(&ubi->wl_lock); 851 spin_unlock(&ubi->wl_lock);
923 852
924 err = schedule_erase(ubi, e2, 0); 853 e1 = NULL;
854 err = schedule_erase(ubi, e2, torture);
925 if (err) 855 if (err)
926 goto out_error; 856 goto out_error;
927 857
@@ -938,8 +868,10 @@ out_error:
938 ubi->move_to_put = ubi->wl_scheduled = 0; 868 ubi->move_to_put = ubi->wl_scheduled = 0;
939 spin_unlock(&ubi->wl_lock); 869 spin_unlock(&ubi->wl_lock);
940 870
941 kmem_cache_free(ubi_wl_entry_slab, e1); 871 if (e1)
942 kmem_cache_free(ubi_wl_entry_slab, e2); 872 kmem_cache_free(ubi_wl_entry_slab, e1);
873 if (e2)
874 kmem_cache_free(ubi_wl_entry_slab, e2);
943 ubi_ro_mode(ubi); 875 ubi_ro_mode(ubi);
944 876
945 mutex_unlock(&ubi->move_mutex); 877 mutex_unlock(&ubi->move_mutex);
@@ -988,7 +920,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
988 * erase counter of free physical eraseblocks is greater then 920 * erase counter of free physical eraseblocks is greater then
989 * %UBI_WL_THRESHOLD. 921 * %UBI_WL_THRESHOLD.
990 */ 922 */
991 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb); 923 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
992 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 924 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
993 925
994 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) 926 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
@@ -1050,7 +982,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1050 kfree(wl_wrk); 982 kfree(wl_wrk);
1051 983
1052 spin_lock(&ubi->wl_lock); 984 spin_lock(&ubi->wl_lock);
1053 ubi->abs_ec += 1;
1054 wl_tree_add(e, &ubi->free); 985 wl_tree_add(e, &ubi->free);
1055 spin_unlock(&ubi->wl_lock); 986 spin_unlock(&ubi->wl_lock);
1056 987
@@ -1058,7 +989,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1058 * One more erase operation has happened, take care about 989 * One more erase operation has happened, take care about
1059 * protected physical eraseblocks. 990 * protected physical eraseblocks.
1060 */ 991 */
1061 check_protection_over(ubi); 992 serve_prot_queue(ubi);
1062 993
1063 /* And take care about wear-leveling */ 994 /* And take care about wear-leveling */
1064 err = ensure_wear_leveling(ubi); 995 err = ensure_wear_leveling(ubi);
@@ -1190,12 +1121,12 @@ retry:
1190 } else { 1121 } else {
1191 if (in_wl_tree(e, &ubi->used)) { 1122 if (in_wl_tree(e, &ubi->used)) {
1192 paranoid_check_in_wl_tree(e, &ubi->used); 1123 paranoid_check_in_wl_tree(e, &ubi->used);
1193 rb_erase(&e->rb, &ubi->used); 1124 rb_erase(&e->u.rb, &ubi->used);
1194 } else if (in_wl_tree(e, &ubi->scrub)) { 1125 } else if (in_wl_tree(e, &ubi->scrub)) {
1195 paranoid_check_in_wl_tree(e, &ubi->scrub); 1126 paranoid_check_in_wl_tree(e, &ubi->scrub);
1196 rb_erase(&e->rb, &ubi->scrub); 1127 rb_erase(&e->u.rb, &ubi->scrub);
1197 } else { 1128 } else {
1198 err = prot_tree_del(ubi, e->pnum); 1129 err = prot_queue_del(ubi, e->pnum);
1199 if (err) { 1130 if (err) {
1200 ubi_err("PEB %d not found", pnum); 1131 ubi_err("PEB %d not found", pnum);
1201 ubi_ro_mode(ubi); 1132 ubi_ro_mode(ubi);
@@ -1255,11 +1186,11 @@ retry:
1255 1186
1256 if (in_wl_tree(e, &ubi->used)) { 1187 if (in_wl_tree(e, &ubi->used)) {
1257 paranoid_check_in_wl_tree(e, &ubi->used); 1188 paranoid_check_in_wl_tree(e, &ubi->used);
1258 rb_erase(&e->rb, &ubi->used); 1189 rb_erase(&e->u.rb, &ubi->used);
1259 } else { 1190 } else {
1260 int err; 1191 int err;
1261 1192
1262 err = prot_tree_del(ubi, e->pnum); 1193 err = prot_queue_del(ubi, e->pnum);
1263 if (err) { 1194 if (err) {
1264 ubi_err("PEB %d not found", pnum); 1195 ubi_err("PEB %d not found", pnum);
1265 ubi_ro_mode(ubi); 1196 ubi_ro_mode(ubi);
@@ -1290,7 +1221,7 @@ int ubi_wl_flush(struct ubi_device *ubi)
1290 int err; 1221 int err;
1291 1222
1292 /* 1223 /*
1293 * Erase while the pending works queue is not empty, but not more then 1224 * Erase while the pending works queue is not empty, but not more than
1294 * the number of currently pending works. 1225 * the number of currently pending works.
1295 */ 1226 */
1296 dbg_wl("flush (%d pending works)", ubi->works_count); 1227 dbg_wl("flush (%d pending works)", ubi->works_count);
@@ -1308,7 +1239,7 @@ int ubi_wl_flush(struct ubi_device *ubi)
1308 up_write(&ubi->work_sem); 1239 up_write(&ubi->work_sem);
1309 1240
1310 /* 1241 /*
1311 * And in case last was the WL worker and it cancelled the LEB 1242 * And in case last was the WL worker and it canceled the LEB
1312 * movement, flush again. 1243 * movement, flush again.
1313 */ 1244 */
1314 while (ubi->works_count) { 1245 while (ubi->works_count) {
@@ -1337,11 +1268,11 @@ static void tree_destroy(struct rb_root *root)
1337 else if (rb->rb_right) 1268 else if (rb->rb_right)
1338 rb = rb->rb_right; 1269 rb = rb->rb_right;
1339 else { 1270 else {
1340 e = rb_entry(rb, struct ubi_wl_entry, rb); 1271 e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1341 1272
1342 rb = rb_parent(rb); 1273 rb = rb_parent(rb);
1343 if (rb) { 1274 if (rb) {
1344 if (rb->rb_left == &e->rb) 1275 if (rb->rb_left == &e->u.rb)
1345 rb->rb_left = NULL; 1276 rb->rb_left = NULL;
1346 else 1277 else
1347 rb->rb_right = NULL; 1278 rb->rb_right = NULL;
@@ -1396,7 +1327,8 @@ int ubi_thread(void *u)
1396 ubi_msg("%s: %d consecutive failures", 1327 ubi_msg("%s: %d consecutive failures",
1397 ubi->bgt_name, WL_MAX_FAILURES); 1328 ubi->bgt_name, WL_MAX_FAILURES);
1398 ubi_ro_mode(ubi); 1329 ubi_ro_mode(ubi);
1399 break; 1330 ubi->thread_enabled = 0;
1331 continue;
1400 } 1332 }
1401 } else 1333 } else
1402 failures = 0; 1334 failures = 0;
@@ -1435,15 +1367,13 @@ static void cancel_pending(struct ubi_device *ubi)
1435 */ 1367 */
1436int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) 1368int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1437{ 1369{
1438 int err; 1370 int err, i;
1439 struct rb_node *rb1, *rb2; 1371 struct rb_node *rb1, *rb2;
1440 struct ubi_scan_volume *sv; 1372 struct ubi_scan_volume *sv;
1441 struct ubi_scan_leb *seb, *tmp; 1373 struct ubi_scan_leb *seb, *tmp;
1442 struct ubi_wl_entry *e; 1374 struct ubi_wl_entry *e;
1443 1375
1444
1445 ubi->used = ubi->free = ubi->scrub = RB_ROOT; 1376 ubi->used = ubi->free = ubi->scrub = RB_ROOT;
1446 ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
1447 spin_lock_init(&ubi->wl_lock); 1377 spin_lock_init(&ubi->wl_lock);
1448 mutex_init(&ubi->move_mutex); 1378 mutex_init(&ubi->move_mutex);
1449 init_rwsem(&ubi->work_sem); 1379 init_rwsem(&ubi->work_sem);
@@ -1457,6 +1387,10 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1457 if (!ubi->lookuptbl) 1387 if (!ubi->lookuptbl)
1458 return err; 1388 return err;
1459 1389
1390 for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
1391 INIT_LIST_HEAD(&ubi->pq[i]);
1392 ubi->pq_head = 0;
1393
1460 list_for_each_entry_safe(seb, tmp, &si->erase, u.list) { 1394 list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
1461 cond_resched(); 1395 cond_resched();
1462 1396
@@ -1551,33 +1485,18 @@ out_free:
1551} 1485}
1552 1486
1553/** 1487/**
1554 * protection_trees_destroy - destroy the protection RB-trees. 1488 * protection_queue_destroy - destroy the protection queue.
1555 * @ubi: UBI device description object 1489 * @ubi: UBI device description object
1556 */ 1490 */
1557static void protection_trees_destroy(struct ubi_device *ubi) 1491static void protection_queue_destroy(struct ubi_device *ubi)
1558{ 1492{
1559 struct rb_node *rb; 1493 int i;
1560 struct ubi_wl_prot_entry *pe; 1494 struct ubi_wl_entry *e, *tmp;
1561
1562 rb = ubi->prot.aec.rb_node;
1563 while (rb) {
1564 if (rb->rb_left)
1565 rb = rb->rb_left;
1566 else if (rb->rb_right)
1567 rb = rb->rb_right;
1568 else {
1569 pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);
1570
1571 rb = rb_parent(rb);
1572 if (rb) {
1573 if (rb->rb_left == &pe->rb_aec)
1574 rb->rb_left = NULL;
1575 else
1576 rb->rb_right = NULL;
1577 }
1578 1495
1579 kmem_cache_free(ubi_wl_entry_slab, pe->e); 1496 for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
1580 kfree(pe); 1497 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1498 list_del(&e->u.list);
1499 kmem_cache_free(ubi_wl_entry_slab, e);
1581 } 1500 }
1582 } 1501 }
1583} 1502}
@@ -1590,7 +1509,7 @@ void ubi_wl_close(struct ubi_device *ubi)
1590{ 1509{
1591 dbg_wl("close the WL sub-system"); 1510 dbg_wl("close the WL sub-system");
1592 cancel_pending(ubi); 1511 cancel_pending(ubi);
1593 protection_trees_destroy(ubi); 1512 protection_queue_destroy(ubi);
1594 tree_destroy(&ubi->used); 1513 tree_destroy(&ubi->used);
1595 tree_destroy(&ubi->free); 1514 tree_destroy(&ubi->free);
1596 tree_destroy(&ubi->scrub); 1515 tree_destroy(&ubi->scrub);
@@ -1660,4 +1579,27 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
1660 return 1; 1579 return 1;
1661} 1580}
1662 1581
1582/**
1583 * paranoid_check_in_pq - check if wear-leveling entry is in the protection
1584 * queue.
1585 * @ubi: UBI device description object
1586 * @e: the wear-leveling entry to check
1587 *
1588 * This function returns zero if @e is in @ubi->pq and %1 if it is not.
1589 */
1590static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
1591{
1592 struct ubi_wl_entry *p;
1593 int i;
1594
1595 for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
1596 list_for_each_entry(p, &ubi->pq[i], u.list)
1597 if (p == e)
1598 return 0;
1599
1600 ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue",
1601 e->pnum, e->ec);
1602 ubi_dbg_dump_stack();
1603 return 1;
1604}
1663#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ 1605#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */