aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mtd
diff options
context:
space:
mode:
authorJonathan Herman <hermanjl@cs.unc.edu>2013-01-22 10:38:37 -0500
committerJonathan Herman <hermanjl@cs.unc.edu>2013-01-22 10:38:37 -0500
commitfcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
treea57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/mtd
parent8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)
Added missing tegra files.HEADmaster
Diffstat (limited to 'drivers/mtd')
-rw-r--r--drivers/mtd/devices/tegra_nand.c1802
-rw-r--r--drivers/mtd/devices/tegra_nand.h148
-rw-r--r--drivers/mtd/maps/bcm963xx-flash.c276
-rw-r--r--drivers/mtd/maps/cdb89712.c278
-rw-r--r--drivers/mtd/maps/ceiva.c341
-rw-r--r--drivers/mtd/maps/edb7312.c134
-rw-r--r--drivers/mtd/maps/fortunet.c277
-rw-r--r--drivers/mtd/maps/tegra_nor.c483
-rw-r--r--drivers/mtd/maps/wr_sbc82xx_flash.c181
-rw-r--r--drivers/mtd/nand/autcpu12.c239
-rw-r--r--drivers/mtd/nand/bcm_umi_bch.c213
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c579
-rw-r--r--drivers/mtd/nand/edb7312.c203
-rw-r--r--drivers/mtd/nand/nand_bcm_umi.c149
-rw-r--r--drivers/mtd/nand/nand_bcm_umi.h337
-rw-r--r--drivers/mtd/nand/nomadik_nand.c246
-rw-r--r--drivers/mtd/nand/spia.c176
-rw-r--r--drivers/mtd/ubi/scan.c1605
-rw-r--r--drivers/mtd/ubi/scan.h174
19 files changed, 7841 insertions, 0 deletions
diff --git a/drivers/mtd/devices/tegra_nand.c b/drivers/mtd/devices/tegra_nand.c
new file mode 100644
index 00000000000..c8a3e7090b9
--- /dev/null
+++ b/drivers/mtd/devices/tegra_nand.c
@@ -0,0 +1,1802 @@
1/*
2 * drivers/mtd/devices/tegra_nand.c
3 *
4 * Copyright (C) 2010 Google, Inc.
5 * Author: Dima Zavin <dima@android.com>
6 * Colin Cross <ccross@android.com>
7 *
8 * Copyright (C) 2010-2011 Nvidia Graphics Pvt. Ltd.
9 * http://www.nvidia.com
10 *
11 * This software is licensed under the terms of the GNU General Public
12 * License version 2, as published by the Free Software Foundation, and
13 * may be copied, distributed, and modified under those terms.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * Derived from: drivers/mtd/nand/nand_base.c
21 * drivers/mtd/nand/pxa3xx.c
22 *
23 */
24
25#include <linux/delay.h>
26#include <linux/dma-mapping.h>
27#include <linux/init.h>
28#include <linux/interrupt.h>
29#include <linux/io.h>
30#include <linux/module.h>
31#include <linux/mutex.h>
32#include <linux/mtd/nand.h>
33#include <linux/mtd/mtd.h>
34#include <linux/mtd/partitions.h>
35#include <linux/platform_device.h>
36#include <linux/types.h>
37#include <linux/clk.h>
38#include <linux/slab.h>
39#include <linux/gpio.h>
40
41#include <mach/nand.h>
42
43#include "tegra_nand.h"
44
45#define DRIVER_NAME "tegra_nand"
46#define DRIVER_DESC "Nvidia Tegra NAND Flash Controller driver"
47
48#define MAX_DMA_SZ SZ_64K
49#define ECC_BUF_SZ SZ_1K
50
51/* FIXME: is this right?!
52 * NvRM code says it should be 128 bytes, but that seems awfully small
53 */
54
55/*#define TEGRA_NAND_DEBUG
56#define TEGRA_NAND_DEBUG_PEDANTIC*/
57
58#ifdef TEGRA_NAND_DEBUG
59#define TEGRA_DBG(fmt, args...) \
60 do { pr_info(fmt, ##args); } while (0)
61#else
62#define TEGRA_DBG(fmt, args...)
63#endif
64
65/* TODO: will vary with devices, move into appropriate device spcific header */
66#define SCAN_TIMING_VAL 0x3f0bd214
67#define SCAN_TIMING2_VAL 0xb
68
69#define TIMEOUT (2 * HZ)
70/* TODO: pull in the register defs (fields, masks, etc) from Nvidia files
71 * so we don't have to redefine them */
72
73static const char *part_probes[] = { "cmdlinepart", NULL, };
74
/* Geometry and addressing state for the chip array behind this controller.
 * The shift/mask fields are precomputed from the detected device's page,
 * block and chip sizes so that a linear mtd offset can be decomposed into
 * (chip, page, column) with plain shifts — see split_addr(). */
struct tegra_nand_chip {
	spinlock_t lock;
	uint32_t chipsize;	/* bytes per physical chip */
	int num_chips;		/* number of populated chip selects */
	int curr_chip;		/* currently selected chip, -1 == none */

	/* addr >> chip_shift == chip number */
	uint32_t chip_shift;
	/* (addr >> page_shift) & page_mask == page number within chip */
	uint32_t page_shift;
	uint32_t page_mask;
	/* column within page */
	uint32_t column_mask;
	/* addr >> block_shift == block number (across the whole mtd dev, not
	 * just a single chip. */
	uint32_t block_shift;

	void *priv;
};
94
/* Per-controller driver state, embedding both the mtd device and the chip
 * geometry. One instance exists per probed platform device. */
struct tegra_nand_info {
	struct tegra_nand_chip chip;
	struct mtd_info mtd;
	struct tegra_nand_platform *plat;
	struct device *dev;
	struct mtd_partition *parts;

	/* synchronizes access to accessing the actual NAND controller */
	struct mutex lock;
	/* partial_unaligned_rw_buffer is a temporary bounce buffer used when
	 * reading unaligned data from nand pages or when the data to be read
	 * is less than the nand page size. */
	uint8_t *partial_unaligned_rw_buffer;

	/* coherent DMA buffer for spare-area (tag) transfers */
	void *oob_dma_buf;
	dma_addr_t oob_dma_addr;
	/* ecc error vector info (offset into page and data mask to apply) */
	void *ecc_buf;
	dma_addr_t ecc_addr;
	/* ecc error status words captured in the IRQ handler (DEC_STATUS) */
	uint32_t *ecc_errs;
	uint32_t num_ecc_errs;
	uint32_t max_ecc_errs;
	spinlock_t ecc_lock;	/* protects ecc_errs/num_ecc_errs */

	/* shadow copies built up before a transfer is kicked off */
	uint32_t command_reg;
	uint32_t config_reg;
	uint32_t dmactrl_reg;

	struct completion cmd_complete;
	struct completion dma_complete;

	/* bad block bitmap: 1 == good, 0 == bad/unknown */
	unsigned long *bb_bitmap;

	struct clk *clk;
	uint32_t is_data_bus_width_16;	/* nonzero for x16 parts */
	uint32_t device_id;
	uint32_t vendor_id;
	uint32_t dev_parms;
	uint32_t num_bad_blocks;
};
138#define MTD_TO_INFO(mtd) container_of((mtd), struct tegra_nand_info, mtd)
139
140/* 64 byte oob block info for large page (== 2KB) device
141 *
142 * OOB flash layout for Tegra with Reed-Solomon 4 symbol correct ECC:
143 * Skipped bytes(4)
144 * Main area Ecc(36)
145 * Tag data(20)
146 * Tag data Ecc(4)
147 *
148 * Yaffs2 will use 16 tag bytes.
149 */
150
/* ECC layout for 64-byte-OOB (2KB page) parts, matching the map above:
 * bytes 0-3 skipped (bad-block marker area), bytes 4-39 main-area RS ECC,
 * 20 free tag bytes at offset 40; the 4-byte tag ECC that follows is
 * handled by hardware and not exposed here. */
static struct nand_ecclayout tegra_nand_oob_64 = {
	.eccbytes = 36,
	.eccpos = {
		   4, 5, 6, 7, 8, 9, 10, 11, 12,
		   13, 14, 15, 16, 17, 18, 19, 20, 21,
		   22, 23, 24, 25, 26, 27, 28, 29, 30,
		   31, 32, 33, 34, 35, 36, 37, 38, 39,
		   },
	.oobavail = 20,
	.oobfree = {
		    {.offset = 40,
		     .length = 20,
		     },
		    },
};
166
/* ECC layout for 128-byte-OOB (4KB page) parts: 4 skipped bytes, 72 ECC
 * bytes, then 48 free tag bytes at offset 76. Only the first 63 ECC
 * positions fit in nand_ecclayout's fixed-size eccpos[]; since hardware
 * ECC is used the truncated tail is harmless (see comment below). */
static struct nand_ecclayout tegra_nand_oob_128 = {
	.eccbytes = 72,
	.eccpos = {
		   4, 5, 6, 7, 8, 9, 10, 11, 12,
		   13, 14, 15, 16, 17, 18, 19, 20, 21,
		   22, 23, 24, 25, 26, 27, 28, 29, 30,
		   31, 32, 33, 34, 35, 36, 37, 38, 39,
		   40, 41, 42, 43, 44, 45, 46, 47, 48,
		   49, 50, 51, 52, 53, 54, 55, 56, 57,
		   58, 59, 60, 61, 62, 63, 64, 65, 66,
		   /* ECC POS is only of size 64 bytes so commenting the remaining
		    * bytes here. As the driver uses Hardware ECC there is
		    * no issue with it
		    */
		   /*67, 68, 69, 70, 71, 72, 73, 74, 75, */
		   },
	.oobavail = 48,
	.oobfree = {
		    {.offset = 76,
		     .length = 48,
		     },
		    },
};
190
191static struct nand_flash_dev *find_nand_flash_device(int dev_id)
192{
193 struct nand_flash_dev *dev = &nand_flash_ids[0];
194
195 while (dev->name && dev->id != dev_id)
196 dev++;
197 return dev->name ? dev : NULL;
198}
199
200static struct nand_manufacturers *find_nand_flash_vendor(int vendor_id)
201{
202 struct nand_manufacturers *vendor = &nand_manuf_ids[0];
203
204 while (vendor->id && vendor->id != vendor_id)
205 vendor++;
206 return vendor->id ? vendor : NULL;
207}
208
/* Address -> printable-name table for every controller register, used only
 * by dump_nand_regs(). REG_NAME expands to an {addr, "name"} pair via the
 * preprocessor stringize operator and is undefined again right after. */
#define REG_NAME(name) { name, #name }
static struct {
	uint32_t addr;
	char *name;
} reg_names[] = {
	REG_NAME(COMMAND_REG),
	REG_NAME(STATUS_REG),
	REG_NAME(ISR_REG),
	REG_NAME(IER_REG),
	REG_NAME(CONFIG_REG),
	REG_NAME(TIMING_REG),
	REG_NAME(RESP_REG),
	REG_NAME(TIMING2_REG),
	REG_NAME(CMD_REG1),
	REG_NAME(CMD_REG2),
	REG_NAME(ADDR_REG1),
	REG_NAME(ADDR_REG2),
	REG_NAME(DMA_MST_CTRL_REG),
	REG_NAME(DMA_CFG_A_REG),
	REG_NAME(DMA_CFG_B_REG),
	REG_NAME(FIFO_CTRL_REG),
	REG_NAME(DATA_BLOCK_PTR_REG),
	REG_NAME(TAG_PTR_REG),
	REG_NAME(ECC_PTR_REG),
	REG_NAME(DEC_STATUS_REG),
	REG_NAME(HWSTATUS_CMD_REG),
	REG_NAME(HWSTATUS_MASK_REG),
	{0, NULL},	/* sentinel */
};

#undef REG_NAME
240
241static int dump_nand_regs(void)
242{
243 int i = 0;
244
245 TEGRA_DBG("%s: dumping registers\n", __func__);
246 while (reg_names[i].name != NULL) {
247 TEGRA_DBG("%s = 0x%08x\n", reg_names[i].name,
248 readl(reg_names[i].addr));
249 i++;
250 }
251 TEGRA_DBG("%s: end of reg dump\n", __func__);
252 return 1;
253}
254
255static inline void enable_ints(struct tegra_nand_info *info, uint32_t mask)
256{
257 (void)info;
258 writel(readl(IER_REG) | mask, IER_REG);
259}
260
261static inline void disable_ints(struct tegra_nand_info *info, uint32_t mask)
262{
263 (void)info;
264 writel(readl(IER_REG) & ~mask, IER_REG);
265}
266
267static inline void
268split_addr(struct tegra_nand_info *info, loff_t offset, int *chipnr,
269 uint32_t *page, uint32_t *column)
270{
271 *chipnr = (int)(offset >> info->chip.chip_shift);
272 *page = (offset >> info->chip.page_shift) & info->chip.page_mask;
273 *column = offset & info->chip.column_mask;
274}
275
/* Interrupt handler: completes cmd_complete on command-done, records ECC
 * decode status for the read path, completes dma_complete on DMA-done, and
 * logs FIFO under/overruns. All pending ISR bits (except the low two) are
 * acknowledged on exit.
 *
 * NOTE(review): ecc_errs[] is appended without checking num_ecc_errs
 * against max_ecc_errs — presumably the array is sized generously at
 * probe time; confirm before relying on it. */
static irqreturn_t tegra_nand_irq(int irq, void *dev_id)
{
	struct tegra_nand_info *info = dev_id;
	uint32_t isr;
	uint32_t ier;
	uint32_t dma_ctrl;
	uint32_t tmp;

	isr = readl(ISR_REG);
	ier = readl(IER_REG);
	dma_ctrl = readl(DMA_MST_CTRL_REG);
#ifdef DEBUG_DUMP_IRQ
	pr_info("IRQ: ISR=0x%08x IER=0x%08x DMA_IS=%d DMA_IE=%d\n",
		isr, ier, !!(dma_ctrl & (1 << 20)), !!(dma_ctrl & (1 << 28)));
#endif
	if (isr & ISR_CMD_DONE) {
		/* only complete if the GO bit has really cleared */
		if (likely(!(readl(COMMAND_REG) & COMMAND_GO)))
			complete(&info->cmd_complete);
		else
			pr_err("tegra_nand_irq: Spurious cmd done irq!\n");
	}

	if (isr & ISR_ECC_ERR) {
		/* always want to read the decode status so xfers don't stall. */
		tmp = readl(DEC_STATUS_REG);

		/* was ECC check actually enabled */
		if ((ier & IER_ECC_ERR)) {
			unsigned long flags;
			spin_lock_irqsave(&info->ecc_lock, flags);
			info->ecc_errs[info->num_ecc_errs++] = tmp;
			spin_unlock_irqrestore(&info->ecc_lock, flags);
		}
	}

	if ((dma_ctrl & DMA_CTRL_IS_DMA_DONE) &&
	    (dma_ctrl & DMA_CTRL_IE_DMA_DONE)) {
		complete(&info->dma_complete);
		/* writing the status bit back acknowledges it */
		writel(dma_ctrl, DMA_MST_CTRL_REG);
	}

	if ((isr & ISR_UND) && (ier & IER_UND))
		pr_err("%s: fifo underrun.\n", __func__);

	if ((isr & ISR_OVR) && (ier & IER_OVR))
		pr_err("%s: fifo overrun.\n", __func__);

	/* clear ALL interrupts?! */
	writel(isr & 0xfffc, ISR_REG);

	return IRQ_HANDLED;
}
328
329static inline int tegra_nand_is_cmd_done(struct tegra_nand_info *info)
330{
331 return (readl(COMMAND_REG) & COMMAND_GO) ? 0 : 1;
332}
333
334static int tegra_nand_wait_cmd_done(struct tegra_nand_info *info)
335{
336 uint32_t timeout = TIMEOUT; /* TODO: make this realistic */
337 int ret;
338
339 ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
340
341#ifdef TEGRA_NAND_DEBUG_PEDANTIC
342 BUG_ON(!ret && dump_nand_regs());
343#endif
344
345 return ret ? 0 : ret;
346}
347
/* Record which chip-select subsequent commands should target.
 * -1 means "none selected"; anything >= max_chips is a driver bug. */
static inline void select_chip(struct tegra_nand_info *info, int chipnr)
{
	BUG_ON(chipnr != -1 && chipnr >= info->plat->max_chips);
	info->chip.curr_chip = chipnr;
}
353
/* Program the hardware status-monitor so the controller can itself poll
 * the device's STATUS command: watch bit 0 of the status byte (expect 0)
 * and the ready/busy bit (expect ready). */
static void cfg_hwstatus_mon(struct tegra_nand_info *info)
{
	uint32_t val;

	val = (HWSTATUS_RDSTATUS_MASK(1) |
	       HWSTATUS_RDSTATUS_EXP_VAL(0) |
	       HWSTATUS_RBSY_MASK(NAND_STATUS_READY) |
	       HWSTATUS_RBSY_EXP_VAL(NAND_STATUS_READY));
	writel(NAND_CMD_STATUS, HWSTATUS_CMD_REG);
	writel(val, HWSTATUS_MASK_REG);
}
365
/* Tells the NAND controller to initiate the command previously staged in
 * info->command_reg (and the CMD/ADDR/CONFIG registers), then waits for
 * the command-done interrupt. Returns 0 on success, -ETIMEDOUT on
 * timeout. Caller must hold info->lock. */
static int tegra_nand_go(struct tegra_nand_info *info)
{
	/* a previous command must have fully finished before issuing a new one */
	BUG_ON(!tegra_nand_is_cmd_done(info));

	INIT_COMPLETION(info->cmd_complete);
	writel(info->command_reg | COMMAND_GO, COMMAND_REG);

	if (unlikely(tegra_nand_wait_cmd_done(info))) {
		/* TODO: abort command if needed? */
		pr_err("%s: Timeout while waiting for command\n", __func__);
		return -ETIMEDOUT;
	}

	/* TODO: maybe wait for dma here? */
	return 0;
}
383
/* Stage a READID command for the currently selected chip: PIO read of
 * 4 bytes (TRANS_SIZE(3)) with a single zero address byte. The caller
 * kicks it off via tegra_nand_go(). */
static void tegra_nand_prep_readid(struct tegra_nand_info *info)
{
	info->command_reg =
	    (COMMAND_CLE | COMMAND_ALE | COMMAND_PIO | COMMAND_RX |
	     COMMAND_ALE_BYTE_SIZE(0) | COMMAND_TRANS_SIZE(3) |
	     (COMMAND_CE(info->chip.curr_chip)));
	writel(NAND_CMD_READID, CMD_REG1);
	writel(0, CMD_REG2);
	writel(0, ADDR_REG1);
	writel(0, ADDR_REG2);
	writel(0, CONFIG_REG);
}
396
/* Issue READID on the currently selected chip and return the raw 4-byte
 * id word (vendor id in the low byte) through *chip_id.
 * Returns 0 on success or the error from tegra_nand_go(). */
static int
tegra_nand_cmd_readid(struct tegra_nand_info *info, uint32_t *chip_id)
{
	int err;

#ifdef TEGRA_NAND_DEBUG_PEDANTIC
	BUG_ON(info->chip.curr_chip == -1);
#endif

	tegra_nand_prep_readid(info);
	err = tegra_nand_go(info);
	if (err != 0)
		return err;

	*chip_id = readl(RESP_REG);
	return 0;
}
414
/* Issue a STATUS command to the current chip and return the status byte
 * through *status. Assumes the right locks are held (info->lock). */
static int nand_cmd_get_status(struct tegra_nand_info *info, uint32_t *status)
{
	int err;

	info->command_reg = (COMMAND_CLE | COMMAND_PIO | COMMAND_RX |
			     COMMAND_RBSY_CHK |
			     (COMMAND_CE(info->chip.curr_chip)));
	writel(NAND_CMD_STATUS, CMD_REG1);
	writel(0, CMD_REG2);
	writel(0, ADDR_REG1);
	writel(0, ADDR_REG2);
	writel(CONFIG_COM_BSY, CONFIG_REG);

	err = tegra_nand_go(info);
	if (err != 0)
		return err;

	/* only the low byte of the response is the device status */
	*status = readl(RESP_REG) & 0xff;
	return 0;
}
436
/* Check whether the block containing offs is bad by reading the first two
 * bytes of the OOB area of its first two pages (factory bad-block marker
 * convention: non-0xffff means bad). Good blocks are cached in bb_bitmap
 * so the flash is only probed once per block.
 * Returns 0 == good, 1 == bad, <0 on command error.
 * Must be called with info->lock held. */
static int check_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct tegra_nand_info *info = MTD_TO_INFO(mtd);
	uint32_t block = offs >> info->chip.block_shift;
	int chipnr;
	uint32_t page;
	uint32_t column;
	int ret = 0;
	int i;

	/* fast path: block already known to be good */
	if (info->bb_bitmap[BIT_WORD(block)] & BIT_MASK(block))
		return 0;

	/* round down to the start of the block */
	offs &= ~(mtd->erasesize - 1);

	if (info->is_data_bus_width_16)
		writel(CONFIG_COM_BSY | CONFIG_BUS_WIDTH, CONFIG_REG);
	else
		writel(CONFIG_COM_BSY, CONFIG_REG);

	split_addr(info, offs, &chipnr, &page, &column);
	select_chip(info, chipnr);

	column = mtd->writesize & 0xffff;	/* force to be the offset of OOB */

	/* check first two pages of the block */
	if (info->is_data_bus_width_16)
		column = column >> 1;	/* x16 parts address by word */
	for (i = 0; i < 2; ++i) {
		info->command_reg =
		    COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE |
		    COMMAND_ALE | COMMAND_ALE_BYTE_SIZE(4) | COMMAND_RX |
		    COMMAND_PIO | COMMAND_TRANS_SIZE(1) | COMMAND_A_VALID |
		    COMMAND_RBSY_CHK | COMMAND_SEC_CMD;
		writel(NAND_CMD_READ0, CMD_REG1);
		writel(NAND_CMD_READSTART, CMD_REG2);

		writel(column | ((page & 0xffff) << 16), ADDR_REG1);
		writel((page >> 16) & 0xff, ADDR_REG2);

		/* poison the response register so a stale read is detectable */
		writel(0xaa55aa55, RESP_REG);
		ret = tegra_nand_go(info);
		if (ret != 0) {
			pr_info("baaaaaad\n");
			goto out;
		}

		/* non-0xffff marker bytes == factory-marked bad block */
		if ((readl(RESP_REG) & 0xffff) != 0xffff) {
			ret = 1;
			goto out;
		}

		/* Note: The assumption here is that we cannot cross a chip
		 * boundary since we are only looking at the first 2 pages in
		 * a block, i.e. erasesize > writesize ALWAYS */
		page++;
	}

out:
	/* update the bitmap if the block is good */
	if (ret == 0)
		set_bit(block, info->bb_bitmap);
	return ret;
}
503
/* mtd block_isbad(): bounds-check the offset, then run the locked
 * bad-block probe. Returns 0 good, 1 bad, negative errno on error. */
static int tegra_nand_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct tegra_nand_info *info = MTD_TO_INFO(mtd);
	int ret;

	if (offs >= mtd->size)
		return -EINVAL;

	mutex_lock(&info->lock);
	ret = check_block_isbad(mtd, offs);
	mutex_unlock(&info->lock);

#if 0
	if (ret > 0)
		pr_info("block @ 0x%llx is bad.\n", offs);
	else if (ret < 0)
		pr_err("error checking block @ 0x%llx for badness.\n", offs);
#endif

	return ret;
}
525
/* mtd block_markbad(): clear the block in the good-block bitmap, bump the
 * bad-block statistics, and write 0x00 marker bytes into the OOB area of
 * the block's first two pages so other scanners also see it as bad. */
static int tegra_nand_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct tegra_nand_info *info = MTD_TO_INFO(mtd);
	uint32_t block = offs >> info->chip.block_shift;
	int chipnr;
	uint32_t page;
	uint32_t column;
	int ret = 0;
	int i;

	if (offs >= mtd->size)
		return -EINVAL;

	pr_info("tegra_nand: setting block %d bad\n", block);

	mutex_lock(&info->lock);
	offs &= ~(mtd->erasesize - 1);	/* start of block */

	/* mark the block bad in our bitmap */
	clear_bit(block, info->bb_bitmap);
	mtd->ecc_stats.badblocks++;

	if (info->is_data_bus_width_16)
		writel(CONFIG_COM_BSY | CONFIG_BUS_WIDTH, CONFIG_REG);
	else
		writel(CONFIG_COM_BSY, CONFIG_REG);

	split_addr(info, offs, &chipnr, &page, &column);
	select_chip(info, chipnr);

	column = mtd->writesize & 0xffff;	/* force to be the offset of OOB */
	if (info->is_data_bus_width_16)
		column = column >> 1;	/* x16 parts address by word */
	/* write to first two pages in the block */
	for (i = 0; i < 2; ++i) {
		info->command_reg =
		    COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE |
		    COMMAND_ALE | COMMAND_ALE_BYTE_SIZE(4) | COMMAND_TX |
		    COMMAND_PIO | COMMAND_TRANS_SIZE(1) | COMMAND_A_VALID |
		    COMMAND_RBSY_CHK | COMMAND_AFT_DAT | COMMAND_SEC_CMD;
		writel(NAND_CMD_SEQIN, CMD_REG1);
		writel(NAND_CMD_PAGEPROG, CMD_REG2);

		writel(column | ((page & 0xffff) << 16), ADDR_REG1);
		writel((page >> 16) & 0xff, ADDR_REG2);

		/* program zeros as the bad-block marker */
		writel(0x0, RESP_REG);
		ret = tegra_nand_go(info);
		if (ret != 0)
			goto out;

		/* TODO: check if the program op worked? */
		page++;
	}

out:
	mutex_unlock(&info->lock);
	return ret;
}
585
586static int tegra_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
587{
588 struct tegra_nand_info *info = MTD_TO_INFO(mtd);
589 uint32_t num_blocks;
590 uint32_t offs;
591 int chipnr;
592 uint32_t page;
593 uint32_t column;
594 uint32_t status = 0;
595
596 TEGRA_DBG("tegra_nand_erase: addr=0x%08llx len=%lld\n", instr->addr,
597 instr->len);
598
599 if ((instr->addr + instr->len) > mtd->size) {
600 pr_err("tegra_nand_erase: Can't erase past end of device\n");
601 instr->state = MTD_ERASE_FAILED;
602 return -EINVAL;
603 }
604
605 if (instr->addr & (mtd->erasesize - 1)) {
606 pr_err("tegra_nand_erase: addr=0x%08llx not block-aligned\n",
607 instr->addr);
608 instr->state = MTD_ERASE_FAILED;
609 return -EINVAL;
610 }
611
612 if (instr->len & (mtd->erasesize - 1)) {
613 pr_err("tegra_nand_erase: len=%lld not block-aligned\n",
614 instr->len);
615 instr->state = MTD_ERASE_FAILED;
616 return -EINVAL;
617 }
618
619 instr->fail_addr = 0xffffffff;
620
621 mutex_lock(&info->lock);
622
623 instr->state = MTD_ERASING;
624
625 offs = instr->addr;
626 num_blocks = instr->len >> info->chip.block_shift;
627
628 select_chip(info, -1);
629
630 while (num_blocks--) {
631 split_addr(info, offs, &chipnr, &page, &column);
632 if (chipnr != info->chip.curr_chip)
633 select_chip(info, chipnr);
634 TEGRA_DBG("tegra_nand_erase: addr=0x%08x, page=0x%08x\n", offs,
635 page);
636
637 if (check_block_isbad(mtd, offs)) {
638 pr_info("%s: skipping bad block @ 0x%08x\n", __func__,
639 offs);
640 goto next_block;
641 }
642
643 info->command_reg =
644 COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE |
645 COMMAND_ALE | COMMAND_ALE_BYTE_SIZE(2) |
646 COMMAND_RBSY_CHK | COMMAND_SEC_CMD;
647 writel(NAND_CMD_ERASE1, CMD_REG1);
648 writel(NAND_CMD_ERASE2, CMD_REG2);
649
650 writel(page & 0xffffff, ADDR_REG1);
651 writel(0, ADDR_REG2);
652 writel(CONFIG_COM_BSY, CONFIG_REG);
653
654 if (tegra_nand_go(info) != 0) {
655 instr->fail_addr = offs;
656 goto out_err;
657 }
658
659 /* TODO: do we want a timeout here? */
660 if ((nand_cmd_get_status(info, &status) != 0) ||
661 (status & NAND_STATUS_FAIL) ||
662 ((status & NAND_STATUS_READY) != NAND_STATUS_READY)) {
663 instr->fail_addr = offs;
664 pr_info("%s: erase failed @ 0x%08x (stat=0x%08x)\n",
665 __func__, offs, status);
666 goto out_err;
667 }
668next_block:
669 offs += mtd->erasesize;
670 }
671
672 instr->state = MTD_ERASE_DONE;
673 mutex_unlock(&info->lock);
674 mtd_erase_callback(instr);
675 return 0;
676
677out_err:
678 instr->state = MTD_ERASE_FAILED;
679 mutex_unlock(&info->lock);
680 return -EIO;
681}
682
683static inline void dump_mtd_oob_ops(struct mtd_oob_ops *ops)
684{
685 pr_info("%s: oob_ops: mode=%s len=0x%x ooblen=0x%x "
686 "ooboffs=0x%x dat=0x%p oob=0x%p\n", __func__,
687 (ops->mode == MTD_OOB_AUTO ? "MTD_OOB_AUTO" :
688 (ops->mode ==
689 MTD_OOB_PLACE ? "MTD_OOB_PLACE" : "MTD_OOB_RAW")), ops->len,
690 ops->ooblen, ops->ooboffs, ops->datbuf, ops->oobbuf);
691}
692
693static int
694tegra_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
695 size_t *retlen, uint8_t *buf)
696{
697 struct mtd_oob_ops ops;
698 int ret;
699
700 pr_debug("%s: read: from=0x%llx len=0x%x\n", __func__, from, len);
701 ops.mode = MTD_OOB_AUTO;
702 ops.len = len;
703 ops.datbuf = buf;
704 ops.oobbuf = NULL;
705 ret = mtd->read_oob(mtd, from, &ops);
706 *retlen = ops.retlen;
707 return ret;
708}
709
710static void
711correct_ecc_errors_on_blank_page(struct tegra_nand_info *info, u8 *datbuf,
712 u8 *oobbuf, unsigned int a_len,
713 unsigned int b_len)
714{
715 int i;
716 int all_ff = 1;
717 unsigned long flags;
718
719 spin_lock_irqsave(&info->ecc_lock, flags);
720 if (info->num_ecc_errs) {
721 if (datbuf) {
722 for (i = 0; i < a_len; i++)
723 if (datbuf[i] != 0xFF)
724 all_ff = 0;
725 }
726 if (oobbuf) {
727 for (i = 0; i < b_len; i++)
728 if (oobbuf[i] != 0xFF)
729 all_ff = 0;
730 }
731 if (all_ff)
732 info->num_ecc_errs = 0;
733 }
734 spin_unlock_irqrestore(&info->ecc_lock, flags);
735}
736
737static void update_ecc_counts(struct tegra_nand_info *info, int check_oob)
738{
739 unsigned long flags;
740 int i;
741
742 spin_lock_irqsave(&info->ecc_lock, flags);
743 for (i = 0; i < info->num_ecc_errs; ++i) {
744 /* correctable */
745 info->mtd.ecc_stats.corrected +=
746 DEC_STATUS_ERR_CNT(info->ecc_errs[i]);
747
748 /* uncorrectable */
749 if (info->ecc_errs[i] & DEC_STATUS_ECC_FAIL_A)
750 info->mtd.ecc_stats.failed++;
751 if (check_oob && (info->ecc_errs[i] & DEC_STATUS_ECC_FAIL_B))
752 info->mtd.ecc_stats.failed++;
753 }
754 info->num_ecc_errs = 0;
755 spin_unlock_irqrestore(&info->ecc_lock, flags);
756}
757
758static inline void clear_regs(struct tegra_nand_info *info)
759{
760 info->command_reg = 0;
761 info->config_reg = 0;
762 info->dmactrl_reg = 0;
763}
764
/* Build up the command/config/dma shadow registers and program the DMA
 * block pointers for a page transfer.
 *   rx       - nonzero for a read (READ0/READSTART), zero for a program
 *              (SEQIN/PAGEPROG)
 *   do_ecc   - enable hardware RS ECC on both the data and tag regions
 *   data_len - bytes of main data (0 == oob-only transfer)
 *   oob_len  - bytes of tag data (0 == data-only transfer)
 * Only stages state; the caller writes CONFIG/DMA_MST_CTRL and then calls
 * tegra_nand_go(). */
static void
prep_transfer_dma(struct tegra_nand_info *info, int rx, int do_ecc,
		  uint32_t page, uint32_t column, dma_addr_t data_dma,
		  uint32_t data_len, dma_addr_t oob_dma, uint32_t oob_len)
{
	uint32_t tag_sz = oob_len;

	/* 2KB pages -> sel 3, 4KB -> sel 4, per the >> 11 encoding */
	uint32_t page_size_sel = (info->mtd.writesize >> 11) + 2;
#if 0
	pr_info("%s: rx=%d ecc=%d page=%d col=%d data_dma=0x%x "
		"data_len=0x%08x oob_dma=0x%x ooblen=%d\n", __func__,
		rx, do_ecc, page, column, data_dma, data_len, oob_dma, oob_len);
#endif

	info->command_reg =
	    COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE | COMMAND_ALE |
	    COMMAND_ALE_BYTE_SIZE(4) | COMMAND_SEC_CMD | COMMAND_RBSY_CHK |
	    COMMAND_TRANS_SIZE(8);

	info->config_reg = (CONFIG_PIPELINE_EN | CONFIG_EDO_MODE |
			    CONFIG_COM_BSY);
	if (info->is_data_bus_width_16)
		info->config_reg |= CONFIG_BUS_WIDTH;
	info->dmactrl_reg = (DMA_CTRL_DMA_GO |
			     DMA_CTRL_DMA_PERF_EN | DMA_CTRL_IE_DMA_DONE |
			     DMA_CTRL_IS_DMA_DONE | DMA_CTRL_BURST_SIZE(4));

	if (rx) {
		if (do_ecc)
			info->config_reg |= CONFIG_HW_ERR_CORRECTION;
		info->command_reg |= COMMAND_RX;
		info->dmactrl_reg |= DMA_CTRL_REUSE_BUFFER;
		writel(NAND_CMD_READ0, CMD_REG1);
		writel(NAND_CMD_READSTART, CMD_REG2);
	} else {
		info->command_reg |= (COMMAND_TX | COMMAND_AFT_DAT);
		info->dmactrl_reg |= DMA_CTRL_DIR;	/* DMA_RD == TX */
		writel(NAND_CMD_SEQIN, CMD_REG1);
		writel(NAND_CMD_PAGEPROG, CMD_REG2);
	}

	if (data_len) {
		if (do_ecc)
			info->config_reg |= CONFIG_HW_ECC | CONFIG_ECC_SEL;
		info->config_reg |=
		    CONFIG_PAGE_SIZE_SEL(page_size_sel) | CONFIG_TVALUE(0) |
		    CONFIG_SKIP_SPARE | CONFIG_SKIP_SPARE_SEL(0);
		info->command_reg |= COMMAND_A_VALID;
		info->dmactrl_reg |= DMA_CTRL_DMA_EN_A;
		writel(DMA_CFG_BLOCK_SIZE(data_len - 1), DMA_CFG_A_REG);
		writel(data_dma, DATA_BLOCK_PTR_REG);
	} else {
		/* oob-only: point the column past the main data area */
		column = info->mtd.writesize;
		if (do_ecc)
			column += info->mtd.ecclayout->oobfree[0].offset;
		writel(0, DMA_CFG_A_REG);
		writel(0, DATA_BLOCK_PTR_REG);
	}

	if (oob_len) {
		if (do_ecc) {
			oob_len = info->mtd.oobavail;
			tag_sz = info->mtd.oobavail;
			tag_sz += 4;	/* size of tag ecc */
			if (rx)
				oob_len += 4;	/* size of tag ecc */
			info->config_reg |= CONFIG_ECC_EN_TAG;
		}
		if (data_len && rx)
			oob_len += 4;	/* num of skipped bytes */

		info->command_reg |= COMMAND_B_VALID;
		info->config_reg |= CONFIG_TAG_BYTE_SIZE(tag_sz - 1);
		info->dmactrl_reg |= DMA_CTRL_DMA_EN_B;
		writel(DMA_CFG_BLOCK_SIZE(oob_len - 1), DMA_CFG_B_REG);
		writel(oob_dma, TAG_PTR_REG);
	} else {
		writel(0, DMA_CFG_B_REG);
		writel(0, TAG_PTR_REG);
	}
	/* For x16 parts we need to divide the column number by 2 */
	if (info->is_data_bus_width_16)
		column = column >> 1;
	writel((column & 0xffff) | ((page & 0xffff) << 16), ADDR_REG1);
	writel((page >> 16) & 0xff, ADDR_REG2);
}
851
/* Map a kernel buffer for DMA. Handles both lowmem addresses
 * (virt_addr_valid) and vmalloc addresses; a vmalloc buffer must not
 * cross a page boundary since only a single page can be looked up —
 * in that case ~0 is returned (an invalid dma address). */
static dma_addr_t
tegra_nand_dma_map(struct device *dev, void *addr, size_t size,
		   enum dma_data_direction dir)
{
	struct page *page;
	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
	if (virt_addr_valid(addr))
		page = virt_to_page(addr);
	else {
		if (WARN_ON(size + offset > PAGE_SIZE))
			return ~0;
		page = vmalloc_to_page(addr);
	}
	return dma_map_page(dev, page, offset, size, dir);
}
867
/* sysfs: report the NAND manufacturer (vendor) id read at probe time. */
static ssize_t show_vendor_id(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tegra_nand_info *info = dev_get_drvdata(dev);
	return sprintf(buf, "0x%x\n", info->vendor_id);
}
874
875static DEVICE_ATTR(vendor_id, S_IRUSR, show_vendor_id, NULL);
876
/* sysfs: report the NAND device id byte read at probe time. */
static ssize_t show_device_id(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tegra_nand_info *info = dev_get_drvdata(dev);
	return sprintf(buf, "0x%x\n", info->device_id);
}
883
884static DEVICE_ATTR(device_id, S_IRUSR, show_device_id, NULL);
885
/* sysfs: report the total flash size in bytes. */
static ssize_t show_flash_size(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct tegra_nand_info *info = dev_get_drvdata(dev);
	struct mtd_info *mtd = &info->mtd;
	return sprintf(buf, "%llu bytes\n", mtd->size);
}
893
894static DEVICE_ATTR(flash_size, S_IRUSR, show_flash_size, NULL);
895
/* sysfs: report the bad-block count discovered at probe time. */
static ssize_t show_num_bad_blocks(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct tegra_nand_info *info = dev_get_drvdata(dev);
	return sprintf(buf, "%d\n", info->num_bad_blocks);
}
902
903static DEVICE_ATTR(num_bad_blocks, S_IRUSR, show_num_bad_blocks, NULL);
904
/* sysfs: dump the good-block bitmap, one hex word per line.
 * NOTE(review): the loop only emits full words — if the block count is
 * not a multiple of BITS_PER_LONG the trailing partial word is silently
 * omitted; confirm whether that is intended. */
static ssize_t show_bb_bitmap(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tegra_nand_info *info = dev_get_drvdata(dev);
	struct mtd_info *mtd = &info->mtd;
	int num_blocks = mtd->size >> info->chip.block_shift, i, ret = 0, size =
	    0;

	for (i = 0; i < num_blocks / (8 * sizeof(unsigned long)); i++) {
		size = sprintf(buf, "0x%lx\n", info->bb_bitmap[i]);
		ret += size;
		buf += size;
	}
	return ret;
}
920
921static DEVICE_ATTR(bb_bitmap, S_IRUSR, show_bb_bitmap, NULL);
922
/*
 * Core page-read path: independent of the requested OOB mode, reads main
 * data plus the OOB bytes from the oobfree areas given by the
 * nand_ecclayout, one page per DMA transfer. Unaligned or sub-page reads
 * are bounced through partial_unaligned_rw_buffer since the controller DMA
 * always moves a full page.
 * Returns 0, -EUCLEAN (corrected ECC errors), -EBADMSG (uncorrectable),
 * or a negative errno on command/DMA failure.
 */
static int
do_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct tegra_nand_info *info = MTD_TO_INFO(mtd);
	struct mtd_ecc_stats old_ecc_stats;
	int chipnr;
	uint32_t page;
	uint32_t column;
	uint8_t *datbuf = ops->datbuf;
	uint8_t *oobbuf = ops->oobbuf;
	uint32_t ooblen = oobbuf ? ops->ooblen : 0;
	uint32_t oobsz;
	uint32_t page_count;
	int err;
	int unaligned = from & info->chip.column_mask;
	uint32_t len = datbuf ? ((ops->len) + unaligned) : 0;
	int do_ecc = 1;
	dma_addr_t datbuf_dma_addr = 0;

#if 0
	dump_mtd_oob_ops(ops);
#endif
	ops->retlen = 0;
	ops->oobretlen = 0;
	/* round the start down to a page-aligned offset */
	from = from - unaligned;

	/* Don't care about the MTD_OOB_ value field always use oobavail and ecc. */
	oobsz = mtd->oobavail;
	if (unlikely(ops->oobbuf && ops->ooblen > oobsz)) {
		pr_err("%s: can't read OOB from multiple pages (%d > %d)\n",
		       __func__, ops->ooblen, oobsz);
		return -EINVAL;
	} else if (ops->oobbuf && !len) {
		page_count = 1;
	} else {
		page_count =
		    (uint32_t) ((len + mtd->writesize - 1) / mtd->writesize);
	}

	mutex_lock(&info->lock);

	/* snapshot stats so corrected/failed deltas can be detected below */
	memcpy(&old_ecc_stats, &mtd->ecc_stats, sizeof(old_ecc_stats));

	if (do_ecc) {
		enable_ints(info, IER_ECC_ERR);
		writel(info->ecc_addr, ECC_PTR_REG);
	} else
		disable_ints(info, IER_ECC_ERR);

	split_addr(info, from, &chipnr, &page, &column);
	select_chip(info, chipnr);

	/* reset it to point back to beginning of page */
	from -= column;

	while (page_count--) {
		int a_len = min(mtd->writesize - column, len);
		int b_len = min(oobsz, ooblen);
		int temp_len = 0;
		char *temp_buf = NULL;
		/* Take care when read is of less than page size.
		 * Otherwise there will be kernel Panic due to DMA timeout */
		if (((a_len < mtd->writesize) && len) || unaligned) {
			temp_len = a_len;
			a_len = mtd->writesize;
			temp_buf = datbuf;
			datbuf = info->partial_unaligned_rw_buffer;
		}
#if 0
		pr_info("%s: chip:=%d page=%d col=%d\n", __func__, chipnr,
			page, column);
#endif

		clear_regs(info);
		if (datbuf)
			datbuf_dma_addr =
			    tegra_nand_dma_map(info->dev, datbuf, a_len,
					       DMA_FROM_DEVICE);

		prep_transfer_dma(info, 1, do_ecc, page, column,
				  datbuf_dma_addr, a_len, info->oob_dma_addr,
				  b_len);
		writel(info->config_reg, CONFIG_REG);
		writel(info->dmactrl_reg, DMA_MST_CTRL_REG);

		INIT_COMPLETION(info->dma_complete);
		err = tegra_nand_go(info);
		if (err != 0)
			goto out_err;

		if (!wait_for_completion_timeout(&info->dma_complete, TIMEOUT)) {
			pr_err("%s: dma completion timeout\n", __func__);
			dump_nand_regs();
			err = -ETIMEDOUT;
			goto out_err;
		}

		/*pr_info("tegra_read_oob: DMA complete\n"); */

		/* if we are here, transfer is done */
		if (datbuf)
			dma_unmap_page(info->dev, datbuf_dma_addr, a_len,
				       DMA_FROM_DEVICE);

		if (oobbuf) {
			uint32_t ofs = datbuf && oobbuf ? 4 : 0;	/* skipped bytes */
			memcpy(oobbuf, info->oob_dma_buf + ofs, b_len);
		}

		/* an erased page fails decode; drop its phantom errors */
		correct_ecc_errors_on_blank_page(info, datbuf, oobbuf, a_len,
						 b_len);
		/* Take care when read is of less than page size:
		 * copy the requested slice back out of the bounce buffer */
		if (temp_len) {
			memcpy(temp_buf, datbuf + unaligned,
			       temp_len - unaligned);
			a_len = temp_len;
			datbuf = temp_buf;
		}
		if (datbuf) {
			len -= a_len;
			datbuf += a_len - unaligned;
			ops->retlen += a_len - unaligned;
		}

		if (oobbuf) {
			ooblen -= b_len;
			oobbuf += b_len;
			ops->oobretlen += b_len;
		}

		/* only the first page of the request can be unaligned */
		unaligned = 0;
		update_ecc_counts(info, oobbuf != NULL);

		if (!page_count)
			break;

		from += mtd->writesize;
		column = 0;

		split_addr(info, from, &chipnr, &page, &column);
		if (chipnr != info->chip.curr_chip)
			select_chip(info, chipnr);
	}

	disable_ints(info, IER_ECC_ERR);

	/* translate stat deltas into the mtd return-code convention */
	if (mtd->ecc_stats.failed != old_ecc_stats.failed)
		err = -EBADMSG;
	else if (mtd->ecc_stats.corrected != old_ecc_stats.corrected)
		err = -EUCLEAN;
	else
		err = 0;

	mutex_unlock(&info->lock);
	return err;

out_err:
	ops->retlen = 0;
	ops->oobretlen = 0;

	disable_ints(info, IER_ECC_ERR);
	mutex_unlock(&info->lock);
	return err;
}
1094
1095/* just does some parameter checking and calls do_read_oob */
1096static int
1097tegra_nand_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
1098{
1099 if (ops->datbuf && unlikely((from + ops->len) > mtd->size)) {
1100 pr_err("%s: Can't read past end of device.\n", __func__);
1101 return -EINVAL;
1102 }
1103
1104 if (unlikely(ops->oobbuf && !ops->ooblen)) {
1105 pr_err("%s: Reading 0 bytes from OOB is meaningless\n",
1106 __func__);
1107 return -EINVAL;
1108 }
1109
1110 if (unlikely(ops->mode != MTD_OOB_AUTO)) {
1111 if (ops->oobbuf && ops->datbuf) {
1112 pr_err("%s: can't read OOB + Data in non-AUTO mode.\n",
1113 __func__);
1114 return -EINVAL;
1115 }
1116 if ((ops->mode == MTD_OOB_RAW) && !ops->datbuf) {
1117 pr_err("%s: Raw mode only supports reading data area.\n",
1118 __func__);
1119 return -EINVAL;
1120 }
1121 }
1122
1123 return do_read_oob(mtd, from, ops);
1124}
1125
1126static int
1127tegra_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
1128 size_t *retlen, const uint8_t *buf)
1129{
1130 struct mtd_oob_ops ops;
1131 int ret;
1132
1133 pr_debug("%s: write: to=0x%llx len=0x%x\n", __func__, to, len);
1134 ops.mode = MTD_OOB_AUTO;
1135 ops.len = len;
1136 ops.datbuf = (uint8_t *) buf;
1137 ops.oobbuf = NULL;
1138 ret = mtd->write_oob(mtd, to, &ops);
1139 *retlen = ops.retlen;
1140 return ret;
1141}
1142
1143static int
1144do_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
1145{
1146 struct tegra_nand_info *info = MTD_TO_INFO(mtd);
1147 int chipnr;
1148 uint32_t page;
1149 uint32_t column;
1150 uint8_t *datbuf = ops->datbuf;
1151 uint8_t *oobbuf = ops->oobbuf;
1152 uint32_t len = datbuf ? ops->len : 0;
1153 uint32_t ooblen = oobbuf ? ops->ooblen : 0;
1154 uint32_t oobsz;
1155 uint32_t page_count;
1156 int err;
1157 int do_ecc = 1;
1158 dma_addr_t datbuf_dma_addr = 0;
1159
1160#if 0
1161 dump_mtd_oob_ops(ops);
1162#endif
1163
1164 ops->retlen = 0;
1165 ops->oobretlen = 0;
1166
1167 if (!ops->len)
1168 return 0;
1169
1170 oobsz = mtd->oobavail;
1171
1172 if (unlikely(ops->oobbuf && ops->ooblen > oobsz)) {
1173 pr_err("%s: can't write OOB to multiple pages (%d > %d)\n",
1174 __func__, ops->ooblen, oobsz);
1175 return -EINVAL;
1176 } else if (ops->oobbuf && !len) {
1177 page_count = 1;
1178 } else
1179 page_count =
1180 max((uint32_t) (ops->len / mtd->writesize), (uint32_t) 1);
1181
1182 mutex_lock(&info->lock);
1183
1184 split_addr(info, to, &chipnr, &page, &column);
1185 select_chip(info, chipnr);
1186
1187 while (page_count--) {
1188 int a_len = min(mtd->writesize, len);
1189 int b_len = min(oobsz, ooblen);
1190 int temp_len = 0;
1191 char *temp_buf = NULL;
1192 /* Take care when write is of less than page size. Otherwise
1193 * there will be kernel panic due to dma timeout */
1194 if ((a_len < mtd->writesize) && len) {
1195 temp_len = a_len;
1196 a_len = mtd->writesize;
1197 temp_buf = datbuf;
1198 datbuf = info->partial_unaligned_rw_buffer;
1199 memset(datbuf, 0xff, a_len);
1200 memcpy(datbuf, temp_buf, temp_len);
1201 }
1202
1203 if (datbuf)
1204 datbuf_dma_addr =
1205 tegra_nand_dma_map(info->dev, datbuf, a_len,
1206 DMA_TO_DEVICE);
1207 if (oobbuf)
1208 memcpy(info->oob_dma_buf, oobbuf, b_len);
1209
1210 clear_regs(info);
1211 prep_transfer_dma(info, 0, do_ecc, page, column,
1212 datbuf_dma_addr, a_len, info->oob_dma_addr,
1213 b_len);
1214
1215 writel(info->config_reg, CONFIG_REG);
1216 writel(info->dmactrl_reg, DMA_MST_CTRL_REG);
1217
1218 INIT_COMPLETION(info->dma_complete);
1219 err = tegra_nand_go(info);
1220 if (err != 0)
1221 goto out_err;
1222
1223 if (!wait_for_completion_timeout(&info->dma_complete, TIMEOUT)) {
1224 pr_err("%s: dma completion timeout\n", __func__);
1225 dump_nand_regs();
1226 goto out_err;
1227 }
1228 if (temp_len) {
1229 a_len = temp_len;
1230 datbuf = temp_buf;
1231 }
1232
1233 if (datbuf) {
1234 dma_unmap_page(info->dev, datbuf_dma_addr, a_len,
1235 DMA_TO_DEVICE);
1236 len -= a_len;
1237 datbuf += a_len;
1238 ops->retlen += a_len;
1239 }
1240 if (oobbuf) {
1241 ooblen -= b_len;
1242 oobbuf += b_len;
1243 ops->oobretlen += b_len;
1244 }
1245
1246 if (!page_count)
1247 break;
1248
1249 to += mtd->writesize;
1250 column = 0;
1251
1252 split_addr(info, to, &chipnr, &page, &column);
1253 if (chipnr != info->chip.curr_chip)
1254 select_chip(info, chipnr);
1255 }
1256
1257 mutex_unlock(&info->lock);
1258 return err;
1259
1260out_err:
1261 ops->retlen = 0;
1262 ops->oobretlen = 0;
1263
1264 mutex_unlock(&info->lock);
1265 return err;
1266}
1267
/*
 * mtd->write_oob() entry point: sanity-checks alignment and OOB length,
 * then hands off to do_write_oob().
 */
static int
tegra_nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	struct tegra_nand_info *info = MTD_TO_INFO(mtd);

	/* writes must start on a page (column) boundary */
	if (unlikely(to & info->chip.column_mask)) {
		pr_err("%s: Unaligned write (to 0x%llx) not supported\n",
		       __func__, to);
		return -EINVAL;
	}

	/* a supplied OOB buffer must come with a non-zero length */
	if (unlikely(ops->oobbuf && !ops->ooblen)) {
		pr_err("%s: Writing 0 bytes to OOB is meaningless\n", __func__);
		return -EINVAL;
	}

	return do_write_oob(mtd, to, ops);
}
1286
/* mtd->suspend() hook — nothing to quiesce here; the controller is fully
 * re-programmed by tegra_nand_resume(). */
static int tegra_nand_suspend(struct mtd_info *mtd)
{
	return 0;
}
1291
/*
 * Look up the platform-provided timing parameters matching the chip's
 * READ ID bytes and program TIMING/TIMING2.  All timing values are given
 * in nanoseconds and converted to controller clock cycles (rounded up)
 * via CNT().  If no entry matches, the bootloader's timings are kept.
 *
 * NOTE(review): the loop does not break on the first match, so when the
 * platform table contains duplicates the LAST matching entry wins —
 * confirm this is intentional.
 */
static void
set_chip_timing(struct tegra_nand_info *info, uint32_t vendor_id,
		uint32_t dev_id, uint32_t fourth_id_field)
{
	struct tegra_nand_chip_parms *chip_parms = NULL;
	uint32_t tmp;
	int i = 0;
	/* clock is queried once; see TODO below about DVFS */
	unsigned long nand_clk_freq_khz = clk_get_rate(info->clk) / 1000;
	for (i = 0; i < info->plat->nr_chip_parms; i++)
		if (info->plat->chip_parms[i].vendor_id == vendor_id &&
		    info->plat->chip_parms[i].device_id == dev_id &&
		    info->plat->chip_parms[i].read_id_fourth_byte ==
		    fourth_id_field)
			chip_parms = &info->plat->chip_parms[i];

	if (!chip_parms) {
		pr_warn("WARNING:tegra_nand: timing for vendor-id: "
			"%x device-id: %x fourth-id-field: %x not found. Using Bootloader timing",
			vendor_id, dev_id, fourth_id_field);
		return;
	}
	/* TODO: Handle the change of frequency if DVFS is enabled */
	/* ns -> clock cycles, rounding up, minus one (register encoding) */
#define CNT(t) (((((t) * nand_clk_freq_khz) + 1000000 - 1) / 1000000) - 1)
	tmp = (TIMING_TRP_RESP(CNT(chip_parms->timing.trp_resp)) |
	       TIMING_TWB(CNT(chip_parms->timing.twb)) |
	       TIMING_TCR_TAR_TRR(CNT(chip_parms->timing.tcr_tar_trr)) |
	       TIMING_TWHR(CNT(chip_parms->timing.twhr)) |
	       TIMING_TCS(CNT(chip_parms->timing.tcs)) |
	       TIMING_TWH(CNT(chip_parms->timing.twh)) |
	       TIMING_TWP(CNT(chip_parms->timing.twp)) |
	       TIMING_TRH(CNT(chip_parms->timing.trh)) |
	       TIMING_TRP(CNT(chip_parms->timing.trp)));
	writel(tmp, TIMING_REG);
	writel(TIMING2_TADL(CNT(chip_parms->timing.tadl)), TIMING2_REG);
#undef CNT
}
1328
/*
 * mtd->resume() hook: re-initialize the controller — hwstatus monitor,
 * pending-interrupt clearing, interrupt masks, CONFIG reset and the chip
 * timings identified at scan time (presumably register state does not
 * survive suspend — mirrors the init sequence in probe).
 */
static void tegra_nand_resume(struct mtd_info *mtd)
{
	struct tegra_nand_info *info = MTD_TO_INFO(mtd);

	cfg_hwstatus_mon(info);

	/* clear all pending interrupts */
	writel(readl(ISR_REG), ISR_REG);

	/* clear dma interrupt */
	writel(DMA_CTRL_IS_DMA_DONE, DMA_MST_CTRL_REG);

	/* enable interrupts */
	disable_ints(info, 0xffffffff);
	enable_ints(info,
		    IER_ERR_TRIG_VAL(4) | IER_UND | IER_OVR | IER_CMD_DONE |
		    IER_ECC_ERR | IER_GIE);

	writel(0, CONFIG_REG);

	/* reapply the per-chip timings captured during tegra_nand_scan() */
	set_chip_timing(info, info->vendor_id,
			info->device_id, info->dev_parms);

	return;
}
1354
1355static int scan_bad_blocks(struct tegra_nand_info *info)
1356{
1357 struct mtd_info *mtd = &info->mtd;
1358 int num_blocks = mtd->size >> info->chip.block_shift;
1359 uint32_t block;
1360 int is_bad = 0;
1361 info->num_bad_blocks = 0;
1362
1363 for (block = 0; block < num_blocks; ++block) {
1364 /* make sure the bit is cleared, meaning it's bad/unknown before
1365 * we check. */
1366 clear_bit(block, info->bb_bitmap);
1367 is_bad = mtd->block_isbad(mtd, block << info->chip.block_shift);
1368
1369 if (is_bad == 0)
1370 set_bit(block, info->bb_bitmap);
1371 else if (is_bad > 0) {
1372 info->num_bad_blocks++;
1373 pr_debug("block 0x%08x is bad.\n", block);
1374 } else {
1375 pr_err("Fatal error (%d) while scanning for "
1376 "bad blocks\n", is_bad);
1377 return is_bad;
1378 }
1379 }
1380 return 0;
1381}
1382
1383/* Scans for nand flash devices, identifies them, and fills in the
1384 * device info. */
1385static int tegra_nand_scan(struct mtd_info *mtd, int maxchips)
1386{
1387 struct tegra_nand_info *info = MTD_TO_INFO(mtd);
1388 struct nand_flash_dev *dev_info;
1389 struct nand_manufacturers *vendor_info;
1390 uint32_t tmp;
1391 uint32_t dev_id;
1392 uint32_t vendor_id;
1393 uint32_t dev_parms;
1394 uint32_t mlc_parms;
1395 int cnt;
1396 int err = 0;
1397
1398 writel(SCAN_TIMING_VAL, TIMING_REG);
1399 writel(SCAN_TIMING2_VAL, TIMING2_REG);
1400 writel(0, CONFIG_REG);
1401
1402 select_chip(info, 0);
1403 err = tegra_nand_cmd_readid(info, &tmp);
1404 if (err != 0)
1405 goto out_error;
1406
1407 vendor_id = tmp & 0xff;
1408 dev_id = (tmp >> 8) & 0xff;
1409 mlc_parms = (tmp >> 16) & 0xff;
1410 dev_parms = (tmp >> 24) & 0xff;
1411
1412 dev_info = find_nand_flash_device(dev_id);
1413 if (dev_info == NULL) {
1414 pr_err("%s: unknown flash device id (0x%02x) found.\n",
1415 __func__, dev_id);
1416 err = -ENODEV;
1417 goto out_error;
1418 }
1419
1420 vendor_info = find_nand_flash_vendor(vendor_id);
1421 if (vendor_info == NULL) {
1422 pr_err("%s: unknown flash vendor id (0x%02x) found.\n",
1423 __func__, vendor_id);
1424 err = -ENODEV;
1425 goto out_error;
1426 }
1427
1428 /* loop through and see if we can find more devices */
1429 for (cnt = 1; cnt < info->plat->max_chips; ++cnt) {
1430 select_chip(info, cnt);
1431 /* TODO: figure out what to do about errors here */
1432 err = tegra_nand_cmd_readid(info, &tmp);
1433 if (err != 0)
1434 goto out_error;
1435 if ((dev_id != ((tmp >> 8) & 0xff)) ||
1436 (vendor_id != (tmp & 0xff)))
1437 break;
1438 }
1439
1440 pr_info("%s: %d NAND chip(s) found (vend=0x%02x, dev=0x%02x) (%s %s)\n",
1441 DRIVER_NAME, cnt, vendor_id, dev_id, vendor_info->name,
1442 dev_info->name);
1443 info->vendor_id = vendor_id;
1444 info->device_id = dev_id;
1445 info->dev_parms = dev_parms;
1446 info->chip.num_chips = cnt;
1447 info->chip.chipsize = dev_info->chipsize << 20;
1448 mtd->size = info->chip.num_chips * info->chip.chipsize;
1449
1450 /* format of 4th id byte returned by READ ID
1451 * bit 7 = rsvd
1452 * bit 6 = bus width. 1 == 16bit, 0 == 8bit
1453 * bits 5:4 = data block size. 64kb * (2^val)
1454 * bit 3 = rsvd
1455 * bit 2 = spare area size / 512 bytes. 0 == 8bytes, 1 == 16bytes
1456 * bits 1:0 = page size. 1kb * (2^val)
1457 */
1458
1459 /* page_size */
1460 tmp = dev_parms & 0x3;
1461 mtd->writesize = 1024 << tmp;
1462 info->chip.column_mask = mtd->writesize - 1;
1463
1464 if (mtd->writesize > 4096) {
1465 pr_err("%s: Large page devices with pagesize > 4kb are NOT "
1466 "supported\n", __func__);
1467 goto out_error;
1468 } else if (mtd->writesize < 2048) {
1469 pr_err("%s: Small page devices are NOT supported\n", __func__);
1470 goto out_error;
1471 }
1472
1473 /* spare area, must be at least 64 bytes */
1474 tmp = (dev_parms >> 2) & 0x1;
1475 tmp = (8 << tmp) * (mtd->writesize / 512);
1476 if (tmp < 64) {
1477 pr_err("%s: Spare area (%d bytes) too small\n", __func__, tmp);
1478 goto out_error;
1479 }
1480 mtd->oobsize = tmp;
1481
1482 /* data block size (erase size) (w/o spare) */
1483 tmp = (dev_parms >> 4) & 0x3;
1484 mtd->erasesize = (64 * 1024) << tmp;
1485 info->chip.block_shift = ffs(mtd->erasesize) - 1;
1486 /* bus width of the nand chip 8/16 */
1487 tmp = (dev_parms >> 6) & 0x1;
1488 info->is_data_bus_width_16 = tmp;
1489 /* used to select the appropriate chip/page in case multiple devices
1490 * are connected */
1491 info->chip.chip_shift = ffs(info->chip.chipsize) - 1;
1492 info->chip.page_shift = ffs(mtd->writesize) - 1;
1493 info->chip.page_mask =
1494 (info->chip.chipsize >> info->chip.page_shift) - 1;
1495
1496 /* now fill in the rest of the mtd fields */
1497 if (mtd->oobsize == 64)
1498 mtd->ecclayout = &tegra_nand_oob_64;
1499 else
1500 mtd->ecclayout = &tegra_nand_oob_128;
1501
1502 mtd->oobavail = mtd->ecclayout->oobavail;
1503 mtd->type = MTD_NANDFLASH;
1504 mtd->flags = MTD_CAP_NANDFLASH;
1505
1506 mtd->erase = tegra_nand_erase;
1507 mtd->lock = NULL;
1508 mtd->point = NULL;
1509 mtd->unpoint = NULL;
1510 mtd->read = tegra_nand_read;
1511 mtd->write = tegra_nand_write;
1512 mtd->read_oob = tegra_nand_read_oob;
1513 mtd->write_oob = tegra_nand_write_oob;
1514
1515 mtd->resume = tegra_nand_resume;
1516 mtd->suspend = tegra_nand_suspend;
1517 mtd->block_isbad = tegra_nand_block_isbad;
1518 mtd->block_markbad = tegra_nand_block_markbad;
1519
1520 set_chip_timing(info, vendor_id, dev_id, dev_parms);
1521
1522 return 0;
1523
1524out_error:
1525 pr_err("%s: NAND device scan aborted due to error(s).\n", __func__);
1526 return err;
1527}
1528
1529static int __devinit tegra_nand_probe(struct platform_device *pdev)
1530{
1531 struct tegra_nand_platform *plat = pdev->dev.platform_data;
1532 struct tegra_nand_info *info = NULL;
1533 struct tegra_nand_chip *chip = NULL;
1534 struct mtd_info *mtd = NULL;
1535 int err = 0;
1536 uint64_t num_erase_blocks;
1537
1538 pr_debug("%s: probing (%p)\n", __func__, pdev);
1539
1540 if (!plat) {
1541 pr_err("%s: no platform device info\n", __func__);
1542 return -EINVAL;
1543 } else if (!plat->chip_parms) {
1544 pr_err("%s: no platform nand parms\n", __func__);
1545 return -EINVAL;
1546 }
1547
1548 info = kzalloc(sizeof(struct tegra_nand_info), GFP_KERNEL);
1549 if (!info) {
1550 pr_err("%s: no memory for flash info\n", __func__);
1551 return -ENOMEM;
1552 }
1553
1554 info->dev = &pdev->dev;
1555 info->plat = plat;
1556
1557 platform_set_drvdata(pdev, info);
1558
1559 init_completion(&info->cmd_complete);
1560 init_completion(&info->dma_complete);
1561
1562 mutex_init(&info->lock);
1563 spin_lock_init(&info->ecc_lock);
1564
1565 chip = &info->chip;
1566 chip->priv = &info->mtd;
1567 chip->curr_chip = -1;
1568
1569 mtd = &info->mtd;
1570 mtd->name = dev_name(&pdev->dev);
1571 mtd->priv = &info->chip;
1572 mtd->owner = THIS_MODULE;
1573
1574 /* HACK: allocate a dma buffer to hold 1 page oob data */
1575 info->oob_dma_buf = dma_alloc_coherent(NULL, 128,
1576 &info->oob_dma_addr, GFP_KERNEL);
1577 if (!info->oob_dma_buf) {
1578 err = -ENOMEM;
1579 goto out_free_info;
1580 }
1581
1582 /* this will store the ecc error vector info */
1583 info->ecc_buf = dma_alloc_coherent(NULL, ECC_BUF_SZ, &info->ecc_addr,
1584 GFP_KERNEL);
1585 if (!info->ecc_buf) {
1586 err = -ENOMEM;
1587 goto out_free_dma_buf;
1588 }
1589
1590 /* grab the irq */
1591 if (!(pdev->resource[0].flags & IORESOURCE_IRQ)) {
1592 pr_err("NAND IRQ resource not defined\n");
1593 err = -EINVAL;
1594 goto out_free_ecc_buf;
1595 }
1596
1597 err = request_irq(pdev->resource[0].start, tegra_nand_irq,
1598 IRQF_SHARED, DRIVER_NAME, info);
1599 if (err) {
1600 pr_err("Unable to request IRQ %d (%d)\n",
1601 pdev->resource[0].start, err);
1602 goto out_free_ecc_buf;
1603 }
1604
1605 /* TODO: configure pinmux here?? */
1606 info->clk = clk_get(&pdev->dev, NULL);
1607
1608 if (IS_ERR(info->clk)) {
1609 err = PTR_ERR(info->clk);
1610 goto out_free_ecc_buf;
1611 }
1612 err = clk_enable(info->clk);
1613 if (err != 0)
1614 goto out_free_ecc_buf;
1615
1616 if (plat->wp_gpio) {
1617 gpio_request(plat->wp_gpio, "nand_wp");
1618 tegra_gpio_enable(plat->wp_gpio);
1619 gpio_direction_output(plat->wp_gpio, 1);
1620 }
1621
1622 cfg_hwstatus_mon(info);
1623
1624 /* clear all pending interrupts */
1625 writel(readl(ISR_REG), ISR_REG);
1626
1627 /* clear dma interrupt */
1628 writel(DMA_CTRL_IS_DMA_DONE, DMA_MST_CTRL_REG);
1629
1630 /* enable interrupts */
1631 disable_ints(info, 0xffffffff);
1632 enable_ints(info,
1633 IER_ERR_TRIG_VAL(4) | IER_UND | IER_OVR | IER_CMD_DONE |
1634 IER_ECC_ERR | IER_GIE);
1635
1636 if (tegra_nand_scan(mtd, plat->max_chips)) {
1637 err = -ENXIO;
1638 goto out_dis_irq;
1639 }
1640 pr_info("%s: NVIDIA Tegra NAND controller @ base=0x%08x irq=%d.\n",
1641 DRIVER_NAME, TEGRA_NAND_PHYS, pdev->resource[0].start);
1642
1643 /* allocate memory to hold the ecc error info */
1644 info->max_ecc_errs = MAX_DMA_SZ / mtd->writesize;
1645 info->ecc_errs = kmalloc(info->max_ecc_errs * sizeof(uint32_t),
1646 GFP_KERNEL);
1647 if (!info->ecc_errs) {
1648 err = -ENOMEM;
1649 goto out_dis_irq;
1650 }
1651
1652 /* alloc the bad block bitmap */
1653 num_erase_blocks = mtd->size;
1654 do_div(num_erase_blocks, mtd->erasesize);
1655 info->bb_bitmap = kzalloc(BITS_TO_LONGS(num_erase_blocks) *
1656 sizeof(unsigned long), GFP_KERNEL);
1657 if (!info->bb_bitmap) {
1658 err = -ENOMEM;
1659 goto out_free_ecc;
1660 }
1661
1662 err = scan_bad_blocks(info);
1663 if (err != 0)
1664 goto out_free_bbbmap;
1665
1666#if 0
1667 dump_nand_regs();
1668#endif
1669
1670 err = parse_mtd_partitions(mtd, part_probes, &info->parts, 0);
1671 if (err > 0) {
1672 err = mtd_device_register(mtd, info->parts, err);
1673 } else if (err <= 0 && plat->parts) {
1674 err = mtd_device_register(mtd, plat->parts, plat->nr_parts);
1675 } else
1676 err = mtd_device_register(mtd, NULL, 0);
1677 if (err != 0)
1678 goto out_free_bbbmap;
1679
1680 dev_set_drvdata(&pdev->dev, info);
1681
1682 info->partial_unaligned_rw_buffer = kzalloc(mtd->writesize, GFP_KERNEL);
1683 if (!info->partial_unaligned_rw_buffer) {
1684 err = -ENOMEM;
1685 goto out_free_bbbmap;
1686 }
1687
1688 err = device_create_file(&pdev->dev, &dev_attr_device_id);
1689 if (err != 0)
1690 goto out_free_rw_buffer;
1691
1692 err = device_create_file(&pdev->dev, &dev_attr_vendor_id);
1693 if (err != 0)
1694 goto err_nand_sysfs_vendorid_failed;
1695
1696 err = device_create_file(&pdev->dev, &dev_attr_flash_size);
1697 if (err != 0)
1698 goto err_nand_sysfs_flash_size_failed;
1699
1700 err = device_create_file(&pdev->dev, &dev_attr_num_bad_blocks);
1701 if (err != 0)
1702 goto err_nand_sysfs_num_bad_blocks_failed;
1703
1704 err = device_create_file(&pdev->dev, &dev_attr_bb_bitmap);
1705 if (err != 0)
1706 goto err_nand_sysfs_bb_bitmap_failed;
1707
1708 pr_debug("%s: probe done.\n", __func__);
1709 return 0;
1710
1711err_nand_sysfs_bb_bitmap_failed:
1712 device_remove_file(&pdev->dev, &dev_attr_num_bad_blocks);
1713
1714err_nand_sysfs_num_bad_blocks_failed:
1715 device_remove_file(&pdev->dev, &dev_attr_flash_size);
1716
1717err_nand_sysfs_flash_size_failed:
1718 device_remove_file(&pdev->dev, &dev_attr_vendor_id);
1719
1720err_nand_sysfs_vendorid_failed:
1721 device_remove_file(&pdev->dev, &dev_attr_device_id);
1722
1723out_free_rw_buffer:
1724 kfree(info->partial_unaligned_rw_buffer);
1725
1726out_free_bbbmap:
1727 kfree(info->bb_bitmap);
1728
1729out_free_ecc:
1730 kfree(info->ecc_errs);
1731
1732out_dis_irq:
1733 disable_ints(info, 0xffffffff);
1734 free_irq(pdev->resource[0].start, info);
1735
1736out_free_ecc_buf:
1737 dma_free_coherent(NULL, ECC_BUF_SZ, info->ecc_buf, info->ecc_addr);
1738
1739out_free_dma_buf:
1740 dma_free_coherent(NULL, 128, info->oob_dma_buf, info->oob_dma_addr);
1741
1742out_free_info:
1743 platform_set_drvdata(pdev, NULL);
1744 kfree(info);
1745
1746 return err;
1747}
1748
1749static int __devexit tegra_nand_remove(struct platform_device *pdev)
1750{
1751 struct tegra_nand_info *info = dev_get_drvdata(&pdev->dev);
1752
1753 dev_set_drvdata(&pdev->dev, NULL);
1754
1755 if (info) {
1756 free_irq(pdev->resource[0].start, info);
1757 kfree(info->bb_bitmap);
1758 kfree(info->ecc_errs);
1759 kfree(info->partial_unaligned_rw_buffer);
1760
1761 device_remove_file(&pdev->dev, &dev_attr_device_id);
1762 device_remove_file(&pdev->dev, &dev_attr_vendor_id);
1763 device_remove_file(&pdev->dev, &dev_attr_flash_size);
1764 device_remove_file(&pdev->dev, &dev_attr_num_bad_blocks);
1765 device_remove_file(&pdev->dev, &dev_attr_bb_bitmap);
1766
1767 dma_free_coherent(NULL, ECC_BUF_SZ, info->ecc_buf,
1768 info->ecc_addr);
1769 dma_free_coherent(NULL, info->mtd.writesize + info->mtd.oobsize,
1770 info->oob_dma_buf, info->oob_dma_addr);
1771 kfree(info);
1772 }
1773
1774 return 0;
1775}
1776
/* Suspend/resume are handled through the mtd_info hooks (set in
 * tegra_nand_scan()), not through the platform driver. */
static struct platform_driver tegra_nand_driver = {
	.probe = tegra_nand_probe,
	.remove = __devexit_p(tegra_nand_remove),
	.suspend = NULL,
	.resume = NULL,
	.driver = {
		   .name = "tegra_nand",
		   .owner = THIS_MODULE,
		   },
};

/* Module entry point: register the platform driver. */
static int __init tegra_nand_init(void)
{
	return platform_driver_register(&tegra_nand_driver);
}

/* Module exit point: unregister the platform driver. */
static void __exit tegra_nand_exit(void)
{
	platform_driver_unregister(&tegra_nand_driver);
}

module_init(tegra_nand_init);
module_exit(tegra_nand_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/mtd/devices/tegra_nand.h b/drivers/mtd/devices/tegra_nand.h
new file mode 100644
index 00000000000..339d6cc7330
--- /dev/null
+++ b/drivers/mtd/devices/tegra_nand.h
@@ -0,0 +1,148 @@
1/*
2 * drivers/mtd/devices/tegra_nand.h
3 *
4 * Copyright (C) 2010 Google, Inc.
5 * Author: Dima Zavin <dima@android.com>
6 * Colin Cross <ccross@android.com>
7 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 */
18
#ifndef __MTD_DEV_TEGRA_NAND_H
#define __MTD_DEV_TEGRA_NAND_H

#include <mach/io.h>

/* Generic register bit/field helpers: REG_FIELD() packs a value into a
 * field of given start bit and width; REG_GET_FIELD() extracts it. */
#define __BITMASK0(len)			((1 << (len)) - 1)
#define __BITMASK(start, len)		(__BITMASK0(len) << (start))
#define REG_BIT(bit)			(1 << (bit))
#define REG_FIELD(val, start, len)	(((val) & __BITMASK0(len)) << (start))
#define REG_FIELD_MASK(start, len)	(~(__BITMASK((start), (len))))
#define REG_GET_FIELD(val, start, len)	(((val) >> (start)) & __BITMASK0(len))

/* tegra nand registers... (virtual addresses, offsets from the
 * controller base at physical 0x70008000) */
#define TEGRA_NAND_PHYS			0x70008000
#define TEGRA_NAND_BASE			IO_TO_VIRT(TEGRA_NAND_PHYS)
#define COMMAND_REG		(TEGRA_NAND_BASE + 0x00)
#define STATUS_REG		(TEGRA_NAND_BASE + 0x04)
#define ISR_REG			(TEGRA_NAND_BASE + 0x08)
#define IER_REG			(TEGRA_NAND_BASE + 0x0c)
#define CONFIG_REG		(TEGRA_NAND_BASE + 0x10)
#define TIMING_REG		(TEGRA_NAND_BASE + 0x14)
#define RESP_REG		(TEGRA_NAND_BASE + 0x18)
#define TIMING2_REG		(TEGRA_NAND_BASE + 0x1c)
#define CMD_REG1		(TEGRA_NAND_BASE + 0x20)
#define CMD_REG2		(TEGRA_NAND_BASE + 0x24)
#define ADDR_REG1		(TEGRA_NAND_BASE + 0x28)
#define ADDR_REG2		(TEGRA_NAND_BASE + 0x2c)
#define DMA_MST_CTRL_REG	(TEGRA_NAND_BASE + 0x30)
#define DMA_CFG_A_REG		(TEGRA_NAND_BASE + 0x34)
#define DMA_CFG_B_REG		(TEGRA_NAND_BASE + 0x38)
#define FIFO_CTRL_REG		(TEGRA_NAND_BASE + 0x3c)
#define DATA_BLOCK_PTR_REG	(TEGRA_NAND_BASE + 0x40)
#define TAG_PTR_REG		(TEGRA_NAND_BASE + 0x44)
#define ECC_PTR_REG		(TEGRA_NAND_BASE + 0x48)
#define DEC_STATUS_REG		(TEGRA_NAND_BASE + 0x4c)
#define HWSTATUS_CMD_REG	(TEGRA_NAND_BASE + 0x50)
#define HWSTATUS_MASK_REG	(TEGRA_NAND_BASE + 0x54)
#define LL_CONFIG_REG		(TEGRA_NAND_BASE + 0x58)
#define LL_PTR_REG		(TEGRA_NAND_BASE + 0x5c)
#define LL_STATUS_REG		(TEGRA_NAND_BASE + 0x60)

/* nand_command bits */
#define COMMAND_GO		REG_BIT(31)
#define COMMAND_CLE		REG_BIT(30)
#define COMMAND_ALE		REG_BIT(29)
#define COMMAND_PIO		REG_BIT(28)
#define COMMAND_TX		REG_BIT(27)
#define COMMAND_RX		REG_BIT(26)
#define COMMAND_SEC_CMD		REG_BIT(25)
#define COMMAND_AFT_DAT		REG_BIT(24)
#define COMMAND_TRANS_SIZE(val)	REG_FIELD((val), 20, 4)
#define COMMAND_A_VALID		REG_BIT(19)
#define COMMAND_B_VALID		REG_BIT(18)
#define COMMAND_RD_STATUS_CHK	REG_BIT(17)
#define COMMAND_RBSY_CHK	REG_BIT(16)
#define COMMAND_CE(val)		REG_BIT(8 + ((val) & 0x7))
#define COMMAND_CLE_BYTE_SIZE(val)	REG_FIELD((val), 4, 2)
#define COMMAND_ALE_BYTE_SIZE(val)	REG_FIELD((val), 0, 4)

/* nand isr bits */
#define ISR_UND			REG_BIT(7)
#define ISR_OVR			REG_BIT(6)
#define ISR_CMD_DONE		REG_BIT(5)
#define ISR_ECC_ERR		REG_BIT(4)

/* nand ier bits */
#define IER_ERR_TRIG_VAL(val)	REG_FIELD((val), 16, 4)
#define IER_UND			REG_BIT(7)
#define IER_OVR			REG_BIT(6)
#define IER_CMD_DONE		REG_BIT(5)
#define IER_ECC_ERR		REG_BIT(4)
#define IER_GIE			REG_BIT(0)

/* nand config bits */
#define CONFIG_HW_ECC		REG_BIT(31)
#define CONFIG_ECC_SEL		REG_BIT(30)
#define CONFIG_HW_ERR_CORRECTION	REG_BIT(29)
#define CONFIG_PIPELINE_EN	REG_BIT(28)
#define CONFIG_ECC_EN_TAG	REG_BIT(27)
#define CONFIG_TVALUE(val)	REG_FIELD((val), 24, 2)
#define CONFIG_SKIP_SPARE	REG_BIT(23)
#define CONFIG_COM_BSY		REG_BIT(22)
#define CONFIG_BUS_WIDTH	REG_BIT(21)
#define CONFIG_EDO_MODE		REG_BIT(19)
#define CONFIG_PAGE_SIZE_SEL(val)	REG_FIELD((val), 16, 3)
#define CONFIG_SKIP_SPARE_SEL(val)	REG_FIELD((val), 14, 2)
#define CONFIG_TAG_BYTE_SIZE(val)	REG_FIELD((val), 0, 8)

/* nand timing bits */
#define TIMING_TRP_RESP(val)	REG_FIELD((val), 28, 4)
#define TIMING_TWB(val)		REG_FIELD((val), 24, 4)
#define TIMING_TCR_TAR_TRR(val)	REG_FIELD((val), 20, 4)
#define TIMING_TWHR(val)	REG_FIELD((val), 16, 4)
#define TIMING_TCS(val)		REG_FIELD((val), 14, 2)
#define TIMING_TWH(val)		REG_FIELD((val), 12, 2)
#define TIMING_TWP(val)		REG_FIELD((val), 8, 4)
#define TIMING_TRH(val)		REG_FIELD((val), 4, 2)
#define TIMING_TRP(val)		REG_FIELD((val), 0, 4)

/* nand timing2 bits */
#define TIMING2_TADL(val)	REG_FIELD((val), 0, 4)

/* nand dma_mst_ctrl bits */
#define DMA_CTRL_DMA_GO		REG_BIT(31)
#define DMA_CTRL_DIR		REG_BIT(30)
#define DMA_CTRL_DMA_PERF_EN	REG_BIT(29)
#define DMA_CTRL_IE_DMA_DONE	REG_BIT(28)
#define DMA_CTRL_REUSE_BUFFER	REG_BIT(27)
#define DMA_CTRL_BURST_SIZE(val)	REG_FIELD((val), 24, 3)
#define DMA_CTRL_IS_DMA_DONE	REG_BIT(20)
#define DMA_CTRL_DMA_EN_A	REG_BIT(2)
#define DMA_CTRL_DMA_EN_B	REG_BIT(1)

/* nand dma_cfg_a/cfg_b bits */
#define DMA_CFG_BLOCK_SIZE(val)	REG_FIELD((val), 0, 16)

/* nand dec_status bits */
#define DEC_STATUS_ERR_PAGE_NUM(val)	REG_GET_FIELD((val), 24, 8)
#define DEC_STATUS_ERR_CNT(val)		REG_GET_FIELD((val), 16, 8)
#define DEC_STATUS_ECC_FAIL_A	REG_BIT(1)
#define DEC_STATUS_ECC_FAIL_B	REG_BIT(0)

/* nand hwstatus_mask bits */
#define HWSTATUS_RDSTATUS_MASK(val)	REG_FIELD((val), 24, 8)
#define HWSTATUS_RDSTATUS_EXP_VAL(val)	REG_FIELD((val), 16, 8)
#define HWSTATUS_RBSY_MASK(val)		REG_FIELD((val), 8, 8)
#define HWSTATUS_RBSY_EXP_VAL(val)	REG_FIELD((val), 0, 8)

#endif
148
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
new file mode 100644
index 00000000000..608967fe74c
--- /dev/null
+++ b/drivers/mtd/maps/bcm963xx-flash.c
@@ -0,0 +1,276 @@
1/*
2 * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
3 * Mike Albon <malbon@openwrt.org>
4 * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/slab.h>
24#include <linux/mtd/map.h>
25#include <linux/mtd/mtd.h>
26#include <linux/mtd/partitions.h>
27#include <linux/vmalloc.h>
28#include <linux/platform_device.h>
29#include <linux/io.h>
30
31#include <asm/mach-bcm63xx/bcm963xx_tag.h>
32
#define BCM63XX_BUSWIDTH	2	/* Buswidth */
#define BCM63XX_EXTENDED_SIZE	0xBFC00000	/* Extended flash address */

#define PFX KBUILD_MODNAME ": "

/* Partition table produced by parse_cfe_partitions() in probe. */
static struct mtd_partition *parsed_parts;

/* MTD device created by do_map_probe(); valid between probe and remove. */
static struct mtd_info *bcm963xx_mtd_info;

/* phys/size/virt are filled in at probe time from the platform resource. */
static struct map_info bcm963xx_map = {
	.name		= "bcm963xx",
	.bankwidth	= BCM63XX_BUSWIDTH,
};
46
47static int parse_cfe_partitions(struct mtd_info *master,
48 struct mtd_partition **pparts)
49{
50 /* CFE, NVRAM and global Linux are always present */
51 int nrparts = 3, curpart = 0;
52 struct bcm_tag *buf;
53 struct mtd_partition *parts;
54 int ret;
55 size_t retlen;
56 unsigned int rootfsaddr, kerneladdr, spareaddr;
57 unsigned int rootfslen, kernellen, sparelen, totallen;
58 int namelen = 0;
59 int i;
60 char *boardid;
61 char *tagversion;
62
63 /* Allocate memory for buffer */
64 buf = vmalloc(sizeof(struct bcm_tag));
65 if (!buf)
66 return -ENOMEM;
67
68 /* Get the tag */
69 ret = master->read(master, master->erasesize, sizeof(struct bcm_tag),
70 &retlen, (void *)buf);
71 if (retlen != sizeof(struct bcm_tag)) {
72 vfree(buf);
73 return -EIO;
74 }
75
76 sscanf(buf->kernel_address, "%u", &kerneladdr);
77 sscanf(buf->kernel_length, "%u", &kernellen);
78 sscanf(buf->total_length, "%u", &totallen);
79 tagversion = &(buf->tag_version[0]);
80 boardid = &(buf->board_id[0]);
81
82 printk(KERN_INFO PFX "CFE boot tag found with version %s "
83 "and board type %s\n", tagversion, boardid);
84
85 kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
86 rootfsaddr = kerneladdr + kernellen;
87 spareaddr = roundup(totallen, master->erasesize) + master->erasesize;
88 sparelen = master->size - spareaddr - master->erasesize;
89 rootfslen = spareaddr - rootfsaddr;
90
91 /* Determine number of partitions */
92 namelen = 8;
93 if (rootfslen > 0) {
94 nrparts++;
95 namelen += 6;
96 };
97 if (kernellen > 0) {
98 nrparts++;
99 namelen += 6;
100 };
101
102 /* Ask kernel for more memory */
103 parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
104 if (!parts) {
105 vfree(buf);
106 return -ENOMEM;
107 };
108
109 /* Start building partition list */
110 parts[curpart].name = "CFE";
111 parts[curpart].offset = 0;
112 parts[curpart].size = master->erasesize;
113 curpart++;
114
115 if (kernellen > 0) {
116 parts[curpart].name = "kernel";
117 parts[curpart].offset = kerneladdr;
118 parts[curpart].size = kernellen;
119 curpart++;
120 };
121
122 if (rootfslen > 0) {
123 parts[curpart].name = "rootfs";
124 parts[curpart].offset = rootfsaddr;
125 parts[curpart].size = rootfslen;
126 if (sparelen > 0)
127 parts[curpart].size += sparelen;
128 curpart++;
129 };
130
131 parts[curpart].name = "nvram";
132 parts[curpart].offset = master->size - master->erasesize;
133 parts[curpart].size = master->erasesize;
134
135 /* Global partition "linux" to make easy firmware upgrade */
136 curpart++;
137 parts[curpart].name = "linux";
138 parts[curpart].offset = parts[0].size;
139 parts[curpart].size = master->size - parts[0].size - parts[3].size;
140
141 for (i = 0; i < nrparts; i++)
142 printk(KERN_INFO PFX "Partition %d is %s offset %lx and "
143 "length %lx\n", i, parts[i].name,
144 (long unsigned int)(parts[i].offset),
145 (long unsigned int)(parts[i].size));
146
147 printk(KERN_INFO PFX "Spare partition is %x offset and length %x\n",
148 spareaddr, sparelen);
149 *pparts = parts;
150 vfree(buf);
151
152 return nrparts;
153};
154
155static int bcm963xx_detect_cfe(struct mtd_info *master)
156{
157 int idoffset = 0x4e0;
158 static char idstring[8] = "CFE1CFE1";
159 char buf[9];
160 int ret;
161 size_t retlen;
162
163 ret = master->read(master, idoffset, 8, &retlen, (void *)buf);
164 buf[retlen] = 0;
165 printk(KERN_INFO PFX "Read Signature value of %s\n", buf);
166
167 return strncmp(idstring, buf, 8);
168}
169
170static int bcm963xx_probe(struct platform_device *pdev)
171{
172 int err = 0;
173 int parsed_nr_parts = 0;
174 char *part_type;
175 struct resource *r;
176
177 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
178 if (!r) {
179 dev_err(&pdev->dev, "no resource supplied\n");
180 return -ENODEV;
181 }
182
183 bcm963xx_map.phys = r->start;
184 bcm963xx_map.size = resource_size(r);
185 bcm963xx_map.virt = ioremap(r->start, resource_size(r));
186 if (!bcm963xx_map.virt) {
187 dev_err(&pdev->dev, "failed to ioremap\n");
188 return -EIO;
189 }
190
191 dev_info(&pdev->dev, "0x%08lx at 0x%08x\n",
192 bcm963xx_map.size, bcm963xx_map.phys);
193
194 simple_map_init(&bcm963xx_map);
195
196 bcm963xx_mtd_info = do_map_probe("cfi_probe", &bcm963xx_map);
197 if (!bcm963xx_mtd_info) {
198 dev_err(&pdev->dev, "failed to probe using CFI\n");
199 bcm963xx_mtd_info = do_map_probe("jedec_probe", &bcm963xx_map);
200 if (bcm963xx_mtd_info)
201 goto probe_ok;
202 dev_err(&pdev->dev, "failed to probe using JEDEC\n");
203 err = -EIO;
204 goto err_probe;
205 }
206
207probe_ok:
208 bcm963xx_mtd_info->owner = THIS_MODULE;
209
210 /* This is mutually exclusive */
211 if (bcm963xx_detect_cfe(bcm963xx_mtd_info) == 0) {
212 dev_info(&pdev->dev, "CFE bootloader detected\n");
213 if (parsed_nr_parts == 0) {
214 int ret = parse_cfe_partitions(bcm963xx_mtd_info,
215 &parsed_parts);
216 if (ret > 0) {
217 part_type = "CFE";
218 parsed_nr_parts = ret;
219 }
220 }
221 } else {
222 dev_info(&pdev->dev, "unsupported bootloader\n");
223 err = -ENODEV;
224 goto err_probe;
225 }
226
227 return mtd_device_register(bcm963xx_mtd_info, parsed_parts,
228 parsed_nr_parts);
229
230err_probe:
231 iounmap(bcm963xx_map.virt);
232 return err;
233}
234
235static int bcm963xx_remove(struct platform_device *pdev)
236{
237 if (bcm963xx_mtd_info) {
238 mtd_device_unregister(bcm963xx_mtd_info);
239 map_destroy(bcm963xx_mtd_info);
240 }
241
242 if (bcm963xx_map.virt) {
243 iounmap(bcm963xx_map.virt);
244 bcm963xx_map.virt = 0;
245 }
246
247 return 0;
248}
249
static struct platform_driver bcm63xx_mtd_dev = {
	.probe = bcm963xx_probe,
	.remove = bcm963xx_remove,
	.driver = {
		.name = "bcm963xx-flash",
		.owner = THIS_MODULE,
	},
};

/* Module entry point: register the platform driver. */
static int __init bcm963xx_mtd_init(void)
{
	return platform_driver_register(&bcm63xx_mtd_dev);
}

/* Module exit point: unregister the platform driver. */
static void __exit bcm963xx_mtd_exit(void)
{
	platform_driver_unregister(&bcm63xx_mtd_dev);
}

module_init(bcm963xx_mtd_init);
module_exit(bcm963xx_mtd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM63xx MTD driver for CFE and RedBoot");
MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>");
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>");
diff --git a/drivers/mtd/maps/cdb89712.c b/drivers/mtd/maps/cdb89712.c
new file mode 100644
index 00000000000..c29cbf87ea0
--- /dev/null
+++ b/drivers/mtd/maps/cdb89712.c
@@ -0,0 +1,278 @@
1/*
2 * Flash on Cirrus CDB89712
3 *
4 */
5
6#include <linux/module.h>
7#include <linux/types.h>
8#include <linux/kernel.h>
9#include <linux/ioport.h>
10#include <linux/init.h>
11#include <asm/io.h>
12#include <mach/hardware.h>
13#include <linux/mtd/mtd.h>
14#include <linux/mtd/map.h>
15#include <linux/mtd/partitions.h>
16
17/* dynamic ioremap() areas */
18#define FLASH_START 0x00000000
19#define FLASH_SIZE 0x800000
20#define FLASH_WIDTH 4
21
22#define SRAM_START 0x60000000
23#define SRAM_SIZE 0xc000
24#define SRAM_WIDTH 4
25
26#define BOOTROM_START 0x70000000
27#define BOOTROM_SIZE 0x80
28#define BOOTROM_WIDTH 4
29
30
/* MTD handle for the NOR flash bank; NULL until init_cdb89712_flash(). */
static struct mtd_info *flash_mtd;

/* Static description of the 8MiB, 32-bit-wide flash window. */
struct map_info cdb89712_flash_map = {
	.name = "flash",
	.size = FLASH_SIZE,
	.bankwidth = FLASH_WIDTH,
	.phys = FLASH_START,
};

/* Claims the flash physical window in the ioport resource tree. */
struct resource cdb89712_flash_resource = {
	.name = "Flash",
	.start = FLASH_START,
	.end = FLASH_START + FLASH_SIZE - 1,
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};
46
47static int __init init_cdb89712_flash (void)
48{
49 int err;
50
51 if (request_resource (&ioport_resource, &cdb89712_flash_resource)) {
52 printk(KERN_NOTICE "Failed to reserve Cdb89712 FLASH space\n");
53 err = -EBUSY;
54 goto out;
55 }
56
57 cdb89712_flash_map.virt = ioremap(FLASH_START, FLASH_SIZE);
58 if (!cdb89712_flash_map.virt) {
59 printk(KERN_NOTICE "Failed to ioremap Cdb89712 FLASH space\n");
60 err = -EIO;
61 goto out_resource;
62 }
63 simple_map_init(&cdb89712_flash_map);
64 flash_mtd = do_map_probe("cfi_probe", &cdb89712_flash_map);
65 if (!flash_mtd) {
66 flash_mtd = do_map_probe("map_rom", &cdb89712_flash_map);
67 if (flash_mtd)
68 flash_mtd->erasesize = 0x10000;
69 }
70 if (!flash_mtd) {
71 printk("FLASH probe failed\n");
72 err = -ENXIO;
73 goto out_ioremap;
74 }
75
76 flash_mtd->owner = THIS_MODULE;
77
78 if (mtd_device_register(flash_mtd, NULL, 0)) {
79 printk("FLASH device addition failed\n");
80 err = -ENOMEM;
81 goto out_probe;
82 }
83
84 return 0;
85
86out_probe:
87 map_destroy(flash_mtd);
88 flash_mtd = 0;
89out_ioremap:
90 iounmap((void *)cdb89712_flash_map.virt);
91out_resource:
92 release_resource (&cdb89712_flash_resource);
93out:
94 return err;
95}
96
97
98
99
100
/* MTD handle for the on-board SRAM; NULL until init_cdb89712_sram(). */
static struct mtd_info *sram_mtd;

/* Static description of the 48KiB, 32-bit-wide SRAM window. */
struct map_info cdb89712_sram_map = {
	.name = "SRAM",
	.size = SRAM_SIZE,
	.bankwidth = SRAM_WIDTH,
	.phys = SRAM_START,
};

/* Claims the SRAM physical window in the ioport resource tree. */
struct resource cdb89712_sram_resource = {
	.name = "SRAM",
	.start = SRAM_START,
	.end = SRAM_START + SRAM_SIZE - 1,
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};
116
117static int __init init_cdb89712_sram (void)
118{
119 int err;
120
121 if (request_resource (&ioport_resource, &cdb89712_sram_resource)) {
122 printk(KERN_NOTICE "Failed to reserve Cdb89712 SRAM space\n");
123 err = -EBUSY;
124 goto out;
125 }
126
127 cdb89712_sram_map.virt = ioremap(SRAM_START, SRAM_SIZE);
128 if (!cdb89712_sram_map.virt) {
129 printk(KERN_NOTICE "Failed to ioremap Cdb89712 SRAM space\n");
130 err = -EIO;
131 goto out_resource;
132 }
133 simple_map_init(&cdb89712_sram_map);
134 sram_mtd = do_map_probe("map_ram", &cdb89712_sram_map);
135 if (!sram_mtd) {
136 printk("SRAM probe failed\n");
137 err = -ENXIO;
138 goto out_ioremap;
139 }
140
141 sram_mtd->owner = THIS_MODULE;
142 sram_mtd->erasesize = 16;
143
144 if (mtd_device_register(sram_mtd, NULL, 0)) {
145 printk("SRAM device addition failed\n");
146 err = -ENOMEM;
147 goto out_probe;
148 }
149
150 return 0;
151
152out_probe:
153 map_destroy(sram_mtd);
154 sram_mtd = 0;
155out_ioremap:
156 iounmap((void *)cdb89712_sram_map.virt);
157out_resource:
158 release_resource (&cdb89712_sram_resource);
159out:
160 return err;
161}
162
163
164
165
166
167
168
/* MTD handle for the boot ROM; NULL until init_cdb89712_bootrom(). */
static struct mtd_info *bootrom_mtd;

/* Static description of the 128-byte, 32-bit-wide boot ROM window. */
struct map_info cdb89712_bootrom_map = {
	.name = "BootROM",
	.size = BOOTROM_SIZE,
	.bankwidth = BOOTROM_WIDTH,
	.phys = BOOTROM_START,
};

/* Claims the boot ROM physical window in the ioport resource tree. */
struct resource cdb89712_bootrom_resource = {
	.name = "BootROM",
	.start = BOOTROM_START,
	.end = BOOTROM_START + BOOTROM_SIZE - 1,
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};
184
185static int __init init_cdb89712_bootrom (void)
186{
187 int err;
188
189 if (request_resource (&ioport_resource, &cdb89712_bootrom_resource)) {
190 printk(KERN_NOTICE "Failed to reserve Cdb89712 BOOTROM space\n");
191 err = -EBUSY;
192 goto out;
193 }
194
195 cdb89712_bootrom_map.virt = ioremap(BOOTROM_START, BOOTROM_SIZE);
196 if (!cdb89712_bootrom_map.virt) {
197 printk(KERN_NOTICE "Failed to ioremap Cdb89712 BootROM space\n");
198 err = -EIO;
199 goto out_resource;
200 }
201 simple_map_init(&cdb89712_bootrom_map);
202 bootrom_mtd = do_map_probe("map_rom", &cdb89712_bootrom_map);
203 if (!bootrom_mtd) {
204 printk("BootROM probe failed\n");
205 err = -ENXIO;
206 goto out_ioremap;
207 }
208
209 bootrom_mtd->owner = THIS_MODULE;
210 bootrom_mtd->erasesize = 0x10000;
211
212 if (mtd_device_register(bootrom_mtd, NULL, 0)) {
213 printk("BootROM device addition failed\n");
214 err = -ENOMEM;
215 goto out_probe;
216 }
217
218 return 0;
219
220out_probe:
221 map_destroy(bootrom_mtd);
222 bootrom_mtd = 0;
223out_ioremap:
224 iounmap((void *)cdb89712_bootrom_map.virt);
225out_resource:
226 release_resource (&cdb89712_bootrom_resource);
227out:
228 return err;
229}
230
231
232
233
234
/*
 * Top-level init: print the memory layout, then bring up all three
 * windows.  Failures of the individual initialisers are deliberately
 * ignored (best effort): each cleans up after itself, and
 * cleanup_cdb89712_maps() checks the per-window mtd_info pointers,
 * so a partially working board still exposes whatever probed OK.
 */
static int __init init_cdb89712_maps(void)
{

	printk(KERN_INFO "Cirrus CDB89712 MTD mappings:\n Flash 0x%x at 0x%x\n SRAM 0x%x at 0x%x\n BootROM 0x%x at 0x%x\n",
		FLASH_SIZE, FLASH_START, SRAM_SIZE, SRAM_START, BOOTROM_SIZE, BOOTROM_START);

	init_cdb89712_flash();
	init_cdb89712_sram();
	init_cdb89712_bootrom();

	return 0;
}
247
248
249static void __exit cleanup_cdb89712_maps(void)
250{
251 if (sram_mtd) {
252 mtd_device_unregister(sram_mtd);
253 map_destroy(sram_mtd);
254 iounmap((void *)cdb89712_sram_map.virt);
255 release_resource (&cdb89712_sram_resource);
256 }
257
258 if (flash_mtd) {
259 mtd_device_unregister(flash_mtd);
260 map_destroy(flash_mtd);
261 iounmap((void *)cdb89712_flash_map.virt);
262 release_resource (&cdb89712_flash_resource);
263 }
264
265 if (bootrom_mtd) {
266 mtd_device_unregister(bootrom_mtd);
267 map_destroy(bootrom_mtd);
268 iounmap((void *)cdb89712_bootrom_map.virt);
269 release_resource (&cdb89712_bootrom_resource);
270 }
271}
272
273module_init(init_cdb89712_maps);
274module_exit(cleanup_cdb89712_maps);
275
276MODULE_AUTHOR("Ray L");
277MODULE_DESCRIPTION("ARM CDB89712 map driver");
278MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c
new file mode 100644
index 00000000000..06f9c981572
--- /dev/null
+++ b/drivers/mtd/maps/ceiva.c
@@ -0,0 +1,341 @@
1/*
2 * Ceiva flash memory driver.
3 * Copyright (C) 2002 Rob Scott <rscott@mtrob.fdns.net>
4 *
5 * Note: this driver supports jedec compatible devices. Modification
6 * for CFI compatible devices should be straight forward: change
7 * jedec_probe to cfi_probe.
8 *
9 * Based on: sa1100-flash.c, which has the following copyright:
10 * Flash memory access on SA11x0 based devices
11 *
12 * (C) 2000 Nicolas Pitre <nico@fluxnic.net>
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/types.h>
18#include <linux/ioport.h>
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/slab.h>
22
23#include <linux/mtd/mtd.h>
24#include <linux/mtd/map.h>
25#include <linux/mtd/partitions.h>
26#include <linux/mtd/concat.h>
27
28#include <mach/hardware.h>
29#include <asm/mach-types.h>
30#include <asm/io.h>
31#include <asm/sizes.h>
32
33/*
34 * This isn't complete yet, so...
35 */
36#define CONFIG_MTD_CEIVA_STATICMAP
37
38#ifdef CONFIG_MTD_CEIVA_STATICMAP
39/*
40 * See include/linux/mtd/partitions.h for definition of the mtd_partition
41 * structure.
42 *
43 * Please note:
44 * 1. The flash size given should be the largest flash size that can
45 * be accommodated.
46 *
47 * 2. The bus width must defined in clps_setup_flash.
48 *
49 * The MTD layer will detect flash chip aliasing and reduce the size of
50 * the map accordingly.
51 *
52 */
53
54#ifdef CONFIG_ARCH_CEIVA
55/* Flash / Partition sizing */
56/* For the 28F8003, we use the block mapping to calcuate the sizes */
57#define MAX_SIZE_KiB (16 + 8 + 8 + 96 + (7*128))
58#define BOOT_PARTITION_SIZE_KiB (16)
59#define PARAMS_PARTITION_SIZE_KiB (8)
60#define KERNEL_PARTITION_SIZE_KiB (4*128)
61/* Use both remaining portion of first flash, and all of second flash */
62#define ROOT_PARTITION_SIZE_KiB (3*128) + (8*128)
63
64static struct mtd_partition ceiva_partitions[] = {
65 {
66 .name = "Ceiva BOOT partition",
67 .size = BOOT_PARTITION_SIZE_KiB*1024,
68 .offset = 0,
69
70 },{
71 .name = "Ceiva parameters partition",
72 .size = PARAMS_PARTITION_SIZE_KiB*1024,
73 .offset = (16 + 8) * 1024,
74 },{
75 .name = "Ceiva kernel partition",
76 .size = (KERNEL_PARTITION_SIZE_KiB)*1024,
77 .offset = 0x20000,
78
79 },{
80 .name = "Ceiva root filesystem partition",
81 .offset = MTDPART_OFS_APPEND,
82 .size = (ROOT_PARTITION_SIZE_KiB)*1024,
83 }
84};
85#endif
86
87static int __init clps_static_partitions(struct mtd_partition **parts)
88{
89 int nb_parts = 0;
90
91#ifdef CONFIG_ARCH_CEIVA
92 if (machine_is_ceiva()) {
93 *parts = ceiva_partitions;
94 nb_parts = ARRAY_SIZE(ceiva_partitions);
95 }
96#endif
97 return nb_parts;
98}
99#endif
100
/* Per-bank bookkeeping for one candidate flash window. */
struct clps_info {
	unsigned long base;	/* physical base address */
	unsigned long size;	/* window size in bytes */
	int width;		/* bus width in bytes */
	void *vbase;		/* ioremap()ed base, NULL while unmapped */
	struct map_info *map;	/* points into the kzalloc()ed maps array */
	struct mtd_info *mtd;	/* probed device, NULL if probe failed */
	struct resource *res;	/* claimed mem region, NULL if unclaimed */
};

/* Maximum number of banks that can be probed and concatenated. */
#define NR_SUBMTD 4

static struct clps_info info[NR_SUBMTD];
114
115static int __init clps_setup_mtd(struct clps_info *clps, int nr, struct mtd_info **rmtd)
116{
117 struct mtd_info *subdev[nr];
118 struct map_info *maps;
119 int i, found = 0, ret = 0;
120
121 /*
122 * Allocate the map_info structs in one go.
123 */
124 maps = kzalloc(sizeof(struct map_info) * nr, GFP_KERNEL);
125 if (!maps)
126 return -ENOMEM;
127 /*
128 * Claim and then map the memory regions.
129 */
130 for (i = 0; i < nr; i++) {
131 if (clps[i].base == (unsigned long)-1)
132 break;
133
134 clps[i].res = request_mem_region(clps[i].base, clps[i].size, "clps flash");
135 if (!clps[i].res) {
136 ret = -EBUSY;
137 break;
138 }
139
140 clps[i].map = maps + i;
141
142 clps[i].map->name = "clps flash";
143 clps[i].map->phys = clps[i].base;
144
145 clps[i].vbase = ioremap(clps[i].base, clps[i].size);
146 if (!clps[i].vbase) {
147 ret = -ENOMEM;
148 break;
149 }
150
151 clps[i].map->virt = (void __iomem *)clps[i].vbase;
152 clps[i].map->bankwidth = clps[i].width;
153 clps[i].map->size = clps[i].size;
154
155 simple_map_init(&clps[i].map);
156
157 clps[i].mtd = do_map_probe("jedec_probe", clps[i].map);
158 if (clps[i].mtd == NULL) {
159 ret = -ENXIO;
160 break;
161 }
162 clps[i].mtd->owner = THIS_MODULE;
163 subdev[i] = clps[i].mtd;
164
165 printk(KERN_INFO "clps flash: JEDEC device at 0x%08lx, %dMiB, "
166 "%d-bit\n", clps[i].base, clps[i].mtd->size >> 20,
167 clps[i].width * 8);
168 found += 1;
169 }
170
171 /*
172 * ENXIO is special. It means we didn't find a chip when
173 * we probed. We need to tear down the mapping, free the
174 * resource and mark it as such.
175 */
176 if (ret == -ENXIO) {
177 iounmap(clps[i].vbase);
178 clps[i].vbase = NULL;
179 release_resource(clps[i].res);
180 clps[i].res = NULL;
181 }
182
183 /*
184 * If we found one device, don't bother with concat support.
185 * If we found multiple devices, use concat if we have it
186 * available, otherwise fail.
187 */
188 if (ret == 0 || ret == -ENXIO) {
189 if (found == 1) {
190 *rmtd = subdev[0];
191 ret = 0;
192 } else if (found > 1) {
193 /*
194 * We detected multiple devices. Concatenate
195 * them together.
196 */
197 *rmtd = mtd_concat_create(subdev, found,
198 "clps flash");
199 if (*rmtd == NULL)
200 ret = -ENXIO;
201 }
202 }
203
204 /*
205 * If we failed, clean up.
206 */
207 if (ret) {
208 do {
209 if (clps[i].mtd)
210 map_destroy(clps[i].mtd);
211 if (clps[i].vbase)
212 iounmap(clps[i].vbase);
213 if (clps[i].res)
214 release_resource(clps[i].res);
215 } while (i--);
216
217 kfree(maps);
218 }
219
220 return ret;
221}
222
223static void __exit clps_destroy_mtd(struct clps_info *clps, struct mtd_info *mtd)
224{
225 int i;
226
227 mtd_device_unregister(mtd);
228
229 if (mtd != clps[0].mtd)
230 mtd_concat_destroy(mtd);
231
232 for (i = NR_SUBMTD; i >= 0; i--) {
233 if (clps[i].mtd)
234 map_destroy(clps[i].mtd);
235 if (clps[i].vbase)
236 iounmap(clps[i].vbase);
237 if (clps[i].res)
238 release_resource(clps[i].res);
239 }
240 kfree(clps[0].map);
241}
242
243/*
244 * We define the memory space, size, and width for the flash memory
245 * space here.
246 */
247
/*
 * Fill info[] with the flash banks present on this machine and return
 * how many entries were filled in (0 when not running on a Ceiva board).
 */
static int __init clps_setup_flash(void)
{
	int nr = 0;

#ifdef CONFIG_ARCH_CEIVA
	if (machine_is_ceiva()) {
		/* Two 32MiB banks on chip selects 0 and 1. */
		info[0].base = CS0_PHYS_BASE;
		info[0].size = SZ_32M;
		info[0].width = CEIVA_FLASH_WIDTH;
		info[1].base = CS1_PHYS_BASE;
		info[1].size = SZ_32M;
		info[1].width = CEIVA_FLASH_WIDTH;
		nr = 2;
	}
#endif
	return nr;
}
265
266static struct mtd_partition *parsed_parts;
267static const char *probes[] = { "cmdlinepart", "RedBoot", NULL };
268
269static void __init clps_locate_partitions(struct mtd_info *mtd)
270{
271 const char *part_type = NULL;
272 int nr_parts = 0;
273 do {
274 /*
275 * Partition selection stuff.
276 */
277 nr_parts = parse_mtd_partitions(mtd, probes, &parsed_parts, 0);
278 if (nr_parts > 0) {
279 part_type = "command line";
280 break;
281 }
282#ifdef CONFIG_MTD_CEIVA_STATICMAP
283 nr_parts = clps_static_partitions(&parsed_parts);
284 if (nr_parts > 0) {
285 part_type = "static";
286 break;
287 }
288 printk("found: %d partitions\n", nr_parts);
289#endif
290 } while (0);
291
292 if (nr_parts == 0) {
293 printk(KERN_NOTICE "clps flash: no partition info "
294 "available, registering whole flash\n");
295 mtd_device_register(mtd, NULL, 0);
296 } else {
297 printk(KERN_NOTICE "clps flash: using %s partition "
298 "definition\n", part_type);
299 mtd_device_register(mtd, parsed_parts, nr_parts);
300 }
301
302 /* Always succeeds. */
303}
304
305static void __exit clps_destroy_partitions(void)
306{
307 kfree(parsed_parts);
308}
309
310static struct mtd_info *mymtd;
311
312static int __init clps_mtd_init(void)
313{
314 int ret;
315 int nr;
316
317 nr = clps_setup_flash();
318 if (nr < 0)
319 return nr;
320
321 ret = clps_setup_mtd(info, nr, &mymtd);
322 if (ret)
323 return ret;
324
325 clps_locate_partitions(mymtd);
326
327 return 0;
328}
329
/* Module exit point: tear down the MTD stack, then the partition table. */
static void __exit clps_mtd_cleanup(void)
{
	clps_destroy_mtd(info, mymtd);
	clps_destroy_partitions();
}
335
336module_init(clps_mtd_init);
337module_exit(clps_mtd_cleanup);
338
339MODULE_AUTHOR("Rob Scott");
340MODULE_DESCRIPTION("Cirrus Logic JEDEC map driver");
341MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/edb7312.c b/drivers/mtd/maps/edb7312.c
new file mode 100644
index 00000000000..fe42a212bb3
--- /dev/null
+++ b/drivers/mtd/maps/edb7312.c
@@ -0,0 +1,134 @@
1/*
2 * Handle mapping of the NOR flash on Cogent EDB7312 boards
3 *
4 * Copyright 2002 SYSGO Real-Time Solutions GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/types.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <asm/io.h>
16#include <linux/mtd/mtd.h>
17#include <linux/mtd/map.h>
18#include <linux/mtd/partitions.h>
19
20#define WINDOW_ADDR 0x00000000 /* physical properties of flash */
21#define WINDOW_SIZE 0x01000000
22#define BUSWIDTH 2
23#define FLASH_BLOCKSIZE_MAIN 0x20000
24#define FLASH_NUMBLOCKS_MAIN 128
25/* can be "cfi_probe", "jedec_probe", "map_rom", NULL }; */
26#define PROBETYPES { "cfi_probe", NULL }
27
28#define MSG_PREFIX "EDB7312-NOR:" /* prefix for our printk()'s */
29#define MTDID "edb7312-nor" /* for mtdparts= partitioning */
30
/* Device produced by probing the NOR window; NULL if probing failed. */
static struct mtd_info *mymtd;

/* Fixed description of the 16MiB, 16-bit-wide NOR window. */
struct map_info edb7312nor_map = {
	.name = "NOR flash on EDB7312",
	.size = WINDOW_SIZE,
	.bankwidth = BUSWIDTH,
	.phys = WINDOW_ADDR,
};

/*
 * MTD partitioning stuff
 */
/* Fallback layout used when no partition info is found at runtime. */
static struct mtd_partition static_partitions[3] =
{
	{
		.name = "ARMboot",
		.size = 0x40000,
		.offset = 0
	},
	{
		.name = "Kernel",
		.size = 0x200000,
		.offset = 0x40000
	},
	{
		.name = "RootFS",
		.size = 0xDC0000,
		.offset = 0x240000
	},
};

/* Runtime partition parsers, tried in order. */
static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };

static int mtd_parts_nb = 0;
static struct mtd_partition *mtd_parts = 0;
66
67static int __init init_edb7312nor(void)
68{
69 static const char *rom_probe_types[] = PROBETYPES;
70 const char **type;
71 const char *part_type = 0;
72
73 printk(KERN_NOTICE MSG_PREFIX "0x%08x at 0x%08x\n",
74 WINDOW_SIZE, WINDOW_ADDR);
75 edb7312nor_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
76
77 if (!edb7312nor_map.virt) {
78 printk(MSG_PREFIX "failed to ioremap\n");
79 return -EIO;
80 }
81
82 simple_map_init(&edb7312nor_map);
83
84 mymtd = 0;
85 type = rom_probe_types;
86 for(; !mymtd && *type; type++) {
87 mymtd = do_map_probe(*type, &edb7312nor_map);
88 }
89 if (mymtd) {
90 mymtd->owner = THIS_MODULE;
91
92 mtd_parts_nb = parse_mtd_partitions(mymtd, probes, &mtd_parts, MTDID);
93 if (mtd_parts_nb > 0)
94 part_type = "detected";
95
96 if (mtd_parts_nb == 0) {
97 mtd_parts = static_partitions;
98 mtd_parts_nb = ARRAY_SIZE(static_partitions);
99 part_type = "static";
100 }
101
102 if (mtd_parts_nb == 0)
103 printk(KERN_NOTICE MSG_PREFIX "no partition info available\n");
104 else
105 printk(KERN_NOTICE MSG_PREFIX
106 "using %s partition definition\n", part_type);
107 /* Register the whole device first. */
108 mtd_device_register(mymtd, NULL, 0);
109 mtd_device_register(mymtd, mtd_parts, mtd_parts_nb);
110 return 0;
111 }
112
113 iounmap((void *)edb7312nor_map.virt);
114 return -ENXIO;
115}
116
/* Module exit: unregister/destroy the MTD device and drop the mapping. */
static void __exit cleanup_edb7312nor(void)
{
	if (mymtd) {
		mtd_device_unregister(mymtd);
		map_destroy(mymtd);
	}
	if (edb7312nor_map.virt) {
		iounmap((void *)edb7312nor_map.virt);
		edb7312nor_map.virt = 0;
	}
}
128
129module_init(init_edb7312nor);
130module_exit(cleanup_edb7312nor);
131
132MODULE_LICENSE("GPL");
133MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
134MODULE_DESCRIPTION("Generic configurable MTD map driver");
diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c
new file mode 100644
index 00000000000..956e2e4f30e
--- /dev/null
+++ b/drivers/mtd/maps/fortunet.c
@@ -0,0 +1,277 @@
1/* fortunet.c memory map
2 *
3 */
4
5#include <linux/module.h>
6#include <linux/types.h>
7#include <linux/kernel.h>
8#include <linux/init.h>
9#include <linux/string.h>
10
11#include <linux/mtd/mtd.h>
12#include <linux/mtd/map.h>
13#include <linux/mtd/partitions.h>
14
15#include <asm/io.h>
16
17#define MAX_NUM_REGIONS 4
18#define MAX_NUM_PARTITIONS 8
19
20#define DEF_WINDOW_ADDR_PHY 0x00000000
21#define DEF_WINDOW_SIZE 0x00800000 // 8 Mega Bytes
22
23#define MTD_FORTUNET_PK "MTD FortuNet: "
24
25#define MAX_NAME_SIZE 128
26
/* Everything known about one configurable flash region. */
struct map_region
{
	int window_addr_physical;	/* physical base of the window */
	int altbankwidth;		/* fallback bus width for re-probe */
	struct map_info map_info;
	struct mtd_info *mymtd;		/* NULL until successfully probed */
	struct mtd_partition parts[MAX_NUM_PARTITIONS];
	char map_name[MAX_NAME_SIZE];
	char parts_name[MAX_NUM_PARTITIONS][MAX_NAME_SIZE];
};

static struct map_region map_regions[MAX_NUM_REGIONS];
/* 1 when the region was configured via MTD_Region= (or defaulted). */
static int map_regions_set[MAX_NUM_REGIONS] = {0,0,0,0};
/* Number of partitions added to each region via MTD_Partition=. */
static int map_regions_parts[MAX_NUM_REGIONS] = {0,0,0,0};



/* Template copied into each region before option parsing. */
struct map_info default_map = {
	.size = DEF_WINDOW_SIZE,
	.bankwidth = 4,
};
48
/*
 * Copy the leading (possibly double-quoted) field of the comma-separated
 * option string @sor into @dest (at most @dest_size bytes including the
 * terminating NUL).  Quotes are stripped; commas inside quotes are kept.
 * Returns a pointer just past the consumed field (past the separating
 * comma, if one was found) so the caller can continue parsing there.
 */
static char * __init get_string_option(char *dest,int dest_size,char *sor)
{
	if(!dest_size)
		return sor;
	dest_size--;	/* reserve room for the trailing NUL */
	while(*sor)
	{
		if(*sor==',')
		{
			/* field separator: consume it and stop */
			sor++;
			break;
		}
		else if(*sor=='\"')
		{
			/* quoted run: copy verbatim up to the closing quote */
			sor++;
			while(*sor)
			{
				if(*sor=='\"')
				{
					sor++;
					break;
				}
				*dest = *sor;
				dest++;
				sor++;
				dest_size--;
				if(!dest_size)
				{
					/* destination full: terminate and bail */
					*dest = 0;
					return sor;
				}
			}
		}
		else
		{
			*dest = *sor;
			dest++;
			sor++;
			dest_size--;
			if(!dest_size)
			{
				/* destination full: terminate and bail */
				*dest = 0;
				return sor;
			}
		}
	}
	*dest = 0;
	return sor;
}
98
/*
 * Handle a "MTD_Region=name,index[,base,size,bankwidth,altbankwidth]"
 * kernel command-line option: (re)initialise map_regions[index] from
 * the defaults, then apply any optional overrides in order.
 * Always returns 1 so __setup() treats the option as consumed.
 */
static int __init MTD_New_Region(char *line)
{
	char string[MAX_NAME_SIZE];
	int params[6];
	get_options (get_string_option(string,sizeof(string),line),6,params);
	if(params[0]<1)
	{
		printk(MTD_FORTUNET_PK "Bad parameters for MTD Region "
			" name,region-number[,base,size,bankwidth,altbankwidth]\n");
		return 1;
	}
	if((params[1]<0)||(params[1]>=MAX_NUM_REGIONS))
	{
		printk(MTD_FORTUNET_PK "Bad region index of %d only have 0..%u regions\n",
			params[1],MAX_NUM_REGIONS-1);
		return 1;
	}
	/* Reset the slot, then seed it with the compile-time defaults. */
	memset(&map_regions[params[1]],0,sizeof(map_regions[params[1]]));
	memcpy(&map_regions[params[1]].map_info,
		&default_map,sizeof(map_regions[params[1]].map_info));
	map_regions_set[params[1]] = 1;
	map_regions[params[1]].window_addr_physical = DEF_WINDOW_ADDR_PHY;
	map_regions[params[1]].altbankwidth = 2;
	map_regions[params[1]].mymtd = NULL;
	map_regions[params[1]].map_info.name = map_regions[params[1]].map_name;
	strcpy(map_regions[params[1]].map_info.name,string);
	/* Optional trailing parameters override the defaults in order. */
	if(params[0]>1)
	{
		map_regions[params[1]].window_addr_physical = params[2];
	}
	if(params[0]>2)
	{
		map_regions[params[1]].map_info.size = params[3];
	}
	if(params[0]>3)
	{
		map_regions[params[1]].map_info.bankwidth = params[4];
	}
	if(params[0]>4)
	{
		map_regions[params[1]].altbankwidth = params[5];
	}
	return 1;
}
143
/*
 * Handle a "MTD_Partition=name,region-number,size,offset" command-line
 * option: append one partition entry to the given region's table.
 * Always returns 1 so __setup() treats the option as consumed.
 */
static int __init MTD_New_Partition(char *line)
{
	char string[MAX_NAME_SIZE];
	int params[4];
	get_options (get_string_option(string,sizeof(string),line),4,params);
	if(params[0]<3)
	{
		printk(MTD_FORTUNET_PK "Bad parameters for MTD Partition "
			" name,region-number,size,offset\n");
		return 1;
	}
	if((params[1]<0)||(params[1]>=MAX_NUM_REGIONS))
	{
		printk(MTD_FORTUNET_PK "Bad region index of %d only have 0..%u regions\n",
			params[1],MAX_NUM_REGIONS-1);
		return 1;
	}
	if(map_regions_parts[params[1]]>=MAX_NUM_PARTITIONS)
	{
		printk(MTD_FORTUNET_PK "Out of space for partition in this region\n");
		return 1;
	}
	/* Name is stored in the region's own buffer so it outlives @line. */
	map_regions[params[1]].parts[map_regions_parts[params[1]]].name =
		map_regions[params[1]]. parts_name[map_regions_parts[params[1]]];
	strcpy(map_regions[params[1]].parts[map_regions_parts[params[1]]].name,string);
	map_regions[params[1]].parts[map_regions_parts[params[1]]].size =
		params[2];
	map_regions[params[1]].parts[map_regions_parts[params[1]]].offset =
		params[3];
	map_regions[params[1]].parts[map_regions_parts[params[1]]].mask_flags = 0;
	map_regions_parts[params[1]]++;
	return 1;
}
177
178__setup("MTD_Region=", MTD_New_Region);
179__setup("MTD_Partition=", MTD_New_Partition);
180
181/* Backwards-spelling-compatibility */
182__setup("MTD_Partion=", MTD_New_Partition);
183
184static int __init init_fortunet(void)
185{
186 int ix,iy;
187 for(iy=ix=0;ix<MAX_NUM_REGIONS;ix++)
188 {
189 if(map_regions_parts[ix]&&(!map_regions_set[ix]))
190 {
191 printk(MTD_FORTUNET_PK "Region %d is not setup (Setting to default)\n",
192 ix);
193 memset(&map_regions[ix],0,sizeof(map_regions[ix]));
194 memcpy(&map_regions[ix].map_info,&default_map,
195 sizeof(map_regions[ix].map_info));
196 map_regions_set[ix] = 1;
197 map_regions[ix].window_addr_physical = DEF_WINDOW_ADDR_PHY;
198 map_regions[ix].altbankwidth = 2;
199 map_regions[ix].mymtd = NULL;
200 map_regions[ix].map_info.name = map_regions[ix].map_name;
201 strcpy(map_regions[ix].map_info.name,"FORTUNET");
202 }
203 if(map_regions_set[ix])
204 {
205 iy++;
206 printk(KERN_NOTICE MTD_FORTUNET_PK "%s flash device at physically "
207 " address %x size %x\n",
208 map_regions[ix].map_info.name,
209 map_regions[ix].window_addr_physical,
210 map_regions[ix].map_info.size);
211
212 map_regions[ix].map_info.phys = map_regions[ix].window_addr_physical,
213
214 map_regions[ix].map_info.virt =
215 ioremap_nocache(
216 map_regions[ix].window_addr_physical,
217 map_regions[ix].map_info.size);
218 if(!map_regions[ix].map_info.virt)
219 {
220 int j = 0;
221 printk(MTD_FORTUNET_PK "%s flash failed to ioremap!\n",
222 map_regions[ix].map_info.name);
223 for (j = 0 ; j < ix; j++)
224 iounmap(map_regions[j].map_info.virt);
225 return -ENXIO;
226 }
227 simple_map_init(&map_regions[ix].map_info);
228
229 printk(KERN_NOTICE MTD_FORTUNET_PK "%s flash is virtually at: %x\n",
230 map_regions[ix].map_info.name,
231 map_regions[ix].map_info.virt);
232 map_regions[ix].mymtd = do_map_probe("cfi_probe",
233 &map_regions[ix].map_info);
234 if((!map_regions[ix].mymtd)&&(
235 map_regions[ix].altbankwidth!=map_regions[ix].map_info.bankwidth))
236 {
237 printk(KERN_NOTICE MTD_FORTUNET_PK "Trying alternate bankwidth "
238 "for %s flash.\n",
239 map_regions[ix].map_info.name);
240 map_regions[ix].map_info.bankwidth =
241 map_regions[ix].altbankwidth;
242 map_regions[ix].mymtd = do_map_probe("cfi_probe",
243 &map_regions[ix].map_info);
244 }
245 map_regions[ix].mymtd->owner = THIS_MODULE;
246 mtd_device_register(map_regions[ix].mymtd,
247 map_regions[ix].parts,
248 map_regions_parts[ix]);
249 }
250 }
251 if(iy)
252 return 0;
253 return -ENXIO;
254}
255
256static void __exit cleanup_fortunet(void)
257{
258 int ix;
259 for(ix=0;ix<MAX_NUM_REGIONS;ix++)
260 {
261 if(map_regions_set[ix])
262 {
263 if( map_regions[ix].mymtd )
264 {
265 mtd_device_unregister(map_regions[ix].mymtd);
266 map_destroy( map_regions[ix].mymtd );
267 }
268 iounmap((void *)map_regions[ix].map_info.virt);
269 }
270 }
271}
272
273module_init(init_fortunet);
274module_exit(cleanup_fortunet);
275
276MODULE_AUTHOR("FortuNet, Inc.");
277MODULE_DESCRIPTION("MTD map driver for FortuNet boards");
diff --git a/drivers/mtd/maps/tegra_nor.c b/drivers/mtd/maps/tegra_nor.c
new file mode 100644
index 00000000000..b455fd5e1c0
--- /dev/null
+++ b/drivers/mtd/maps/tegra_nor.c
@@ -0,0 +1,483 @@
1/*
2 * drivers/mtd/maps/tegra_nor.c
3 *
4 * MTD mapping driver for the internal SNOR controller in Tegra SoCs
5 *
6 * Copyright (C) 2009 - 2012 NVIDIA Corporation
7 *
8 * Author:
9 * Raghavendra VK <rvk@nvidia.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19 * more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with this program; if not, write to the Free Software Foundation, Inc.,
23 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
24 */
25
26#include <linux/platform_device.h>
27#include <linux/module.h>
28#include <linux/types.h>
29#include <linux/kernel.h>
30#include <linux/init.h>
31#include <linux/ioport.h>
32#include <linux/slab.h>
33#include <linux/interrupt.h>
34#include <linux/irq.h>
35#include <linux/mutex.h>
36#include <linux/mtd/mtd.h>
37#include <linux/mtd/map.h>
38#include <linux/mtd/partitions.h>
39#include <linux/dma-mapping.h>
40#include <linux/proc_fs.h>
41#include <linux/io.h>
42#include <linux/uaccess.h>
43#include <linux/clk.h>
44#include <linux/platform_data/tegra_nor.h>
45#include <asm/cacheflush.h>
46
47#define __BITMASK0(len) (BIT(len) - 1)
48#define REG_FIELD(val, start, len) (((val) & __BITMASK0(len)) << (start))
49#define REG_GET_FIELD(val, start, len) (((val) >> (start)) & __BITMASK0(len))
50
51/* tegra gmi registers... */
52#define TEGRA_SNOR_CONFIG_REG 0x00
53#define TEGRA_SNOR_NOR_ADDR_PTR_REG 0x08
54#define TEGRA_SNOR_AHB_ADDR_PTR_REG 0x0C
55#define TEGRA_SNOR_TIMING0_REG 0x10
56#define TEGRA_SNOR_TIMING1_REG 0x14
57#define TEGRA_SNOR_DMA_CFG_REG 0x20
58
59/* config register */
60#define TEGRA_SNOR_CONFIG_GO BIT(31)
61#define TEGRA_SNOR_CONFIG_WORDWIDE BIT(30)
62#define TEGRA_SNOR_CONFIG_DEVICE_TYPE BIT(29)
63#define TEGRA_SNOR_CONFIG_MUX_MODE BIT(28)
64#define TEGRA_SNOR_CONFIG_BURST_LEN(val) REG_FIELD((val), 26, 2)
65#define TEGRA_SNOR_CONFIG_RDY_ACTIVE BIT(24)
66#define TEGRA_SNOR_CONFIG_RDY_POLARITY BIT(23)
67#define TEGRA_SNOR_CONFIG_ADV_POLARITY BIT(22)
68#define TEGRA_SNOR_CONFIG_OE_WE_POLARITY BIT(21)
69#define TEGRA_SNOR_CONFIG_CS_POLARITY BIT(20)
70#define TEGRA_SNOR_CONFIG_NOR_DPD BIT(19)
71#define TEGRA_SNOR_CONFIG_WP BIT(15)
72#define TEGRA_SNOR_CONFIG_PAGE_SZ(val) REG_FIELD((val), 8, 2)
73#define TEGRA_SNOR_CONFIG_MST_ENB BIT(7)
74#define TEGRA_SNOR_CONFIG_SNOR_CS(val) REG_FIELD((val), 4, 2)
75#define TEGRA_SNOR_CONFIG_CE_LAST REG_FIELD(3)
76#define TEGRA_SNOR_CONFIG_CE_FIRST REG_FIELD(2)
77#define TEGRA_SNOR_CONFIG_DEVICE_MODE(val) REG_FIELD((val), 0, 2)
78
79/* dma config register */
80#define TEGRA_SNOR_DMA_CFG_GO BIT(31)
81#define TEGRA_SNOR_DMA_CFG_BSY BIT(30)
82#define TEGRA_SNOR_DMA_CFG_DIR BIT(29)
83#define TEGRA_SNOR_DMA_CFG_INT_ENB BIT(28)
84#define TEGRA_SNOR_DMA_CFG_INT_STA BIT(27)
85#define TEGRA_SNOR_DMA_CFG_BRST_SZ(val) REG_FIELD((val), 24, 3)
86#define TEGRA_SNOR_DMA_CFG_WRD_CNT(val) REG_FIELD((val), 2, 14)
87
88/* timing 0 register */
89#define TEGRA_SNOR_TIMING0_PG_RDY(val) REG_FIELD((val), 28, 4)
90#define TEGRA_SNOR_TIMING0_PG_SEQ(val) REG_FIELD((val), 20, 4)
91#define TEGRA_SNOR_TIMING0_MUX(val) REG_FIELD((val), 12, 4)
92#define TEGRA_SNOR_TIMING0_HOLD(val) REG_FIELD((val), 8, 4)
93#define TEGRA_SNOR_TIMING0_ADV(val) REG_FIELD((val), 4, 4)
94#define TEGRA_SNOR_TIMING0_CE(val) REG_FIELD((val), 0, 4)
95
96/* timing 1 register */
97#define TEGRA_SNOR_TIMING1_WE(val) REG_FIELD((val), 16, 8)
98#define TEGRA_SNOR_TIMING1_OE(val) REG_FIELD((val), 8, 8)
99#define TEGRA_SNOR_TIMING1_WAIT(val) REG_FIELD((val), 0, 8)
100
101/* SNOR DMA supports 2^14 AHB (32-bit words)
102 * Maximum data in one transfer = 2^16 bytes
103 */
104#define TEGRA_SNOR_DMA_LIMIT 0x10000
105#define TEGRA_SNOR_DMA_LIMIT_WORDS (TEGRA_SNOR_DMA_LIMIT >> 2)
106
107/* Even if BW is 1 MB/s, maximum time to
108 * transfer SNOR_DMA_LIMIT bytes is 66 ms
109 */
110#define TEGRA_SNOR_DMA_TIMEOUT_MS 67
111
/*
 * Per-device driver state for one Tegra SNOR controller instance.
 */
struct tegra_nor_info {
	struct tegra_nor_platform_data *plat;	/* board data from the platform device */
	struct device *dev;	/* &pdev->dev, used for logging and DMA alloc */
	struct clk *clk;	/* controller clock */
	struct mtd_partition *parts;	/* partitions parsed from the command line */
	struct mtd_info *mtd;	/* result of do_map_probe() */
	struct map_info map;	/* mapping handed to the MTD chip driver */
	struct completion dma_complete;	/* signalled by tegra_nor_isr() */
	void __iomem *base;	/* controller register aperture */
	void *dma_virt_buffer;	/* coherent DMA bounce buffer (CPU view) */
	dma_addr_t dma_phys_buffer;	/* bus address of the bounce buffer */
	u32 init_config;	/* base CONFIG register value from controller_init */
	u32 timing0_default, timing1_default;	/* timings restored after reads */
	u32 timing0_read, timing1_read;	/* timings applied during DMA reads */
};
127
128static inline unsigned long snor_tegra_readl(struct tegra_nor_info *tnor,
129 unsigned long reg)
130{
131 return readl(tnor->base + reg);
132}
133
134static inline void snor_tegra_writel(struct tegra_nor_info *tnor,
135 unsigned long val, unsigned long reg)
136{
137 writel(val, tnor->base + reg);
138}
139
140#define DRV_NAME "tegra-nor"
141
142static const char * const part_probes[] = { "cmdlinepart", NULL };
143
144static int wait_for_dma_completion(struct tegra_nor_info *info)
145{
146 unsigned long dma_timeout;
147 int ret;
148
149 dma_timeout = msecs_to_jiffies(TEGRA_SNOR_DMA_TIMEOUT_MS);
150 ret = wait_for_completion_timeout(&info->dma_complete, dma_timeout);
151 return ret ? 0 : -ETIMEDOUT;
152}
153
154static void tegra_flash_dma(struct map_info *map,
155 void *to, unsigned long from, ssize_t len)
156{
157 u32 snor_config, dma_config = 0;
158 int dma_transfer_count = 0, word32_count = 0;
159 u32 nor_address, current_transfer = 0;
160 u32 copy_to = (u32)to;
161 struct tegra_nor_info *c =
162 container_of(map, struct tegra_nor_info, map);
163 unsigned int bytes_remaining = len;
164
165 snor_config = c->init_config;
166 snor_tegra_writel(c, c->timing0_read, TEGRA_SNOR_TIMING0_REG);
167 snor_tegra_writel(c, c->timing1_read, TEGRA_SNOR_TIMING1_REG);
168
169 if (len > 32) {
170 word32_count = len >> 2;
171 bytes_remaining = len & 0x00000003;
172 /*
173 * The parameters can be setup in any order since we write to
174 * controller register only after all parameters are set.
175 */
176 /* SNOR CONFIGURATION SETUP */
177 snor_config |= TEGRA_SNOR_CONFIG_DEVICE_MODE(1);
178 /* 8 word page */
179 snor_config |= TEGRA_SNOR_CONFIG_PAGE_SZ(2);
180 snor_config |= TEGRA_SNOR_CONFIG_MST_ENB;
181 /* SNOR DMA CONFIGURATION SETUP */
182 /* NOR -> AHB */
183 dma_config &= ~TEGRA_SNOR_DMA_CFG_DIR;
184 /* One word burst */
185 dma_config |= TEGRA_SNOR_DMA_CFG_BRST_SZ(4);
186
187 for (nor_address = (unsigned int)(map->phys + from);
188 word32_count > 0;
189 word32_count -= current_transfer,
190 dma_transfer_count += current_transfer,
191 nor_address += (current_transfer * 4),
192 copy_to += (current_transfer * 4)) {
193
194 current_transfer =
195 (word32_count > TEGRA_SNOR_DMA_LIMIT_WORDS)
196 ? (TEGRA_SNOR_DMA_LIMIT_WORDS) : word32_count;
197 /* Start NOR operation */
198 snor_config |= TEGRA_SNOR_CONFIG_GO;
199 dma_config |= TEGRA_SNOR_DMA_CFG_GO;
200 /* Enable interrupt before every transaction since the
201 * interrupt handler disables it */
202 dma_config |= TEGRA_SNOR_DMA_CFG_INT_ENB;
203 /* Num of AHB (32-bit) words to transferred minus 1 */
204 dma_config |=
205 TEGRA_SNOR_DMA_CFG_WRD_CNT(current_transfer - 1);
206 snor_tegra_writel(c, c->dma_phys_buffer,
207 TEGRA_SNOR_AHB_ADDR_PTR_REG);
208 snor_tegra_writel(c, nor_address,
209 TEGRA_SNOR_NOR_ADDR_PTR_REG);
210 snor_tegra_writel(c, snor_config,
211 TEGRA_SNOR_CONFIG_REG);
212 snor_tegra_writel(c, dma_config,
213 TEGRA_SNOR_DMA_CFG_REG);
214 if (wait_for_dma_completion(c)) {
215 dev_err(c->dev, "timout waiting for DMA\n");
216 /* Transfer the remaining words by memcpy */
217 bytes_remaining += (word32_count << 2);
218 break;
219 }
220 memcpy((char *)(copy_to), (char *)(c->dma_virt_buffer),
221 (current_transfer << 2));
222
223 }
224 }
225 /* Put the controller back into slave mode. */
226 snor_config = snor_tegra_readl(c, TEGRA_SNOR_CONFIG_REG);
227 snor_config &= ~TEGRA_SNOR_CONFIG_MST_ENB;
228 snor_config |= TEGRA_SNOR_CONFIG_DEVICE_MODE(0);
229 snor_tegra_writel(c, snor_config, TEGRA_SNOR_CONFIG_REG);
230
231 memcpy_fromio(((char *)to + (dma_transfer_count << 2)),
232 ((char *)(map->virt + from) + (dma_transfer_count << 2)),
233 bytes_remaining);
234
235 snor_tegra_writel(c, c->timing0_default, TEGRA_SNOR_TIMING0_REG);
236 snor_tegra_writel(c, c->timing1_default, TEGRA_SNOR_TIMING1_REG);
237}
238
239static irqreturn_t tegra_nor_isr(int flag, void *dev_id)
240{
241 struct tegra_nor_info *info = (struct tegra_nor_info *)dev_id;
242 u32 dma_config = snor_tegra_readl(info, TEGRA_SNOR_DMA_CFG_REG);
243 if (dma_config & TEGRA_SNOR_DMA_CFG_INT_STA) {
244 /* Disable interrupts. WAR for BUG:821560 */
245 dma_config &= ~TEGRA_SNOR_DMA_CFG_INT_ENB;
246 snor_tegra_writel(info, dma_config, TEGRA_SNOR_DMA_CFG_REG);
247 complete(&info->dma_complete);
248 } else {
249 pr_err("%s: Spurious interrupt\n", __func__);
250 }
251 return IRQ_HANDLED;
252}
253
254static int tegra_snor_controller_init(struct tegra_nor_info *info)
255{
256 struct tegra_nor_chip_parms *chip_parm = &info->plat->chip_parms;
257 u32 width = info->plat->flash.width;
258 u32 config = 0;
259
260 config |= TEGRA_SNOR_CONFIG_DEVICE_MODE(0);
261 config |= TEGRA_SNOR_CONFIG_SNOR_CS(0);
262 config &= ~TEGRA_SNOR_CONFIG_DEVICE_TYPE; /* Select NOR */
263 config |= TEGRA_SNOR_CONFIG_WP; /* Enable writes */
264 switch (width) {
265 case 2:
266 config &= ~TEGRA_SNOR_CONFIG_WORDWIDE; /* 16 bit */
267 break;
268 case 4:
269 config |= TEGRA_SNOR_CONFIG_WORDWIDE; /* 32 bit */
270 break;
271 default:
272 return -EINVAL;
273 }
274 config |= TEGRA_SNOR_CONFIG_BURST_LEN(0);
275 config &= ~TEGRA_SNOR_CONFIG_MUX_MODE;
276 snor_tegra_writel(info, config, TEGRA_SNOR_CONFIG_REG);
277 info->init_config = config;
278
279 info->timing0_default = chip_parm->timing_default.timing0;
280 info->timing0_read = chip_parm->timing_read.timing0;
281 info->timing1_default = chip_parm->timing_default.timing1;
282 info->timing1_read = chip_parm->timing_read.timing0;
283
284 snor_tegra_writel(info, info->timing1_default, TEGRA_SNOR_TIMING1_REG);
285 snor_tegra_writel(info, info->timing0_default, TEGRA_SNOR_TIMING0_REG);
286 return 0;
287}
288
289static int tegra_nor_probe(struct platform_device *pdev)
290{
291 int err = 0;
292 struct tegra_nor_platform_data *plat = pdev->dev.platform_data;
293 struct tegra_nor_info *info = NULL;
294 struct device *dev = &pdev->dev;
295 struct resource *res;
296 int irq;
297
298 if (!plat) {
299 pr_err("%s: no platform device info\n", __func__);
300 err = -EINVAL;
301 goto fail;
302 }
303
304 info = devm_kzalloc(dev, sizeof(struct tegra_nor_info),
305 GFP_KERNEL);
306 if (!info) {
307 err = -ENOMEM;
308 goto fail;
309 }
310
311 /* Get NOR controller & map the same */
312 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
313 if (!res) {
314 dev_err(dev, "no mem resource?\n");
315 err = -ENODEV;
316 goto fail;
317 }
318
319 if (!devm_request_mem_region(dev, res->start, resource_size(res),
320 dev_name(&pdev->dev))) {
321 dev_err(dev, "NOR region already claimed\n");
322 err = -EBUSY;
323 goto fail;
324 }
325
326 info->base = devm_ioremap(dev, res->start, resource_size(res));
327 if (!info->base) {
328 dev_err(dev, "Can't ioremap NOR region\n");
329 err = -ENOMEM;
330 goto fail;
331 }
332
333 /* Get NOR flash aperture & map the same */
334 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
335 if (!res) {
336 dev_err(dev, "no mem resource?\n");
337 err = -ENODEV;
338 goto fail;
339 }
340
341 if (!devm_request_mem_region(dev, res->start, resource_size(res),
342 dev_name(dev))) {
343 dev_err(dev, "NOR region already claimed\n");
344 err = -EBUSY;
345 goto fail;
346 }
347
348 info->map.virt = devm_ioremap(dev, res->start,
349 resource_size(res));
350 if (!info->map.virt) {
351 dev_err(dev, "Can't ioremap NOR region\n");
352 err = -ENOMEM;
353 goto fail;
354 }
355
356 info->plat = plat;
357 info->dev = dev;
358 info->map.bankwidth = plat->flash.width;
359 info->map.name = dev_name(dev);
360 info->map.phys = res->start;
361 info->map.size = resource_size(res);
362
363 info->clk = clk_get(dev, NULL);
364 if (IS_ERR(info->clk)) {
365 err = PTR_ERR(info->clk);
366 goto fail;
367 }
368
369 err = clk_enable(info->clk);
370 if (err != 0)
371 goto out_clk_put;
372
373 simple_map_init(&info->map);
374 info->map.copy_from = tegra_flash_dma;
375
376 /* Intialise the SNOR controller before probe */
377 err = tegra_snor_controller_init(info);
378 if (err) {
379 dev_err(dev, "Error initializing controller\n");
380 goto out_clk_disable;
381 }
382
383 init_completion(&info->dma_complete);
384
385 irq = platform_get_irq(pdev, 0);
386 if (!irq) {
387 dev_err(dev, "no irq resource?\n");
388 err = -ENODEV;
389 goto out_clk_disable;
390 }
391
392 /* Register SNOR DMA completion interrupt */
393 err = devm_request_irq(dev, irq, tegra_nor_isr, IRQF_DISABLED,
394 dev_name(dev), info);
395 if (err) {
396 dev_err(dev, "Failed to request irq %i\n", irq);
397 goto out_clk_disable;
398 }
399 info->dma_virt_buffer = dma_alloc_coherent(dev,
400 TEGRA_SNOR_DMA_LIMIT,
401 &info->dma_phys_buffer,
402 GFP_KERNEL);
403 if (info->dma_virt_buffer == NULL) {
404 dev_err(&pdev->dev, "Could not allocate buffer for DMA");
405 err = -ENOMEM;
406 goto out_clk_disable;
407 }
408
409 info->mtd = do_map_probe(plat->flash.map_name, &info->map);
410 if (!info->mtd) {
411 err = -EIO;
412 goto out_dma_free_coherent;
413 }
414 info->mtd->owner = THIS_MODULE;
415 info->parts = NULL;
416
417 platform_set_drvdata(pdev, info);
418 err = parse_mtd_partitions(info->mtd, part_probes, &info->parts, 0);
419 if (err > 0)
420 err = mtd_device_register(info->mtd, info->parts, err);
421 else if (err <= 0 && plat->flash.parts)
422 err =
423 mtd_device_register(info->mtd, plat->flash.parts,
424 plat->flash.nr_parts);
425 else
426 mtd_device_register(info->mtd, NULL, 0);
427
428 return 0;
429
430out_dma_free_coherent:
431 dma_free_coherent(dev, TEGRA_SNOR_DMA_LIMIT,
432 info->dma_virt_buffer, info->dma_phys_buffer);
433out_clk_disable:
434 clk_disable(info->clk);
435out_clk_put:
436 clk_put(info->clk);
437fail:
438 pr_err("Tegra NOR probe failed\n");
439 return err;
440}
441
442static int tegra_nor_remove(struct platform_device *pdev)
443{
444 struct tegra_nor_info *info = platform_get_drvdata(pdev);
445
446 mtd_device_unregister(info->mtd);
447 if (info->parts)
448 kfree(info->parts);
449 dma_free_coherent(&pdev->dev, TEGRA_SNOR_DMA_LIMIT,
450 info->dma_virt_buffer, info->dma_phys_buffer);
451 map_destroy(info->mtd);
452 clk_disable(info->clk);
453 clk_put(info->clk);
454
455 return 0;
456}
457
458static struct platform_driver __refdata tegra_nor_driver = {
459 .probe = tegra_nor_probe,
460 .remove = __devexit_p(tegra_nor_remove),
461 .driver = {
462 .name = DRV_NAME,
463 .owner = THIS_MODULE,
464 },
465};
466
467static int __init tegra_nor_init(void)
468{
469 return platform_driver_register(&tegra_nor_driver);
470}
471
472static void __exit tegra_nor_exit(void)
473{
474 platform_driver_unregister(&tegra_nor_driver);
475}
476
477module_init(tegra_nor_init);
478module_exit(tegra_nor_exit);
479
480MODULE_AUTHOR("Raghavendra VK <rvk@nvidia.com>");
481MODULE_DESCRIPTION("NOR Flash mapping driver for NVIDIA Tegra based boards");
482MODULE_LICENSE("GPL");
483MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
new file mode 100644
index 00000000000..901ce968efa
--- /dev/null
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -0,0 +1,181 @@
1/*
2 * Map for flash chips on Wind River PowerQUICC II SBC82xx board.
3 *
4 * Copyright (C) 2004 Red Hat, Inc.
5 *
6 * Author: David Woodhouse <dwmw2@infradead.org>
7 *
8 */
9
10#include <linux/module.h>
11#include <linux/types.h>
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/slab.h>
15#include <asm/io.h>
16#include <linux/mtd/mtd.h>
17#include <linux/mtd/map.h>
18#include <linux/mtd/partitions.h>
19
20#include <asm/immap_cpm2.h>
21
22static struct mtd_info *sbcmtd[3];
23static struct mtd_partition *sbcmtd_parts[3];
24
25struct map_info sbc82xx_flash_map[3] = {
26 {.name = "Boot flash"},
27 {.name = "Alternate boot flash"},
28 {.name = "User flash"}
29};
30
31static struct mtd_partition smallflash_parts[] = {
32 {
33 .name = "space",
34 .size = 0x100000,
35 .offset = 0,
36 }, {
37 .name = "bootloader",
38 .size = MTDPART_SIZ_FULL,
39 .offset = MTDPART_OFS_APPEND,
40 }
41};
42
43static struct mtd_partition bigflash_parts[] = {
44 {
45 .name = "bootloader",
46 .size = 0x00100000,
47 .offset = 0,
48 }, {
49 .name = "file system",
50 .size = 0x01f00000,
51 .offset = MTDPART_OFS_APPEND,
52 }, {
53 .name = "boot config",
54 .size = 0x00100000,
55 .offset = MTDPART_OFS_APPEND,
56 }, {
57 .name = "space",
58 .size = 0x01f00000,
59 .offset = MTDPART_OFS_APPEND,
60 }
61};
62
63static const char *part_probes[] __initdata = {"cmdlinepart", "RedBoot", NULL};
64
/*
 * Derive one map_info's phys/size/bankwidth from a PowerQUICC II memory
 * controller base register (BR) / option register (OR) pair.  BR bit 0
 * clear means the chip select is disabled (phys/size stay 0); BR bits
 * 11-12 encode the port size (8/16/32-bit).
 *
 * BUGFIX: the trailing semicolon after "while (0)" has been removed —
 * it defeated the do/while(0) idiom and would break the macro inside an
 * unbraced if/else.
 */
#define init_sbc82xx_one_flash(map, br, or)			\
do {								\
	(map).phys = (br & 1) ? (br & 0xffff8000) : 0;		\
	(map).size = (br & 1) ? (~(or & 0xffff8000) + 1) : 0;	\
	switch (br & 0x00001800) {				\
	case 0x00000000:					\
	case 0x00000800:	(map).bankwidth = 1;	break;	\
	case 0x00001000:	(map).bankwidth = 2;	break;	\
	case 0x00001800:	(map).bankwidth = 4;	break;	\
	}							\
} while (0)
76
/*
 * Discover the three flash banks (boot on CS0, alternate boot on CS6,
 * user flash on CS1) from the CPM2 memory controller registers, map and
 * CFI-probe each one, and register partitions: command-line/RedBoot
 * partitions when present, otherwise the compiled-in defaults.
 */
static int __init init_sbc82xx_flash(void)
{
	volatile memctl_cpm2_t *mc = &cpm2_immr->im_memctl;
	int bigflash;
	int i;

#ifdef CONFIG_SBC8560
	/* SBC8560 has the memory controller outside cpm2_immr; map it. */
	mc = ioremap(0xff700000 + 0x5000, sizeof(memctl_cpm2_t));
#else
	mc = &cpm2_immr->im_memctl;
#endif

	/* An 8-bit-wide boot CS (port size bits == 11) means the small
	 * flash booted the board, so the big flash sits on CS6 instead. */
	bigflash = 1;
	if ((mc->memc_br0 & 0x00001800) == 0x00001800)
		bigflash = 0;

	init_sbc82xx_one_flash(sbc82xx_flash_map[0], mc->memc_br0, mc->memc_or0);
	init_sbc82xx_one_flash(sbc82xx_flash_map[1], mc->memc_br6, mc->memc_or6);
	init_sbc82xx_one_flash(sbc82xx_flash_map[2], mc->memc_br1, mc->memc_or1);

#ifdef CONFIG_SBC8560
	iounmap((void *) mc);
#endif

	for (i=0; i<3; i++) {
		int8_t flashcs[3] = { 0, 6, 1 };
		int nr_parts;

		printk(KERN_NOTICE "PowerQUICC II %s (%ld MiB on CS%d",
		       sbc82xx_flash_map[i].name,
		       (sbc82xx_flash_map[i].size >> 20),
		       flashcs[i]);
		if (!sbc82xx_flash_map[i].phys) {
			/* We know it can't be at zero. */
			printk("): disabled by bootloader.\n");
			continue;
		}
		printk(" at %08lx)\n", sbc82xx_flash_map[i].phys);

		sbc82xx_flash_map[i].virt = ioremap(sbc82xx_flash_map[i].phys, sbc82xx_flash_map[i].size);

		if (!sbc82xx_flash_map[i].virt) {
			printk("Failed to ioremap\n");
			continue;
		}

		simple_map_init(&sbc82xx_flash_map[i]);

		sbcmtd[i] = do_map_probe("cfi_probe", &sbc82xx_flash_map[i]);

		if (!sbcmtd[i])
			continue;

		sbcmtd[i]->owner = THIS_MODULE;

		/* Prefer partitions from the command line or a RedBoot table */
		nr_parts = parse_mtd_partitions(sbcmtd[i], part_probes,
						&sbcmtd_parts[i], 0);
		if (nr_parts > 0) {
			mtd_device_register(sbcmtd[i], sbcmtd_parts[i],
					    nr_parts);
			continue;
		}

		/* No partitioning detected. Use default */
		if (i == 2) {
			/* user flash: expose as one unpartitioned device */
			mtd_device_register(sbcmtd[i], NULL, 0);
		} else if (i == bigflash) {
			mtd_device_register(sbcmtd[i], bigflash_parts,
					    ARRAY_SIZE(bigflash_parts));
		} else {
			mtd_device_register(sbcmtd[i], smallflash_parts,
					    ARRAY_SIZE(smallflash_parts));
		}
	}
	return 0;
}
153
154static void __exit cleanup_sbc82xx_flash(void)
155{
156 int i;
157
158 for (i=0; i<3; i++) {
159 if (!sbcmtd[i])
160 continue;
161
162 if (i<2 || sbcmtd_parts[i])
163 mtd_device_unregister(sbcmtd[i]);
164 else
165 mtd_device_unregister(sbcmtd[i]);
166
167 kfree(sbcmtd_parts[i]);
168 map_destroy(sbcmtd[i]);
169
170 iounmap((void *)sbc82xx_flash_map[i].virt);
171 sbc82xx_flash_map[i].virt = 0;
172 }
173}
174
175module_init(init_sbc82xx_flash);
176module_exit(cleanup_sbc82xx_flash);
177
178
179MODULE_LICENSE("GPL");
180MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
181MODULE_DESCRIPTION("Flash map driver for WindRiver PowerQUICC II");
diff --git a/drivers/mtd/nand/autcpu12.c b/drivers/mtd/nand/autcpu12.c
new file mode 100644
index 00000000000..eddc9a22498
--- /dev/null
+++ b/drivers/mtd/nand/autcpu12.c
@@ -0,0 +1,239 @@
1/*
2 * drivers/mtd/autcpu12.c
3 *
4 * Copyright (c) 2002 Thomas Gleixner <tgxl@linutronix.de>
5 *
6 * Derived from drivers/mtd/spia.c
7 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * Overview:
14 * This is a device driver for the NAND flash device found on the
15 * autronix autcpu12 board, which is a SmartMediaCard. It supports
16 * 16MiB, 32MiB and 64MiB cards.
17 *
18 *
19 * 02-12-2002 TG Cleanup of module params
20 *
21 * 02-20-2002 TG adjusted for different rd/wr address support
22 * added support for read device ready/busy line
23 * added page_cache
24 *
25 * 10-06-2002 TG 128K card support added
26 */
27
28#include <linux/slab.h>
29#include <linux/init.h>
30#include <linux/module.h>
31#include <linux/mtd/mtd.h>
32#include <linux/mtd/nand.h>
33#include <linux/mtd/partitions.h>
34#include <asm/io.h>
35#include <mach/hardware.h>
36#include <asm/sizes.h>
37#include <mach/autcpu12.h>
38
39/*
40 * MTD structure for AUTCPU12 board
41 */
42static struct mtd_info *autcpu12_mtd = NULL;
43static void __iomem *autcpu12_fio_base;
44
45/*
46 * Define partitions for flash devices
47 */
48static struct mtd_partition partition_info16k[] = {
49 { .name = "AUTCPU12 flash partition 1",
50 .offset = 0,
51 .size = 8 * SZ_1M },
52 { .name = "AUTCPU12 flash partition 2",
53 .offset = 8 * SZ_1M,
54 .size = 8 * SZ_1M },
55};
56
57static struct mtd_partition partition_info32k[] = {
58 { .name = "AUTCPU12 flash partition 1",
59 .offset = 0,
60 .size = 8 * SZ_1M },
61 { .name = "AUTCPU12 flash partition 2",
62 .offset = 8 * SZ_1M,
63 .size = 24 * SZ_1M },
64};
65
66static struct mtd_partition partition_info64k[] = {
67 { .name = "AUTCPU12 flash partition 1",
68 .offset = 0,
69 .size = 16 * SZ_1M },
70 { .name = "AUTCPU12 flash partition 2",
71 .offset = 16 * SZ_1M,
72 .size = 48 * SZ_1M },
73};
74
75static struct mtd_partition partition_info128k[] = {
76 { .name = "AUTCPU12 flash partition 1",
77 .offset = 0,
78 .size = 16 * SZ_1M },
79 { .name = "AUTCPU12 flash partition 2",
80 .offset = 16 * SZ_1M,
81 .size = 112 * SZ_1M },
82};
83
84#define NUM_PARTITIONS16K 2
85#define NUM_PARTITIONS32K 2
86#define NUM_PARTITIONS64K 2
87#define NUM_PARTITIONS128K 2
88/*
89 * hardware specific access to control-lines
90 *
91 * ALE bit 4 autcpu12_pedr
92 * CLE bit 5 autcpu12_pedr
93 * NCE bit 0 fio_ctrl
94 *
95 */
/*
 * Drive the NAND control lines for the autcpu12 board.  Per the header
 * comment above: ALE is bit 4 and CLE bit 5 of the CS89712 SMC port
 * register, nCE is bit 0 of the FIO control register.  Only touched
 * when NAND_CTRL_CHANGE is set; any command byte is then latched via
 * the write path.
 */
static void autcpu12_hwcontrol(struct mtd_info *mtd, int cmd,
			       unsigned int ctrl)
{
	struct nand_chip *chip = mtd->priv;

	if (ctrl & NAND_CTRL_CHANGE) {
		void __iomem *addr;
		unsigned char bits;

		/* Shift the ctrl CLE/ALE flags onto port bits 5 and 4,
		 * then read-modify-write just those two bits (~0x30). */
		addr = CS89712_VIRT_BASE + AUTCPU12_SMC_PORT_OFFSET;
		bits = (ctrl & NAND_CLE) << 4;
		bits |= (ctrl & NAND_ALE) << 2;
		writeb((readb(addr) & ~0x30) | bits, addr);

		/* Chip enable: bit 0 of the SMC select register */
		addr = autcpu12_fio_base + AUTCPU12_SMC_SELECT_OFFSET;
		writeb((readb(addr) & ~0x1) | (ctrl & NAND_NCE), addr);
	}

	/* Latch the command/address byte, if any */
	if (cmd != NAND_CMD_NONE)
		writeb(cmd, chip->IO_ADDR_W);
}
117
118/*
119 * read device ready pin
120 */
121int autcpu12_device_ready(struct mtd_info *mtd)
122{
123 void __iomem *addr = CS89712_VIRT_BASE + AUTCPU12_SMC_PORT_OFFSET;
124
125 return readb(addr) & AUTCPU12_SMC_RDY;
126}
127
128/*
129 * Main initialization routine
130 */
131static int __init autcpu12_init(void)
132{
133 struct nand_chip *this;
134 int err = 0;
135
136 /* Allocate memory for MTD device structure and private data */
137 autcpu12_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip),
138 GFP_KERNEL);
139 if (!autcpu12_mtd) {
140 printk("Unable to allocate AUTCPU12 NAND MTD device structure.\n");
141 err = -ENOMEM;
142 goto out;
143 }
144
145 /* map physical address */
146 autcpu12_fio_base = ioremap(AUTCPU12_PHYS_SMC, SZ_1K);
147 if (!autcpu12_fio_base) {
148 printk("Ioremap autcpu12 SmartMedia Card failed\n");
149 err = -EIO;
150 goto out_mtd;
151 }
152
153 /* Get pointer to private data */
154 this = (struct nand_chip *)(&autcpu12_mtd[1]);
155
156 /* Initialize structures */
157 memset(autcpu12_mtd, 0, sizeof(struct mtd_info));
158 memset(this, 0, sizeof(struct nand_chip));
159
160 /* Link the private data with the MTD structure */
161 autcpu12_mtd->priv = this;
162 autcpu12_mtd->owner = THIS_MODULE;
163
164 /* Set address of NAND IO lines */
165 this->IO_ADDR_R = autcpu12_fio_base;
166 this->IO_ADDR_W = autcpu12_fio_base;
167 this->cmd_ctrl = autcpu12_hwcontrol;
168 this->dev_ready = autcpu12_device_ready;
169 /* 20 us command delay time */
170 this->chip_delay = 20;
171 this->ecc.mode = NAND_ECC_SOFT;
172
173 /* Enable the following for a flash based bad block table */
174 /*
175 this->options = NAND_USE_FLASH_BBT;
176 */
177 this->options = NAND_USE_FLASH_BBT;
178
179 /* Scan to find existence of the device */
180 if (nand_scan(autcpu12_mtd, 1)) {
181 err = -ENXIO;
182 goto out_ior;
183 }
184
185 /* Register the partitions */
186 switch (autcpu12_mtd->size) {
187 case SZ_16M:
188 mtd_device_register(autcpu12_mtd, partition_info16k,
189 NUM_PARTITIONS16K);
190 break;
191 case SZ_32M:
192 mtd_device_register(autcpu12_mtd, partition_info32k,
193 NUM_PARTITIONS32K);
194 break;
195 case SZ_64M:
196 mtd_device_register(autcpu12_mtd, partition_info64k,
197 NUM_PARTITIONS64K);
198 break;
199 case SZ_128M:
200 mtd_device_register(autcpu12_mtd, partition_info128k,
201 NUM_PARTITIONS128K);
202 break;
203 default:
204 printk("Unsupported SmartMedia device\n");
205 err = -ENXIO;
206 goto out_ior;
207 }
208 goto out;
209
210 out_ior:
211 iounmap(autcpu12_fio_base);
212 out_mtd:
213 kfree(autcpu12_mtd);
214 out:
215 return err;
216}
217
218module_init(autcpu12_init);
219
220/*
221 * Clean up routine
222 */
/*
 * Module teardown, mirroring autcpu12_init() in reverse: the NAND/MTD
 * layer must be released before the mapping and the allocation go away.
 */
static void __exit autcpu12_cleanup(void)
{
	/* Release resources, unregister device */
	nand_release(autcpu12_mtd);

	/* unmap physical address */
	iounmap(autcpu12_fio_base);

	/* Free the MTD device structure */
	kfree(autcpu12_mtd);
}
234
235module_exit(autcpu12_cleanup);
236
237MODULE_LICENSE("GPL");
238MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
239MODULE_DESCRIPTION("Glue layer for SmartMediaCard on autronix autcpu12");
diff --git a/drivers/mtd/nand/bcm_umi_bch.c b/drivers/mtd/nand/bcm_umi_bch.c
new file mode 100644
index 00000000000..a930666d068
--- /dev/null
+++ b/drivers/mtd/nand/bcm_umi_bch.c
@@ -0,0 +1,213 @@
1/*****************************************************************************
2* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
3*
4* Unless you and Broadcom execute a separate written software license
5* agreement governing use of this software, this software is licensed to you
6* under the terms of the GNU General Public License version 2, available at
7* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
8*
9* Notwithstanding the above, under no circumstances may you combine this
10* software in any way with any other Broadcom software provided under a
11* license other than the GPL, without Broadcom's express prior written
12* consent.
13*****************************************************************************/
14
15/* ---- Include Files ---------------------------------------------------- */
16#include "nand_bcm_umi.h"
17
18/* ---- External Variable Declarations ----------------------------------- */
19/* ---- External Function Prototypes ------------------------------------- */
20/* ---- Public Variables ------------------------------------------------- */
21/* ---- Private Constants and Types -------------------------------------- */
22
23/* ---- Private Function Prototypes -------------------------------------- */
24static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
25 struct nand_chip *chip, uint8_t *buf, int page);
26static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
27 struct nand_chip *chip, const uint8_t *buf);
28
29/* ---- Private Variables ------------------------------------------------ */
30
31/*
32** nand_hw_eccoob
33** New oob placement block for use with hardware ecc generation.
34*/
35static struct nand_ecclayout nand_hw_eccoob_512 = {
36 /* Reserve 5 for BI indicator */
37 .oobfree = {
38#if (NAND_ECC_NUM_BYTES > 3)
39 {.offset = 0, .length = 2}
40#else
41 {.offset = 0, .length = 5},
42 {.offset = 6, .length = 7}
43#endif
44 }
45};
46
47/*
48** We treat the OOB for a 2K page as if it were 4 512 byte oobs,
49** except the BI is at byte 0.
50*/
51static struct nand_ecclayout nand_hw_eccoob_2048 = {
52 /* Reserve 0 as BI indicator */
53 .oobfree = {
54#if (NAND_ECC_NUM_BYTES > 10)
55 {.offset = 1, .length = 2},
56#elif (NAND_ECC_NUM_BYTES > 7)
57 {.offset = 1, .length = 5},
58 {.offset = 16, .length = 6},
59 {.offset = 32, .length = 6},
60 {.offset = 48, .length = 6}
61#else
62 {.offset = 1, .length = 8},
63 {.offset = 16, .length = 9},
64 {.offset = 32, .length = 9},
65 {.offset = 48, .length = 9}
66#endif
67 }
68};
69
70/* We treat the OOB for a 4K page as if it were 8 512 byte oobs,
71 * except the BI is at byte 0. */
72static struct nand_ecclayout nand_hw_eccoob_4096 = {
73 /* Reserve 0 as BI indicator */
74 .oobfree = {
75#if (NAND_ECC_NUM_BYTES > 10)
76 {.offset = 1, .length = 2},
77 {.offset = 16, .length = 3},
78 {.offset = 32, .length = 3},
79 {.offset = 48, .length = 3},
80 {.offset = 64, .length = 3},
81 {.offset = 80, .length = 3},
82 {.offset = 96, .length = 3},
83 {.offset = 112, .length = 3}
84#else
85 {.offset = 1, .length = 5},
86 {.offset = 16, .length = 6},
87 {.offset = 32, .length = 6},
88 {.offset = 48, .length = 6},
89 {.offset = 64, .length = 6},
90 {.offset = 80, .length = 6},
91 {.offset = 96, .length = 6},
92 {.offset = 112, .length = 6}
93#endif
94 }
95};
96
97/* ---- Private Functions ------------------------------------------------ */
98/* ==== Public Functions ================================================= */
99
100/****************************************************************************
101*
102* bcm_umi_bch_read_page_hwecc - hardware ecc based page read function
103* @mtd: mtd info structure
104* @chip: nand chip info structure
105* @buf: buffer to store read data
106*
107***************************************************************************/
/****************************************************************************
*
* bcm_umi_bch_read_page_hwecc - hardware ecc based page read function
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
* @page: page number to read
*
* Reads the page one ECC step (sector) at a time: the BCH engine is
* armed before each sector's data is read and paused afterwards, then
* the per-sector OOB ECC bytes are fetched and used to correct the
* sector in place.  ECC statistics are updated per sector; always
* returns 0 (uncorrectable errors are only counted, not reported).
*
***************************************************************************/
static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
				       struct nand_chip *chip, uint8_t * buf,
				       int page)
{
	int sectorIdx = 0;
	int eccsize = chip->ecc.size;
	int eccsteps = chip->ecc.steps;
	uint8_t *datap = buf;
	uint8_t eccCalc[NAND_ECC_NUM_BYTES];
	int sectorOobSize = mtd->oobsize / eccsteps;
	int stat;

	for (sectorIdx = 0; sectorIdx < eccsteps;
			sectorIdx++, datap += eccsize) {
		if (sectorIdx > 0) {
			/* Seek to page location within sector */
			chip->cmdfunc(mtd, NAND_CMD_RNDOUT, sectorIdx * eccsize,
				      -1);
		}

		/* Enable hardware ECC before reading the buf */
		nand_bcm_umi_bch_enable_read_hwecc();

		/* Read in data */
		bcm_umi_nand_read_buf(mtd, datap, eccsize);

		/* Pause hardware ECC after reading the buf */
		nand_bcm_umi_bch_pause_read_ecc_calc();

		/* Read the OOB ECC for this sector into eccCalc and oob_poi */
		chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
			      mtd->writesize + sectorIdx * sectorOobSize, -1);
		nand_bcm_umi_bch_read_oobEcc(mtd->writesize, eccCalc,
					     NAND_ECC_NUM_BYTES,
					     chip->oob_poi +
					     sectorIdx * sectorOobSize);

		/* Correct any ECC detected errors */
		stat =
		    nand_bcm_umi_bch_correct_page(datap, eccCalc,
						  NAND_ECC_NUM_BYTES);

		/* Update Stats: stat < 0 = uncorrectable, > 0 = bits fixed */
		if (stat < 0) {
#if defined(NAND_BCM_UMI_DEBUG)
			printk(KERN_WARNING "%s uncorr_err sectorIdx=%d\n",
			       __func__, sectorIdx);
			printk(KERN_WARNING
			       "%s data %02x %02x %02x %02x "
			       "%02x %02x %02x %02x\n",
			       __func__, datap[0], datap[1], datap[2], datap[3],
			       datap[4], datap[5], datap[6], datap[7]);
			printk(KERN_WARNING
			       "%s ecc  %02x %02x %02x %02x "
			       "%02x %02x %02x %02x %02x %02x "
			       "%02x %02x %02x\n",
			       __func__, eccCalc[0], eccCalc[1], eccCalc[2],
			       eccCalc[3], eccCalc[4], eccCalc[5], eccCalc[6],
			       eccCalc[7], eccCalc[8], eccCalc[9], eccCalc[10],
			       eccCalc[11], eccCalc[12]);
			BUG();
#endif
			mtd->ecc_stats.failed++;
		} else {
#if defined(NAND_BCM_UMI_DEBUG)
			if (stat > 0) {
				printk(KERN_INFO
				       "%s %d correctable_errors detected\n",
				       __func__, stat);
			}
#endif
			mtd->ecc_stats.corrected += stat;
		}
	}
	return 0;
}
184
185/****************************************************************************
186*
187* bcm_umi_bch_write_page_hwecc - hardware ecc based page write function
188* @mtd: mtd info structure
189* @chip: nand chip info structure
190* @buf: data buffer
191*
192***************************************************************************/
193static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
194 struct nand_chip *chip, const uint8_t *buf)
195{
196 int sectorIdx = 0;
197 int eccsize = chip->ecc.size;
198 int eccsteps = chip->ecc.steps;
199 const uint8_t *datap = buf;
200 uint8_t *oobp = chip->oob_poi;
201 int sectorOobSize = mtd->oobsize / eccsteps;
202
203 for (sectorIdx = 0; sectorIdx < eccsteps;
204 sectorIdx++, datap += eccsize, oobp += sectorOobSize) {
205 /* Enable hardware ECC before writing the buf */
206 nand_bcm_umi_bch_enable_write_hwecc();
207 bcm_umi_nand_write_buf(mtd, datap, eccsize);
208 nand_bcm_umi_bch_write_oobEcc(mtd->writesize, oobp,
209 NAND_ECC_NUM_BYTES);
210 }
211
212 bcm_umi_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
213}
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
new file mode 100644
index 00000000000..8c569e454dc
--- /dev/null
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -0,0 +1,579 @@
1/*****************************************************************************
2* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
3*
4* Unless you and Broadcom execute a separate written software license
5* agreement governing use of this software, this software is licensed to you
6* under the terms of the GNU General Public License version 2, available at
7* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
8*
9* Notwithstanding the above, under no circumstances may you combine this
10* software in any way with any other Broadcom software provided under a
11* license other than the GPL, without Broadcom's express prior written
12* consent.
13*****************************************************************************/
14
15/* ---- Include Files ---------------------------------------------------- */
16#include <linux/module.h>
17#include <linux/types.h>
18#include <linux/init.h>
19#include <linux/kernel.h>
20#include <linux/slab.h>
21#include <linux/string.h>
22#include <linux/ioport.h>
23#include <linux/device.h>
24#include <linux/delay.h>
25#include <linux/err.h>
26#include <linux/io.h>
27#include <linux/platform_device.h>
28#include <linux/mtd/mtd.h>
29#include <linux/mtd/nand.h>
30#include <linux/mtd/nand_ecc.h>
31#include <linux/mtd/partitions.h>
32
33#include <asm/mach-types.h>
34#include <asm/system.h>
35
36#include <mach/reg_nand.h>
37#include <mach/reg_umi.h>
38
39#include "nand_bcm_umi.h"
40
41#include <mach/memory_settings.h>
42
43#define USE_DMA 1
44#include <mach/dma.h>
45#include <linux/dma-mapping.h>
46#include <linux/completion.h>
47
48/* ---- External Variable Declarations ----------------------------------- */
49/* ---- External Function Prototypes ------------------------------------- */
50/* ---- Public Variables ------------------------------------------------- */
51/* ---- Private Constants and Types -------------------------------------- */
52static const __devinitconst char gBanner[] = KERN_INFO \
53 "BCM UMI MTD NAND Driver: 1.00\n";
54
55const char *part_probes[] = { "cmdlinepart", NULL };
56
57#if NAND_ECC_BCH
58static uint8_t scan_ff_pattern[] = { 0xff };
59
60static struct nand_bbt_descr largepage_bbt = {
61 .options = 0,
62 .offs = 0,
63 .len = 1,
64 .pattern = scan_ff_pattern
65};
66#endif
67
68/*
69** Preallocate a buffer to avoid having to do this every dma operation.
70** This is the size of the preallocated coherent DMA buffer.
71*/
72#if USE_DMA
73#define DMA_MIN_BUFLEN 512
74#define DMA_MAX_BUFLEN PAGE_SIZE
75#define USE_DIRECT_IO(len) (((len) < DMA_MIN_BUFLEN) || \
76 ((len) > DMA_MAX_BUFLEN))
77
78/*
79 * The current NAND data space goes from 0x80001900 to 0x80001FFF,
80 * which is only 0x700 = 1792 bytes long. This is too small for 2K, 4K page
81 * size NAND flash. Need to break the DMA down to multiple 1Ks.
82 *
83 * Need to make sure REG_NAND_DATA_PADDR + DMA_MAX_LEN < 0x80002000
84 */
85#define DMA_MAX_LEN 1024
86
87#else /* !USE_DMA */
88#define DMA_MIN_BUFLEN 0
89#define DMA_MAX_BUFLEN 0
90#define USE_DIRECT_IO(len) 1
91#endif
92/* ---- Private Function Prototypes -------------------------------------- */
93static void bcm_umi_nand_read_buf(struct mtd_info *mtd, u_char * buf, int len);
94static void bcm_umi_nand_write_buf(struct mtd_info *mtd, const u_char * buf,
95 int len);
96
97/* ---- Private Variables ------------------------------------------------ */
98static struct mtd_info *board_mtd;
99static void __iomem *bcm_umi_io_base;
100static void *virtPtr;
101static dma_addr_t physPtr;
102static struct completion nand_comp;
103
104/* ---- Private Functions ------------------------------------------------ */
105#if NAND_ECC_BCH
106#include "bcm_umi_bch.c"
107#else
108#include "bcm_umi_hamming.c"
109#endif
110
111#if USE_DMA
112
/* Handler called when the DMA finishes. */
static void nand_dma_handler(DMA_Device_t dev, int reason, void *userData)
{
	/* Wake the thread blocked in wait_for_completion() inside
	 * nand_dma_read()/nand_dma_write(); dev/reason/userData unused. */
	complete(&nand_comp);
}
118
119static int nand_dma_init(void)
120{
121 int rc;
122
123 rc = dma_set_device_handler(DMA_DEVICE_NAND_MEM_TO_MEM,
124 nand_dma_handler, NULL);
125 if (rc != 0) {
126 printk(KERN_ERR "dma_set_device_handler failed: %d\n", rc);
127 return rc;
128 }
129
130 virtPtr =
131 dma_alloc_coherent(NULL, DMA_MAX_BUFLEN, &physPtr, GFP_KERNEL);
132 if (virtPtr == NULL) {
133 printk(KERN_ERR "NAND - Failed to allocate memory for DMA buffer\n");
134 return -ENOMEM;
135 }
136
137 return 0;
138}
139
140static void nand_dma_term(void)
141{
142 if (virtPtr != NULL)
143 dma_free_coherent(NULL, DMA_MAX_BUFLEN, virtPtr, physPtr);
144}
145
146static void nand_dma_read(void *buf, int len)
147{
148 int offset = 0;
149 int tmp_len = 0;
150 int len_left = len;
151 DMA_Handle_t hndl;
152
153 if (virtPtr == NULL)
154 panic("nand_dma_read: virtPtr == NULL\n");
155
156 if ((void *)physPtr == NULL)
157 panic("nand_dma_read: physPtr == NULL\n");
158
159 hndl = dma_request_channel(DMA_DEVICE_NAND_MEM_TO_MEM);
160 if (hndl < 0) {
161 printk(KERN_ERR
162 "nand_dma_read: unable to allocate dma channel: %d\n",
163 (int)hndl);
164 panic("\n");
165 }
166
167 while (len_left > 0) {
168 if (len_left > DMA_MAX_LEN) {
169 tmp_len = DMA_MAX_LEN;
170 len_left -= DMA_MAX_LEN;
171 } else {
172 tmp_len = len_left;
173 len_left = 0;
174 }
175
176 init_completion(&nand_comp);
177 dma_transfer_mem_to_mem(hndl, REG_NAND_DATA_PADDR,
178 physPtr + offset, tmp_len);
179 wait_for_completion(&nand_comp);
180
181 offset += tmp_len;
182 }
183
184 dma_free_channel(hndl);
185
186 if (buf != NULL)
187 memcpy(buf, virtPtr, len);
188}
189
190static void nand_dma_write(const void *buf, int len)
191{
192 int offset = 0;
193 int tmp_len = 0;
194 int len_left = len;
195 DMA_Handle_t hndl;
196
197 if (buf == NULL)
198 panic("nand_dma_write: buf == NULL\n");
199
200 if (virtPtr == NULL)
201 panic("nand_dma_write: virtPtr == NULL\n");
202
203 if ((void *)physPtr == NULL)
204 panic("nand_dma_write: physPtr == NULL\n");
205
206 memcpy(virtPtr, buf, len);
207
208
209 hndl = dma_request_channel(DMA_DEVICE_NAND_MEM_TO_MEM);
210 if (hndl < 0) {
211 printk(KERN_ERR
212 "nand_dma_write: unable to allocate dma channel: %d\n",
213 (int)hndl);
214 panic("\n");
215 }
216
217 while (len_left > 0) {
218 if (len_left > DMA_MAX_LEN) {
219 tmp_len = DMA_MAX_LEN;
220 len_left -= DMA_MAX_LEN;
221 } else {
222 tmp_len = len_left;
223 len_left = 0;
224 }
225
226 init_completion(&nand_comp);
227 dma_transfer_mem_to_mem(hndl, physPtr + offset,
228 REG_NAND_DATA_PADDR, tmp_len);
229 wait_for_completion(&nand_comp);
230
231 offset += tmp_len;
232 }
233
234 dma_free_channel(hndl);
235}
236
237#endif
238
/* MTD dev_ready hook: non-zero when the UMI controller reports the NAND
 * device ready (RCSR ready bit — see nand_bcm_umi_dev_ready()). */
static int nand_dev_ready(struct mtd_info *mtd)
{
	return nand_bcm_umi_dev_ready();
}
243
/****************************************************************************
*
* bcm_umi_nand_inithw
*
* This routine does the necessary hardware (board-specific)
* initializations. This includes setting up the timings, etc.
*
* Returns 0 (cannot currently fail).
*
***************************************************************************/
int bcm_umi_nand_inithw(void)
{
	/* Configure nand timing parameters */
	REG_UMI_NAND_TCR &= ~0x7ffff;
	REG_UMI_NAND_TCR |= HW_CFG_NAND_TCR;

#if !defined(CONFIG_MTD_NAND_BCM_UMI_HWCS)
	/* enable software control of CS */
	REG_UMI_NAND_TCR |= REG_UMI_NAND_TCR_CS_SWCTRL;
#endif

	/* keep NAND chip select asserted */
	REG_UMI_NAND_RCSR |= REG_UMI_NAND_RCSR_CS_ASSERTED;

	/* clear 16-bit mode — the bus is driven with 8-bit accesses */
	REG_UMI_NAND_TCR &= ~REG_UMI_NAND_TCR_WORD16;
	/* enable writes to flash */
	REG_UMI_MMD_ICR |= REG_UMI_MMD_ICR_FLASH_WP;

	/* Reset the chip and wait until it reports ready again. */
	writel(NAND_CMD_RESET, bcm_umi_io_base + REG_NAND_CMD_OFFSET);
	nand_bcm_umi_wait_till_ready();

#if NAND_ECC_BCH
	/* Program the controller's BCH ECC byte count. */
	nand_bcm_umi_bch_config_ecc(NAND_ECC_NUM_BYTES);
#endif

	return 0;
}
279
280/* Used to turn latch the proper register for access. */
281static void bcm_umi_nand_hwcontrol(struct mtd_info *mtd, int cmd,
282 unsigned int ctrl)
283{
284 /* send command to hardware */
285 struct nand_chip *chip = mtd->priv;
286 if (ctrl & NAND_CTRL_CHANGE) {
287 if (ctrl & NAND_CLE) {
288 chip->IO_ADDR_W = bcm_umi_io_base + REG_NAND_CMD_OFFSET;
289 goto CMD;
290 }
291 if (ctrl & NAND_ALE) {
292 chip->IO_ADDR_W =
293 bcm_umi_io_base + REG_NAND_ADDR_OFFSET;
294 goto CMD;
295 }
296 chip->IO_ADDR_W = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
297 }
298
299CMD:
300 /* Send command to chip directly */
301 if (cmd != NAND_CMD_NONE)
302 writeb(cmd, chip->IO_ADDR_W);
303}
304
305static void bcm_umi_nand_write_buf(struct mtd_info *mtd, const u_char * buf,
306 int len)
307{
308 if (USE_DIRECT_IO(len)) {
309 /* Do it the old way if the buffer is small or too large.
310 * Probably quicker than starting and checking dma. */
311 int i;
312 struct nand_chip *this = mtd->priv;
313
314 for (i = 0; i < len; i++)
315 writeb(buf[i], this->IO_ADDR_W);
316 }
317#if USE_DMA
318 else
319 nand_dma_write(buf, len);
320#endif
321}
322
323static void bcm_umi_nand_read_buf(struct mtd_info *mtd, u_char * buf, int len)
324{
325 if (USE_DIRECT_IO(len)) {
326 int i;
327 struct nand_chip *this = mtd->priv;
328
329 for (i = 0; i < len; i++)
330 buf[i] = readb(this->IO_ADDR_R);
331 }
332#if USE_DMA
333 else
334 nand_dma_read(buf, len);
335#endif
336}
337
338static uint8_t readbackbuf[NAND_MAX_PAGESIZE];
339static int bcm_umi_nand_verify_buf(struct mtd_info *mtd, const u_char * buf,
340 int len)
341{
342 /*
343 * Try to readback page with ECC correction. This is necessary
344 * for MLC parts which may have permanently stuck bits.
345 */
346 struct nand_chip *chip = mtd->priv;
347 int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0);
348 if (ret < 0)
349 return -EFAULT;
350 else {
351 if (memcmp(readbackbuf, buf, len) == 0)
352 return 0;
353
354 return -EFAULT;
355 }
356 return 0;
357}
358
359static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
360{
361 struct nand_chip *this;
362 struct resource *r;
363 int err = 0;
364
365 printk(gBanner);
366
367 /* Allocate memory for MTD device structure and private data */
368 board_mtd =
369 kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip),
370 GFP_KERNEL);
371 if (!board_mtd) {
372 printk(KERN_WARNING
373 "Unable to allocate NAND MTD device structure.\n");
374 return -ENOMEM;
375 }
376
377 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
378
379 if (!r)
380 return -ENXIO;
381
382 /* map physical address */
383 bcm_umi_io_base = ioremap(r->start, resource_size(r));
384
385 if (!bcm_umi_io_base) {
386 printk(KERN_ERR "ioremap to access BCM UMI NAND chip failed\n");
387 kfree(board_mtd);
388 return -EIO;
389 }
390
391 /* Get pointer to private data */
392 this = (struct nand_chip *)(&board_mtd[1]);
393
394 /* Initialize structures */
395 memset((char *)board_mtd, 0, sizeof(struct mtd_info));
396 memset((char *)this, 0, sizeof(struct nand_chip));
397
398 /* Link the private data with the MTD structure */
399 board_mtd->priv = this;
400
401 /* Initialize the NAND hardware. */
402 if (bcm_umi_nand_inithw() < 0) {
403 printk(KERN_ERR "BCM UMI NAND chip could not be initialized\n");
404 iounmap(bcm_umi_io_base);
405 kfree(board_mtd);
406 return -EIO;
407 }
408
409 /* Set address of NAND IO lines */
410 this->IO_ADDR_W = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
411 this->IO_ADDR_R = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
412
413 /* Set command delay time, see datasheet for correct value */
414 this->chip_delay = 0;
415 /* Assign the device ready function, if available */
416 this->dev_ready = nand_dev_ready;
417 this->options = 0;
418
419 this->write_buf = bcm_umi_nand_write_buf;
420 this->read_buf = bcm_umi_nand_read_buf;
421 this->verify_buf = bcm_umi_nand_verify_buf;
422
423 this->cmd_ctrl = bcm_umi_nand_hwcontrol;
424 this->ecc.mode = NAND_ECC_HW;
425 this->ecc.size = 512;
426 this->ecc.bytes = NAND_ECC_NUM_BYTES;
427#if NAND_ECC_BCH
428 this->ecc.read_page = bcm_umi_bch_read_page_hwecc;
429 this->ecc.write_page = bcm_umi_bch_write_page_hwecc;
430#else
431 this->ecc.correct = nand_correct_data512;
432 this->ecc.calculate = bcm_umi_hamming_get_hw_ecc;
433 this->ecc.hwctl = bcm_umi_hamming_enable_hwecc;
434#endif
435
436#if USE_DMA
437 err = nand_dma_init();
438 if (err != 0)
439 return err;
440#endif
441
442 /* Figure out the size of the device that we have.
443 * We need to do this to figure out which ECC
444 * layout we'll be using.
445 */
446
447 err = nand_scan_ident(board_mtd, 1, NULL);
448 if (err) {
449 printk(KERN_ERR "nand_scan failed: %d\n", err);
450 iounmap(bcm_umi_io_base);
451 kfree(board_mtd);
452 return err;
453 }
454
455 /* Now that we know the nand size, we can setup the ECC layout */
456
457 switch (board_mtd->writesize) { /* writesize is the pagesize */
458 case 4096:
459 this->ecc.layout = &nand_hw_eccoob_4096;
460 break;
461 case 2048:
462 this->ecc.layout = &nand_hw_eccoob_2048;
463 break;
464 case 512:
465 this->ecc.layout = &nand_hw_eccoob_512;
466 break;
467 default:
468 {
469 printk(KERN_ERR "NAND - Unrecognized pagesize: %d\n",
470 board_mtd->writesize);
471 return -EINVAL;
472 }
473 }
474
475#if NAND_ECC_BCH
476 if (board_mtd->writesize > 512) {
477 if (this->options & NAND_USE_FLASH_BBT)
478 largepage_bbt.options = NAND_BBT_SCAN2NDPAGE;
479 this->badblock_pattern = &largepage_bbt;
480 }
481#endif
482
483 /* Now finish off the scan, now that ecc.layout has been initialized. */
484
485 err = nand_scan_tail(board_mtd);
486 if (err) {
487 printk(KERN_ERR "nand_scan failed: %d\n", err);
488 iounmap(bcm_umi_io_base);
489 kfree(board_mtd);
490 return err;
491 }
492
493 /* Register the partitions */
494 {
495 int nr_partitions;
496 struct mtd_partition *partition_info;
497
498 board_mtd->name = "bcm_umi-nand";
499 nr_partitions =
500 parse_mtd_partitions(board_mtd, part_probes,
501 &partition_info, 0);
502
503 if (nr_partitions <= 0) {
504 printk(KERN_ERR "BCM UMI NAND: Too few partitions - %d\n",
505 nr_partitions);
506 iounmap(bcm_umi_io_base);
507 kfree(board_mtd);
508 return -EIO;
509 }
510 mtd_device_register(board_mtd, partition_info, nr_partitions);
511 }
512
513 /* Return happy */
514 return 0;
515}
516
517static int bcm_umi_nand_remove(struct platform_device *pdev)
518{
519#if USE_DMA
520 nand_dma_term();
521#endif
522
523 /* Release resources, unregister device */
524 nand_release(board_mtd);
525
526 /* unmap physical address */
527 iounmap(bcm_umi_io_base);
528
529 /* Free the MTD device structure */
530 kfree(board_mtd);
531
532 return 0;
533}
534
535#ifdef CONFIG_PM
536static int bcm_umi_nand_suspend(struct platform_device *pdev,
537 pm_message_t state)
538{
539 printk(KERN_ERR "MTD NAND suspend is being called\n");
540 return 0;
541}
542
543static int bcm_umi_nand_resume(struct platform_device *pdev)
544{
545 printk(KERN_ERR "MTD NAND resume is being called\n");
546 return 0;
547}
548#else
549#define bcm_umi_nand_suspend NULL
550#define bcm_umi_nand_resume NULL
551#endif
552
/* Platform driver binding for the "bcm-nand" platform device. */
static struct platform_driver nand_driver = {
	.driver = {
		   .name = "bcm-nand",
		   .owner = THIS_MODULE,
		   },
	.probe = bcm_umi_nand_probe,
	.remove = bcm_umi_nand_remove,
	.suspend = bcm_umi_nand_suspend,
	.resume = bcm_umi_nand_resume,
};
563
/* Module entry point: register the platform driver. */
static int __init nand_init(void)
{
	return platform_driver_register(&nand_driver);
}
568
/* Module exit point: unregister the platform driver. */
static void __exit nand_exit(void)
{
	platform_driver_unregister(&nand_driver);
}
573
574module_init(nand_init);
575module_exit(nand_exit);
576
577MODULE_LICENSE("GPL");
578MODULE_AUTHOR("Broadcom");
579MODULE_DESCRIPTION("BCM UMI MTD NAND driver");
diff --git a/drivers/mtd/nand/edb7312.c b/drivers/mtd/nand/edb7312.c
new file mode 100644
index 00000000000..8400d0f6dad
--- /dev/null
+++ b/drivers/mtd/nand/edb7312.c
@@ -0,0 +1,203 @@
1/*
2 * drivers/mtd/nand/edb7312.c
3 *
4 * Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
5 *
6 * Derived from drivers/mtd/nand/autcpu12.c
7 * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * Overview:
14 * This is a device driver for the NAND flash device found on the
15 * CLEP7312 board which utilizes the Toshiba TC58V64AFT part. This is
16 * a 64Mibit (8MiB x 8 bits) NAND flash device.
17 */
18
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/mtd/mtd.h>
23#include <linux/mtd/nand.h>
24#include <linux/mtd/partitions.h>
25#include <asm/io.h>
26#include <mach/hardware.h> /* for CLPS7111_VIRT_BASE */
27#include <asm/sizes.h>
28#include <asm/hardware/clps7111.h>
29
30/*
31 * MTD structure for EDB7312 board
32 */
33static struct mtd_info *ep7312_mtd = NULL;
34
35/*
36 * Values specific to the EDB7312 board (used with EP7312 processor)
37 */
38#define EP7312_FIO_PBASE 0x10000000 /* Phys address of flash */
39#define EP7312_PXDR 0x0001 /*
40 * IO offset to Port B data register
41 * where the CLE, ALE and NCE pins
42 * are wired to.
43 */
44#define EP7312_PXDDR 0x0041 /*
45 * IO offset to Port B data direction
46 * register so we can control the IO
47 * lines.
48 */
49
50/*
51 * Module stuff
52 */
53
54static unsigned long ep7312_fio_pbase = EP7312_FIO_PBASE;
55static void __iomem *ep7312_pxdr = (void __iomem *)EP7312_PXDR;
56static void __iomem *ep7312_pxddr = (void __iomem *)EP7312_PXDDR;
57
58/*
59 * Define static partitions for flash device
60 */
61static struct mtd_partition partition_info[] = {
62 {.name = "EP7312 Nand Flash",
63 .offset = 0,
64 .size = 8 * 1024 * 1024}
65};
66
67#define NUM_PARTITIONS 1
68
/*
 * hardware specific access to control-lines
 *
 * NAND_NCE: bit 0 -> bit 6 (bit 7 = 1)
 * NAND_CLE: bit 1 -> bit 4
 * NAND_ALE: bit 2 -> bit 5
 */
static void ep7312_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	struct nand_chip *chip = mtd->priv;

	if (ctrl & NAND_CTRL_CHANGE) {
		/* bit 7 always set; shift CLE/ALE up per the mapping above */
		unsigned char bits = 0x80;

		bits |= (ctrl & (NAND_CLE | NAND_ALE)) << 3;
		/* NCE is active-low on the port: clear bit 6 when selected */
		bits |= (ctrl & NAND_NCE) ? 0x00 : 0x40;

		/* update only the upper nibble of Port B */
		clps_writeb((clps_readb(ep7312_pxdr) & 0xF0) | bits,
			    ep7312_pxdr);
	}
	if (cmd != NAND_CMD_NONE)
		writeb(cmd, chip->IO_ADDR_W);
}
92
/*
 * read device ready pin
 */
static int ep7312_device_ready(struct mtd_info *mtd)
{
	/* NOTE(review): always reports ready — the R/B line is not actually
	 * polled here; the driver appears to rely on the 15us chip_delay set
	 * in ep7312_init(). Confirm against board wiring. */
	return 1;
}
100
101const char *part_probes[] = { "cmdlinepart", NULL };
102
103/*
104 * Main initialization routine
105 */
106static int __init ep7312_init(void)
107{
108 struct nand_chip *this;
109 const char *part_type = 0;
110 int mtd_parts_nb = 0;
111 struct mtd_partition *mtd_parts = 0;
112 void __iomem *ep7312_fio_base;
113
114 /* Allocate memory for MTD device structure and private data */
115 ep7312_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
116 if (!ep7312_mtd) {
117 printk("Unable to allocate EDB7312 NAND MTD device structure.\n");
118 return -ENOMEM;
119 }
120
121 /* map physical address */
122 ep7312_fio_base = ioremap(ep7312_fio_pbase, SZ_1K);
123 if (!ep7312_fio_base) {
124 printk("ioremap EDB7312 NAND flash failed\n");
125 kfree(ep7312_mtd);
126 return -EIO;
127 }
128
129 /* Get pointer to private data */
130 this = (struct nand_chip *)(&ep7312_mtd[1]);
131
132 /* Initialize structures */
133 memset(ep7312_mtd, 0, sizeof(struct mtd_info));
134 memset(this, 0, sizeof(struct nand_chip));
135
136 /* Link the private data with the MTD structure */
137 ep7312_mtd->priv = this;
138 ep7312_mtd->owner = THIS_MODULE;
139
140 /*
141 * Set GPIO Port B control register so that the pins are configured
142 * to be outputs for controlling the NAND flash.
143 */
144 clps_writeb(0xf0, ep7312_pxddr);
145
146 /* insert callbacks */
147 this->IO_ADDR_R = ep7312_fio_base;
148 this->IO_ADDR_W = ep7312_fio_base;
149 this->cmd_ctrl = ep7312_hwcontrol;
150 this->dev_ready = ep7312_device_ready;
151 /* 15 us command delay time */
152 this->chip_delay = 15;
153
154 /* Scan to find existence of the device */
155 if (nand_scan(ep7312_mtd, 1)) {
156 iounmap((void *)ep7312_fio_base);
157 kfree(ep7312_mtd);
158 return -ENXIO;
159 }
160 ep7312_mtd->name = "edb7312-nand";
161 mtd_parts_nb = parse_mtd_partitions(ep7312_mtd, part_probes, &mtd_parts, 0);
162 if (mtd_parts_nb > 0)
163 part_type = "command line";
164 else
165 mtd_parts_nb = 0;
166 if (mtd_parts_nb == 0) {
167 mtd_parts = partition_info;
168 mtd_parts_nb = NUM_PARTITIONS;
169 part_type = "static";
170 }
171
172 /* Register the partitions */
173 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
174 mtd_device_register(ep7312_mtd, mtd_parts, mtd_parts_nb);
175
176 /* Return happy */
177 return 0;
178}
179
180module_init(ep7312_init);
181
182/*
183 * Clean up routine
184 */
185static void __exit ep7312_cleanup(void)
186{
187 struct nand_chip *this = (struct nand_chip *)&ep7312_mtd[1];
188
189 /* Release resources, unregister device */
190 nand_release(ap7312_mtd);
191
192 /* Release io resource */
193 iounmap(this->IO_ADDR_R);
194
195 /* Free the MTD device structure */
196 kfree(ep7312_mtd);
197}
198
199module_exit(ep7312_cleanup);
200
201MODULE_LICENSE("GPL");
202MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
203MODULE_DESCRIPTION("MTD map driver for Cogent EDB7312 board");
diff --git a/drivers/mtd/nand/nand_bcm_umi.c b/drivers/mtd/nand/nand_bcm_umi.c
new file mode 100644
index 00000000000..46a6bc9c4b7
--- /dev/null
+++ b/drivers/mtd/nand/nand_bcm_umi.c
@@ -0,0 +1,149 @@
1/*****************************************************************************
2* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
3*
4* Unless you and Broadcom execute a separate written software license
5* agreement governing use of this software, this software is licensed to you
6* under the terms of the GNU General Public License version 2, available at
7* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
8*
9* Notwithstanding the above, under no circumstances may you combine this
10* software in any way with any other Broadcom software provided under a
11* license other than the GPL, without Broadcom's express prior written
12* consent.
13*****************************************************************************/
14
15/* ---- Include Files ---------------------------------------------------- */
16#include <mach/reg_umi.h>
17#include "nand_bcm_umi.h"
18#ifdef BOOT0_BUILD
19#include <uart.h>
20#endif
21
22/* ---- External Variable Declarations ----------------------------------- */
23/* ---- External Function Prototypes ------------------------------------- */
24/* ---- Public Variables ------------------------------------------------- */
25/* ---- Private Constants and Types -------------------------------------- */
26/* ---- Private Function Prototypes -------------------------------------- */
27/* ---- Private Variables ------------------------------------------------ */
28/* ---- Private Functions ------------------------------------------------ */
29
30#if NAND_ECC_BCH
31/****************************************************************************
32* nand_bch_ecc_flip_bit - Routine to flip an errored bit
33*
34* PURPOSE:
35* This is a helper routine that flips the bit (0 -> 1 or 1 -> 0) of the
36* errored bit specified
37*
38* PARAMETERS:
39* datap - Container that holds the 512 byte data
40* errorLocation - Location of the bit that needs to be flipped
41*
42* RETURNS:
43* None
44****************************************************************************/
45static void nand_bcm_umi_bch_ecc_flip_bit(uint8_t *datap, int errorLocation)
46{
47 int locWithinAByte = (errorLocation & REG_UMI_BCH_ERR_LOC_BYTE) >> 0;
48 int locWithinAWord = (errorLocation & REG_UMI_BCH_ERR_LOC_WORD) >> 3;
49 int locWithinAPage = (errorLocation & REG_UMI_BCH_ERR_LOC_PAGE) >> 5;
50
51 uint8_t errorByte = 0;
52 uint8_t byteMask = 1 << locWithinAByte;
53
54 /* BCH uses big endian, need to change the location
55 * bits to little endian */
56 locWithinAWord = 3 - locWithinAWord;
57
58 errorByte = datap[locWithinAPage * sizeof(uint32_t) + locWithinAWord];
59
60#ifdef BOOT0_BUILD
61 puthexs("\nECC Correct Offset: ",
62 locWithinAPage * sizeof(uint32_t) + locWithinAWord);
63 puthexs(" errorByte:", errorByte);
64 puthex8(" Bit: ", locWithinAByte);
65#endif
66
67 if (errorByte & byteMask) {
68 /* bit needs to be cleared */
69 errorByte &= ~byteMask;
70 } else {
71 /* bit needs to be set */
72 errorByte |= byteMask;
73 }
74
75 /* write back the value with the fixed bit */
76 datap[locWithinAPage * sizeof(uint32_t) + locWithinAWord] = errorByte;
77}
78
/****************************************************************************
* nand_correct_page_bch - Routine to correct bit errors when reading NAND
*
* PURPOSE:
*    This routine reads the BCH registers to determine if there are any bit
* errors during the read of the last 512 bytes of data + ECC bytes. If
* errors exists, the routine fixes it.
*
* PARAMETERS:
*    datap - Container that holds the 512 byte data
*    readEccData - ECC bytes read from the device's OOB area
*    numEccBytes - Number of ECC bytes in readEccData
*
* RETURNS:
*    0 or greater = Number of errors corrected
*                   (No errors are found or errors have been fixed)
*   -1 = Error(s) cannot be fixed
****************************************************************************/
int nand_bcm_umi_bch_correct_page(uint8_t *datap, uint8_t *readEccData,
				  int numEccBytes)
{
	int numErrors;
	int errorLocation;
	int idx;
	uint32_t regValue;

	/* wait for read ECC to be valid */
	regValue = nand_bcm_umi_bch_poll_read_ecc_calc();

	/*
	 * read the control status register to determine if there
	 * are error'ed bits
	 * see if errors are correctible
	 */
	if ((regValue & REG_UMI_BCH_CTRL_STATUS_UNCORR_ERR) > 0) {
		int i;

		/* All-0xff ECC means the sector was never programmed. */
		for (i = 0; i < numEccBytes; i++) {
			if (readEccData[i] != 0xff) {
				/* errors cannot be fixed, return -1 */
				return -1;
			}
		}
		/* If ECC is unprogrammed then we can't correct,
		 * assume everything OK */
		return 0;
	}

	if ((regValue & REG_UMI_BCH_CTRL_STATUS_CORR_ERR) == 0) {
		/* no errors */
		return 0;
	}

	/*
	 * Fix errored bits by doing the following:
	 * 1. Read the number of errors in the control and status register
	 * 2. Read the error location registers that corresponds to the number
	 *    of errors reported
	 * 3. Invert the bit in the data
	 */
	numErrors = (regValue & REG_UMI_BCH_CTRL_STATUS_NB_CORR_ERROR) >> 20;

	for (idx = 0; idx < numErrors; idx++) {
		errorLocation =
		    REG_UMI_BCH_ERR_LOC_ADDR(idx) & REG_UMI_BCH_ERR_LOC_MASK;

		/* Flip bit */
		nand_bcm_umi_bch_ecc_flip_bit(datap, errorLocation);
	}
	/* Errors corrected */
	return numErrors;
}
149#endif
diff --git a/drivers/mtd/nand/nand_bcm_umi.h b/drivers/mtd/nand/nand_bcm_umi.h
new file mode 100644
index 00000000000..198b304d6f7
--- /dev/null
+++ b/drivers/mtd/nand/nand_bcm_umi.h
@@ -0,0 +1,337 @@
1/*****************************************************************************
2* Copyright 2003 - 2009 Broadcom Corporation. All rights reserved.
3*
4* Unless you and Broadcom execute a separate written software license
5* agreement governing use of this software, this software is licensed to you
6* under the terms of the GNU General Public License version 2, available at
7* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
8*
9* Notwithstanding the above, under no circumstances may you combine this
10* software in any way with any other Broadcom software provided under a
11* license other than the GPL, without Broadcom's express prior written
12* consent.
13*****************************************************************************/
14#ifndef NAND_BCM_UMI_H
15#define NAND_BCM_UMI_H
16
17/* ---- Include Files ---------------------------------------------------- */
18#include <mach/reg_umi.h>
19#include <mach/reg_nand.h>
20#include <cfg_global.h>
21
22/* ---- Constants and Types ---------------------------------------------- */
23#if (CFG_GLOBAL_CHIP_FAMILY == CFG_GLOBAL_CHIP_FAMILY_BCMRING)
24#define NAND_ECC_BCH (CFG_GLOBAL_CHIP_REV > 0xA0)
25#else
26#define NAND_ECC_BCH 0
27#endif
28
29#define CFG_GLOBAL_NAND_ECC_BCH_NUM_BYTES 13
30
31#if NAND_ECC_BCH
32#ifdef BOOT0_BUILD
33#define NAND_ECC_NUM_BYTES 13
34#else
35#define NAND_ECC_NUM_BYTES CFG_GLOBAL_NAND_ECC_BCH_NUM_BYTES
36#endif
37#else
38#define NAND_ECC_NUM_BYTES 3
39#endif
40
41#define NAND_DATA_ACCESS_SIZE 512
42
43/* ---- Variable Externs ------------------------------------------ */
44/* ---- Function Prototypes --------------------------------------- */
45int nand_bcm_umi_bch_correct_page(uint8_t *datap, uint8_t *readEccData,
46 int numEccBytes);
47
/* Check if device is ready (RDY bit of the NAND RCSR register) */
static inline int nand_bcm_umi_dev_ready(void)
{
	/* returns the raw masked bit, i.e. non-zero when ready */
	return REG_UMI_NAND_RCSR & REG_UMI_NAND_RCSR_RDY;
}
53
/* Busy-wait (spin) until the NAND device reports ready */
static inline void nand_bcm_umi_wait_till_ready(void)
{
	for (;;) {
		if (nand_bcm_umi_dev_ready() != 0)
			break;
	}
}
60
/* Enable Hamming ECC */
static inline void nand_bcm_umi_hamming_enable_hwecc(void)
{
	/* disable and reset ECC, 512 byte page: clear both the enable bit
	 * and the 256-byte-page select in one read-modify-write */
	REG_UMI_NAND_ECC_CSR &= ~(REG_UMI_NAND_ECC_CSR_ECC_ENABLE |
		REG_UMI_NAND_ECC_CSR_256BYTE);
	/* enable ECC so the engine restarts from a clean state */
	REG_UMI_NAND_ECC_CSR |= REG_UMI_NAND_ECC_CSR_ECC_ENABLE;
}
70
71#if NAND_ECC_BCH
72/* BCH ECC specifics */
73#define ECC_BITS_PER_CORRECTABLE_BIT 13
74
/* Enable BCH Read ECC: reset the read-ECC engine, then switch it on */
static inline void nand_bcm_umi_bch_enable_read_hwecc(void)
{
	/* disable and reset ECC */
	REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID;
	/* Turn on ECC */
	REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN;
}
83
/* Enable BCH Write ECC: reset the write-ECC engine, then switch it on */
static inline void nand_bcm_umi_bch_enable_write_hwecc(void)
{
	/* disable and reset ECC */
	REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID;
	/* Turn on ECC */
	REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_WR_EN;
}
92
/* Config number of BCH ECC bytes
 *
 * @numEccBytes: ECC bytes per 512-byte sector. Programs the BCH code
 * parameters: T (correctable bits; each costs 13 ECC bits), N (total
 * codeword bits = data + ECC) and K = N - T*13.
 */
static inline void nand_bcm_umi_bch_config_ecc(uint8_t numEccBytes)
{
	uint32_t nValue;
	uint32_t tValue;
	uint32_t kValue;
	uint32_t numBits = numEccBytes * 8;

	/* disable and reset ECC (both read and write engines) */
	REG_UMI_BCH_CTRL_STATUS =
	    REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID |
	    REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID;

	/* Every correctable bit requires 13 ECC bits */
	tValue = (uint32_t) (numBits / ECC_BITS_PER_CORRECTABLE_BIT);

	/* Total data in number of bits for generating and computing BCH ECC */
	nValue = (NAND_DATA_ACCESS_SIZE + numEccBytes) * 8;

	/* K parameter is used internally. K = N - (T * 13) */
	kValue = nValue - (tValue * ECC_BITS_PER_CORRECTABLE_BIT);

	/* Write the settings */
	REG_UMI_BCH_N = nValue;
	REG_UMI_BCH_T = tValue;
	REG_UMI_BCH_K = kValue;
}
120
/* Pause during ECC read calculation to skip bytes in OOB;
 * keeps ECC_RD_EN asserted while setting the decoder PAUSE bit */
static inline void nand_bcm_umi_bch_pause_read_ecc_calc(void)
{
	REG_UMI_BCH_CTRL_STATUS =
	    REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN |
	    REG_UMI_BCH_CTRL_STATUS_PAUSE_ECC_DEC;
}
128
/* Resume during ECC read calculation after skipping bytes in OOB
 * (re-writes ECC_RD_EN alone, which drops the PAUSE bit) */
static inline void nand_bcm_umi_bch_resume_read_ecc_calc(void)
{
	REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN;
}
134
/* Poll read ECC calc to check when hardware completes
 *
 * Returns the full control/status register snapshot so the caller can
 * also inspect the error flags it carries (see
 * nand_bcm_umi_bch_correct_page()).
 */
static inline uint32_t nand_bcm_umi_bch_poll_read_ecc_calc(void)
{
	uint32_t regVal;

	do {
		/* wait for ECC to be valid */
		regVal = REG_UMI_BCH_CTRL_STATUS;
	} while ((regVal & REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID) == 0);

	return regVal;
}
147
148/* Poll write ECC calc to check when hardware completes */
149static inline void nand_bcm_umi_bch_poll_write_ecc_calc(void)
150{
151 /* wait for ECC to be valid */
152 while ((REG_UMI_BCH_CTRL_STATUS & REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID)
153 == 0)
154 ;
155}
156
/* Read the OOB and ECC, for kernel write OOB to a buffer
 *
 * Streams the 16 OOB bytes of one 512-byte sector from the NAND data
 * port, collecting the ECC bytes into @eccCalc while pausing/resuming
 * the hardware read-ECC engine so that non-ECC bytes (free area, BI) are
 * not folded into the running calculation. In-kernel builds additionally
 * mirror every byte read into @oobp.
 */
#if defined(__KERNEL__) && !defined(STANDALONE)
static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
	uint8_t *eccCalc, int numEccBytes, uint8_t *oobp)
#else
static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
	uint8_t *eccCalc, int numEccBytes)
#endif
{
	int eccPos = 0;
	int numToRead = 16;	/* There are 16 bytes per sector in the OOB */

	/* ECC is already paused when this function is called */
	if (pageSize != NAND_DATA_ACCESS_SIZE) {
		/* skip BI (first OOB byte on large pages) */
#if defined(__KERNEL__) && !defined(STANDALONE)
		*oobp++ = REG_NAND_DATA8;
#else
		REG_NAND_DATA8;
#endif
		numToRead--;
	}

	while (numToRead > numEccBytes) {
		/* skip free oob region */
#if defined(__KERNEL__) && !defined(STANDALONE)
		*oobp++ = REG_NAND_DATA8;
#else
		REG_NAND_DATA8;
#endif
		numToRead--;
	}

	if (pageSize == NAND_DATA_ACCESS_SIZE) {
		/* read ECC bytes before BI */
		nand_bcm_umi_bch_resume_read_ecc_calc();

		/* NOTE(review): stopping when 11 bytes remain presumably
		 * places the BI at OOB offset 5 on small pages - confirm
		 * against the controller documentation */
		while (numToRead > 11) {
#if defined(__KERNEL__) && !defined(STANDALONE)
			*oobp = REG_NAND_DATA8;
			eccCalc[eccPos++] = *oobp;
			oobp++;
#else
			eccCalc[eccPos++] = REG_NAND_DATA8;
#endif
			numToRead--;
		}

		/* pause so the BI byte is not fed into the ECC calc */
		nand_bcm_umi_bch_pause_read_ecc_calc();

		if (numToRead == 11) {
			/* read BI */
#if defined(__KERNEL__) && !defined(STANDALONE)
			*oobp++ = REG_NAND_DATA8;
#else
			REG_NAND_DATA8;
#endif
			numToRead--;
		}

	}
	/* read ECC bytes */
	nand_bcm_umi_bch_resume_read_ecc_calc();
	while (numToRead) {
#if defined(__KERNEL__) && !defined(STANDALONE)
		*oobp = REG_NAND_DATA8;
		eccCalc[eccPos++] = *oobp;
		oobp++;
#else
		eccCalc[eccPos++] = REG_NAND_DATA8;
#endif
		numToRead--;
	}
}
231
/* Helper: store one ECC byte into the OOB. The byte is written only when
 * position eccBytePos (counted from 1 = last ECC byte) is within the
 * configured ECC size; otherwise the OOB byte is left untouched. */
static inline void NAND_BCM_UMI_ECC_WRITE(int numEccBytes, int eccBytePos,
	uint8_t *oobp, uint8_t eccVal)
{
	if (eccBytePos > numEccBytes)
		return;

	*oobp = eccVal;
}
239
/* Write OOB with ECC
 *
 * Scatters the hardware-computed BCH ECC (read from the 32-bit
 * REG_UMI_BCH_WR_ECC_* result registers) into the OOB buffer @oobp,
 * skipping the CM and BI byte positions. Stores for byte positions
 * beyond @numEccBytes are suppressed inside NAND_BCM_UMI_ECC_WRITE.
 */
static inline void nand_bcm_umi_bch_write_oobEcc(uint32_t pageSize,
	uint8_t *oobp, int numEccBytes)
{
	uint32_t eccVal = 0xffffffff;

	/* wait for write ECC to be valid */
	nand_bcm_umi_bch_poll_write_ecc_calc();

	/*
	 ** Get the hardware ecc from the 32-bit result registers.
	 ** Read after 512 byte accesses. Format B3B2B1B0
	 ** where B3 = ecc3, etc.
	 */

	if (pageSize == NAND_DATA_ACCESS_SIZE) {
		/* Now fill in the ECC bytes */
		if (numEccBytes >= 13)
			eccVal = REG_UMI_BCH_WR_ECC_3;

		/* Usually we skip CM in oob[0,1] */
		NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[0],
			(eccVal >> 16) & 0xff);
		NAND_BCM_UMI_ECC_WRITE(numEccBytes, 14, &oobp[1],
			(eccVal >> 8) & 0xff);

		/* Write ECC in oob[2,3,4] */
		NAND_BCM_UMI_ECC_WRITE(numEccBytes, 13, &oobp[2],
			eccVal & 0xff);	/* ECC 12 */

		if (numEccBytes >= 9)
			eccVal = REG_UMI_BCH_WR_ECC_2;

		NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[3],
			(eccVal >> 24) & 0xff);	/* ECC11 */
		NAND_BCM_UMI_ECC_WRITE(numEccBytes, 11, &oobp[4],
			(eccVal >> 16) & 0xff);	/* ECC10 */

		/* Always Skip BI in oob[5] */
	} else {
		/* Always Skip BI in oob[0] */

		/* Now fill in the ECC bytes */
		if (numEccBytes >= 13)
			eccVal = REG_UMI_BCH_WR_ECC_3;

		/* Usually skip CM in oob[1,2] */
		NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[1],
			(eccVal >> 16) & 0xff);
		NAND_BCM_UMI_ECC_WRITE(numEccBytes, 14, &oobp[2],
			(eccVal >> 8) & 0xff);

		/* Write ECC in oob[3-15] */
		NAND_BCM_UMI_ECC_WRITE(numEccBytes, 13, &oobp[3],
			eccVal & 0xff);	/* ECC12 */

		if (numEccBytes >= 9)
			eccVal = REG_UMI_BCH_WR_ECC_2;

		NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[4],
			(eccVal >> 24) & 0xff);	/* ECC11 */
		NAND_BCM_UMI_ECC_WRITE(numEccBytes, 11, &oobp[5],
			(eccVal >> 16) & 0xff);	/* ECC10 */
	}

	/* Fill in the remainder of ECC locations */
	NAND_BCM_UMI_ECC_WRITE(numEccBytes, 10, &oobp[6],
		(eccVal >> 8) & 0xff);	/* ECC9 */
	NAND_BCM_UMI_ECC_WRITE(numEccBytes, 9, &oobp[7],
		eccVal & 0xff);	/* ECC8 */

	if (numEccBytes >= 5)
		eccVal = REG_UMI_BCH_WR_ECC_1;

	NAND_BCM_UMI_ECC_WRITE(numEccBytes, 8, &oobp[8],
		(eccVal >> 24) & 0xff);	/* ECC7 */
	NAND_BCM_UMI_ECC_WRITE(numEccBytes, 7, &oobp[9],
		(eccVal >> 16) & 0xff);	/* ECC6 */
	NAND_BCM_UMI_ECC_WRITE(numEccBytes, 6, &oobp[10],
		(eccVal >> 8) & 0xff);	/* ECC5 */
	NAND_BCM_UMI_ECC_WRITE(numEccBytes, 5, &oobp[11],
		eccVal & 0xff);	/* ECC4 */

	if (numEccBytes >= 1)
		eccVal = REG_UMI_BCH_WR_ECC_0;

	NAND_BCM_UMI_ECC_WRITE(numEccBytes, 4, &oobp[12],
		(eccVal >> 24) & 0xff);	/* ECC3 */
	NAND_BCM_UMI_ECC_WRITE(numEccBytes, 3, &oobp[13],
		(eccVal >> 16) & 0xff);	/* ECC2 */
	NAND_BCM_UMI_ECC_WRITE(numEccBytes, 2, &oobp[14],
		(eccVal >> 8) & 0xff);	/* ECC1 */
	NAND_BCM_UMI_ECC_WRITE(numEccBytes, 1, &oobp[15],
		eccVal & 0xff);	/* ECC0 */
}
335#endif
336
337#endif /* NAND_BCM_UMI_H */
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
new file mode 100644
index 00000000000..b6a5c86ab31
--- /dev/null
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -0,0 +1,246 @@
1/*
2 * drivers/mtd/nand/nomadik_nand.c
3 *
4 * Overview:
5 * Driver for on-board NAND flash on Nomadik Platforms
6 *
7 * Copyright © 2007 STMicroelectronics Pvt. Ltd.
8 * Author: Sachin Verma <sachin.verma@st.com>
9 *
10 * Copyright © 2009 Alessandro Rubini
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 */
23
24#include <linux/init.h>
25#include <linux/module.h>
26#include <linux/types.h>
27#include <linux/mtd/mtd.h>
28#include <linux/mtd/nand.h>
29#include <linux/mtd/nand_ecc.h>
30#include <linux/platform_device.h>
31#include <linux/mtd/partitions.h>
32#include <linux/io.h>
33#include <linux/slab.h>
34#include <mach/nand.h>
35#include <mach/fsmc.h>
36
37#include <mtd/mtd-abi.h>
38
/* Driver-private state; one instance per probed NAND controller */
struct nomadik_nand_host {
	struct mtd_info mtd;		/* MTD device handed to the MTD core */
	struct nand_chip nand;		/* raw NAND chip description */
	void __iomem *data_va;		/* mapped "nand_data" window */
	void __iomem *cmd_va;		/* mapped "nand_cmd" (command latch) window */
	void __iomem *addr_va;		/* mapped "nand_addr" (address latch) window */
	struct nand_bbt_descr *bbt_desc;	/* bad-block table descriptor */
};
47
/*
 * OOB layout: four 512-byte subpages, 3 soft-ECC bytes each, stored at
 * offsets 2..4 of every 16-byte OOB chunk.
 */
static struct nand_ecclayout nomadik_ecc_layout = {
	.eccbytes = 3 * 4,
	.eccpos = { /* each subpage has 16 bytes: pos 2,3,4 hosts ECC */
		0x02, 0x03, 0x04,
		0x12, 0x13, 0x14,
		0x22, 0x23, 0x24,
		0x32, 0x33, 0x34},
	/* let's keep bytes 5,6,7 for us, just in case we change ECC algo */
	.oobfree = { {0x08, 0x08}, {0x18, 0x08}, {0x28, 0x08}, {0x38, 0x08} },
};
58
/* nand_ecc_ctrl.hwctl hook; intentionally a no-op */
static void nomadik_ecc_control(struct mtd_info *mtd, int mode)
{
	/* No need to enable hw ecc, it's on by default */
}
63
64static void nomadik_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
65{
66 struct nand_chip *nand = mtd->priv;
67 struct nomadik_nand_host *host = nand->priv;
68
69 if (cmd == NAND_CMD_NONE)
70 return;
71
72 if (ctrl & NAND_CLE)
73 writeb(cmd, host->cmd_va);
74 else
75 writeb(cmd, host->addr_va);
76}
77
78static int nomadik_nand_probe(struct platform_device *pdev)
79{
80 struct nomadik_nand_platform_data *pdata = pdev->dev.platform_data;
81 struct nomadik_nand_host *host;
82 struct mtd_info *mtd;
83 struct nand_chip *nand;
84 struct resource *res;
85 int ret = 0;
86
87 /* Allocate memory for the device structure (and zero it) */
88 host = kzalloc(sizeof(struct nomadik_nand_host), GFP_KERNEL);
89 if (!host) {
90 dev_err(&pdev->dev, "Failed to allocate device structure.\n");
91 return -ENOMEM;
92 }
93
94 /* Call the client's init function, if any */
95 if (pdata->init)
96 ret = pdata->init();
97 if (ret < 0) {
98 dev_err(&pdev->dev, "Init function failed\n");
99 goto err;
100 }
101
102 /* ioremap three regions */
103 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
104 if (!res) {
105 ret = -EIO;
106 goto err_unmap;
107 }
108 host->addr_va = ioremap(res->start, resource_size(res));
109
110 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
111 if (!res) {
112 ret = -EIO;
113 goto err_unmap;
114 }
115 host->data_va = ioremap(res->start, resource_size(res));
116
117 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
118 if (!res) {
119 ret = -EIO;
120 goto err_unmap;
121 }
122 host->cmd_va = ioremap(res->start, resource_size(res));
123
124 if (!host->addr_va || !host->data_va || !host->cmd_va) {
125 ret = -ENOMEM;
126 goto err_unmap;
127 }
128
129 /* Link all private pointers */
130 mtd = &host->mtd;
131 nand = &host->nand;
132 mtd->priv = nand;
133 nand->priv = host;
134
135 host->mtd.owner = THIS_MODULE;
136 nand->IO_ADDR_R = host->data_va;
137 nand->IO_ADDR_W = host->data_va;
138 nand->cmd_ctrl = nomadik_cmd_ctrl;
139
140 /*
141 * This stanza declares ECC_HW but uses soft routines. It's because
142 * HW claims to make the calculation but not the correction. However,
143 * I haven't managed to get the desired data out of it until now.
144 */
145 nand->ecc.mode = NAND_ECC_SOFT;
146 nand->ecc.layout = &nomadik_ecc_layout;
147 nand->ecc.hwctl = nomadik_ecc_control;
148 nand->ecc.size = 512;
149 nand->ecc.bytes = 3;
150
151 nand->options = pdata->options;
152
153 /*
154 * Scan to find existence of the device
155 */
156 if (nand_scan(&host->mtd, 1)) {
157 ret = -ENXIO;
158 goto err_unmap;
159 }
160
161 mtd_device_register(&host->mtd, pdata->parts, pdata->nparts);
162
163 platform_set_drvdata(pdev, host);
164 return 0;
165
166 err_unmap:
167 if (host->cmd_va)
168 iounmap(host->cmd_va);
169 if (host->data_va)
170 iounmap(host->data_va);
171 if (host->addr_va)
172 iounmap(host->addr_va);
173 err:
174 kfree(host);
175 return ret;
176}
177
178/*
179 * Clean up routine
180 */
181static int nomadik_nand_remove(struct platform_device *pdev)
182{
183 struct nomadik_nand_host *host = platform_get_drvdata(pdev);
184 struct nomadik_nand_platform_data *pdata = pdev->dev.platform_data;
185
186 if (pdata->exit)
187 pdata->exit();
188
189 if (host) {
190 iounmap(host->cmd_va);
191 iounmap(host->data_va);
192 iounmap(host->addr_va);
193 kfree(host);
194 }
195 return 0;
196}
197
198static int nomadik_nand_suspend(struct device *dev)
199{
200 struct nomadik_nand_host *host = dev_get_drvdata(dev);
201 int ret = 0;
202 if (host)
203 ret = host->mtd.suspend(&host->mtd);
204 return ret;
205}
206
/* dev_pm_ops.resume hook: forward resume to the MTD layer */
static int nomadik_nand_resume(struct device *dev)
{
	struct nomadik_nand_host *host = dev_get_drvdata(dev);
	/* drvdata may be NULL if probe never completed */
	if (host)
		host->mtd.resume(&host->mtd);
	return 0;
}
214
/* Power-management callbacks, wired into the platform driver below */
static const struct dev_pm_ops nomadik_nand_pm_ops = {
	.suspend = nomadik_nand_suspend,
	.resume = nomadik_nand_resume,
};

/* Platform driver glue; binds to devices named "nomadik_nand" */
static struct platform_driver nomadik_nand_driver = {
	.probe = nomadik_nand_probe,
	.remove = nomadik_nand_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = "nomadik_nand",
		.pm = &nomadik_nand_pm_ops,
	},
};
229
/* Module entry point: register the platform driver with the core */
static int __init nand_nomadik_init(void)
{
	pr_info("Nomadik NAND driver\n");
	return platform_driver_register(&nomadik_nand_driver);
}

/* Module exit point: unregister the platform driver */
static void __exit nand_nomadik_exit(void)
{
	platform_driver_unregister(&nomadik_nand_driver);
}

module_init(nand_nomadik_init);
module_exit(nand_nomadik_exit);
243
244MODULE_LICENSE("GPL");
245MODULE_AUTHOR("ST Microelectronics (sachin.verma@st.com)");
246MODULE_DESCRIPTION("NAND driver for Nomadik Platform");
diff --git a/drivers/mtd/nand/spia.c b/drivers/mtd/nand/spia.c
new file mode 100644
index 00000000000..bef76cd7c24
--- /dev/null
+++ b/drivers/mtd/nand/spia.c
@@ -0,0 +1,176 @@
1/*
2 * drivers/mtd/nand/spia.c
3 *
4 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
5 *
6 *
 * 10-29-2001 TG change to support hardware-specific access
 * to control lines (due to change in nand.c)
9 * page_cache added
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * Overview:
16 * This is a device driver for the NAND flash device found on the
17 * SPIA board which utilizes the Toshiba TC58V64AFT part. This is
18 * a 64Mibit (8MiB x 8 bits) NAND flash device.
19 */
20
21#include <linux/kernel.h>
22#include <linux/init.h>
23#include <linux/slab.h>
24#include <linux/module.h>
25#include <linux/mtd/mtd.h>
26#include <linux/mtd/nand.h>
27#include <linux/mtd/partitions.h>
28#include <asm/io.h>
29
30/*
31 * MTD structure for SPIA board
32 */
33static struct mtd_info *spia_mtd = NULL;
34
35/*
36 * Values specific to the SPIA board (used with EP7212 processor)
37 */
38#define SPIA_IO_BASE 0xd0000000 /* Start of EP7212 IO address space */
39#define SPIA_FIO_BASE 0xf0000000 /* Address where flash is mapped */
40#define SPIA_PEDR 0x0080 /*
41 * IO offset to Port E data register
42 * where the CLE, ALE and NCE pins
43 * are wired to.
44 */
45#define SPIA_PEDDR 0x00c0 /*
46 * IO offset to Port E data direction
47 * register so we can control the IO
48 * lines.
49 */
50
51/*
52 * Module stuff
53 */
54
55static int spia_io_base = SPIA_IO_BASE;
56static int spia_fio_base = SPIA_FIO_BASE;
57static int spia_pedr = SPIA_PEDR;
58static int spia_peddr = SPIA_PEDDR;
59
60module_param(spia_io_base, int, 0);
61module_param(spia_fio_base, int, 0);
62module_param(spia_pedr, int, 0);
63module_param(spia_peddr, int, 0);
64
65/*
66 * Define partitions for flash device
67 */
68static const struct mtd_partition partition_info[] = {
69 {
70 .name = "SPIA flash partition 1",
71 .offset = 0,
72 .size = 2 * 1024 * 1024},
73 {
74 .name = "SPIA flash partition 2",
75 .offset = 2 * 1024 * 1024,
76 .size = 6 * 1024 * 1024}
77};
78
79#define NUM_PARTITIONS 2
80
81/*
82 * hardware specific access to control-lines
83 *
84 * ctrl:
85 * NAND_CNE: bit 0 -> bit 2
86 * NAND_CLE: bit 1 -> bit 0
87 * NAND_ALE: bit 2 -> bit 1
88 */
89static void spia_hwcontrol(struct mtd_info *mtd, int cmd)
90{
91 struct nand_chip *chip = mtd->priv;
92
93 if (ctrl & NAND_CTRL_CHANGE) {
94 void __iomem *addr = spia_io_base + spia_pedr;
95 unsigned char bits;
96
97 bits = (ctrl & NAND_CNE) << 2;
98 bits |= (ctrl & NAND_CLE | NAND_ALE) >> 1;
99 writeb((readb(addr) & ~0x7) | bits, addr);
100 }
101
102 if (cmd != NAND_CMD_NONE)
103 writeb(cmd, chip->IO_ADDR_W);
104}
105
106/*
107 * Main initialization routine
108 */
109static int __init spia_init(void)
110{
111 struct nand_chip *this;
112
113 /* Allocate memory for MTD device structure and private data */
114 spia_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
115 if (!spia_mtd) {
116 printk("Unable to allocate SPIA NAND MTD device structure.\n");
117 return -ENOMEM;
118 }
119
120 /* Get pointer to private data */
121 this = (struct nand_chip *)(&spia_mtd[1]);
122
123 /* Initialize structures */
124 memset(spia_mtd, 0, sizeof(struct mtd_info));
125 memset(this, 0, sizeof(struct nand_chip));
126
127 /* Link the private data with the MTD structure */
128 spia_mtd->priv = this;
129 spia_mtd->owner = THIS_MODULE;
130
131 /*
132 * Set GPIO Port E control register so that the pins are configured
133 * to be outputs for controlling the NAND flash.
134 */
135 (*(volatile unsigned char *)(spia_io_base + spia_peddr)) = 0x07;
136
137 /* Set address of NAND IO lines */
138 this->IO_ADDR_R = (void __iomem *)spia_fio_base;
139 this->IO_ADDR_W = (void __iomem *)spia_fio_base;
140 /* Set address of hardware control function */
141 this->cmd_ctrl = spia_hwcontrol;
142 /* 15 us command delay time */
143 this->chip_delay = 15;
144
145 /* Scan to find existence of the device */
146 if (nand_scan(spia_mtd, 1)) {
147 kfree(spia_mtd);
148 return -ENXIO;
149 }
150
151 /* Register the partitions */
152 mtd_device_register(spia_mtd, partition_info, NUM_PARTITIONS);
153
154 /* Return happy */
155 return 0;
156}
157
158module_init(spia_init);
159
/*
 * Clean up routine
 */
static void __exit spia_cleanup(void)
{
	/* Release resources, unregister device */
	nand_release(spia_mtd);

	/* Free the MTD device structure (the nand_chip lives in the same
	 * allocation, see spia_init) */
	kfree(spia_mtd);
}
171
172module_exit(spia_cleanup);
173
174MODULE_LICENSE("GPL");
175MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com");
176MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on SPIA board");
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
new file mode 100644
index 00000000000..a3a198f9b98
--- /dev/null
+++ b/drivers/mtd/ubi/scan.c
@@ -0,0 +1,1605 @@
1/*
2 * Copyright (c) International Business Machines Corp., 2006
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Author: Artem Bityutskiy (Битюцкий Артём)
19 */
20
21/*
22 * UBI scanning sub-system.
23 *
24 * This sub-system is responsible for scanning the flash media, checking UBI
25 * headers and providing complete information about the UBI flash image.
26 *
 * The scanning information is represented by a &struct ubi_scan_info object.
28 * Information about found volumes is represented by &struct ubi_scan_volume
29 * objects which are kept in volume RB-tree with root at the @volumes field.
30 * The RB-tree is indexed by the volume ID.
31 *
32 * Scanned logical eraseblocks are represented by &struct ubi_scan_leb objects.
33 * These objects are kept in per-volume RB-trees with the root at the
34 * corresponding &struct ubi_scan_volume object. To put it differently, we keep
35 * an RB-tree of per-volume objects and each of these objects is the root of
36 * RB-tree of per-eraseblock objects.
37 *
38 * Corrupted physical eraseblocks are put to the @corr list, free physical
39 * eraseblocks are put to the @free list and the physical eraseblock to be
40 * erased are put to the @erase list.
41 *
42 * About corruptions
43 * ~~~~~~~~~~~~~~~~~
44 *
45 * UBI protects EC and VID headers with CRC-32 checksums, so it can detect
46 * whether the headers are corrupted or not. Sometimes UBI also protects the
47 * data with CRC-32, e.g., when it executes the atomic LEB change operation, or
48 * when it moves the contents of a PEB for wear-leveling purposes.
49 *
50 * UBI tries to distinguish between 2 types of corruptions.
51 *
52 * 1. Corruptions caused by power cuts. These are expected corruptions and UBI
53 * tries to handle them gracefully, without printing too many warnings and
54 * error messages. The idea is that we do not lose important data in these case
55 * - we may lose only the data which was being written to the media just before
56 * the power cut happened, and the upper layers (e.g., UBIFS) are supposed to
57 * handle such data losses (e.g., by using the FS journal).
58 *
59 * When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like
60 * the reason is a power cut, UBI puts this PEB to the @erase list, and all
61 * PEBs in the @erase list are scheduled for erasure later.
62 *
63 * 2. Unexpected corruptions which are not caused by power cuts. During
64 * scanning, such PEBs are put to the @corr list and UBI preserves them.
65 * Obviously, this lessens the amount of available PEBs, and if at some point
66 * UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs
67 * about such PEBs every time the MTD device is attached.
68 *
69 * However, it is difficult to reliably distinguish between these types of
70 * corruptions and UBI's strategy is as follows. UBI assumes corruption type 2
71 * if the VID header is corrupted and the data area does not contain all 0xFFs,
72 * and there were no bit-flips or integrity errors while reading the data area.
73 * Otherwise UBI assumes corruption type 1. So the decision criteria are as
74 * follows.
75 * o If the data area contains only 0xFFs, there is no data, and it is safe
76 * to just erase this PEB - this is corruption type 1.
77 * o If the data area has bit-flips or data integrity errors (ECC errors on
78 * NAND), it is probably a PEB which was being erased when power cut
79 * happened, so this is corruption type 1. However, this is just a guess,
80 * which might be wrong.
 * o Otherwise this is corruption type 2.
82 */
83
84#include <linux/err.h>
85#include <linux/slab.h>
86#include <linux/crc32.h>
87#include <linux/math64.h>
88#include <linux/random.h>
89#include "ubi.h"
90
91#ifdef CONFIG_MTD_UBI_DEBUG
92static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si);
93#else
94#define paranoid_check_si(ubi, si) 0
95#endif
96
97/* Temporary variables used during scanning */
98static struct ubi_ec_hdr *ech;
99static struct ubi_vid_hdr *vidh;
100
101/**
102 * add_to_list - add physical eraseblock to a list.
103 * @si: scanning information
104 * @pnum: physical eraseblock number to add
105 * @ec: erase counter of the physical eraseblock
106 * @to_head: if not zero, add to the head of the list
107 * @list: the list to add to
108 *
109 * This function adds physical eraseblock @pnum to free, erase, or alien lists.
110 * If @to_head is not zero, PEB will be added to the head of the list, which
111 * basically means it will be processed first later. E.g., we add corrupted
112 * PEBs (corrupted due to power cuts) to the head of the erase list to make
113 * sure we erase them first and get rid of corruptions ASAP. This function
114 * returns zero in case of success and a negative error code in case of
115 * failure.
116 */
117static int add_to_list(struct ubi_scan_info *si, int pnum, int ec, int to_head,
118 struct list_head *list)
119{
120 struct ubi_scan_leb *seb;
121
122 if (list == &si->free) {
123 dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
124 } else if (list == &si->erase) {
125 dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
126 } else if (list == &si->alien) {
127 dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
128 si->alien_peb_count += 1;
129 } else
130 BUG();
131
132 seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
133 if (!seb)
134 return -ENOMEM;
135
136 seb->pnum = pnum;
137 seb->ec = ec;
138 if (to_head)
139 list_add(&seb->u.list, list);
140 else
141 list_add_tail(&seb->u.list, list);
142 return 0;
143}
144
145/**
146 * add_corrupted - add a corrupted physical eraseblock.
147 * @si: scanning information
148 * @pnum: physical eraseblock number to add
149 * @ec: erase counter of the physical eraseblock
150 *
151 * This function adds corrupted physical eraseblock @pnum to the 'corr' list.
152 * The corruption was presumably not caused by a power cut. Returns zero in
153 * case of success and a negative error code in case of failure.
154 */
155static int add_corrupted(struct ubi_scan_info *si, int pnum, int ec)
156{
157 struct ubi_scan_leb *seb;
158
159 dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
160
161 seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
162 if (!seb)
163 return -ENOMEM;
164
165 si->corr_peb_count += 1;
166 seb->pnum = pnum;
167 seb->ec = ec;
168 list_add(&seb->u.list, &si->corr);
169 return 0;
170}
171
/**
 * validate_vid_hdr - check volume identifier header.
 * @vid_hdr: the volume identifier header to check
 * @sv: information about the volume this logical eraseblock belongs to
 * @pnum: physical eraseblock number the VID header came from
 *
 * This function checks that data stored in @vid_hdr is consistent. Returns
 * non-zero if an inconsistency was found and zero if not.
 *
 * Note, UBI does sanity check of everything it reads from the flash media.
 * Most of the checks are done in the I/O sub-system. Here we check that the
 * information in the VID header is consistent to the information in other VID
 * headers of the same volume.
 */
static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
			    const struct ubi_scan_volume *sv, int pnum)
{
	/* multi-byte on-flash fields are big-endian; convert first */
	int vol_type = vid_hdr->vol_type;
	int vol_id = be32_to_cpu(vid_hdr->vol_id);
	int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
	int data_pad = be32_to_cpu(vid_hdr->data_pad);

	if (sv->leb_count != 0) {
		int sv_vol_type;

		/*
		 * This is not the first logical eraseblock belonging to this
		 * volume. Ensure that the data in its VID header is consistent
		 * to the data in previous logical eraseblock headers.
		 */

		if (vol_id != sv->vol_id) {
			dbg_err("inconsistent vol_id");
			goto bad;
		}

		/* map the volume type to its VID-header encoding */
		if (sv->vol_type == UBI_STATIC_VOLUME)
			sv_vol_type = UBI_VID_STATIC;
		else
			sv_vol_type = UBI_VID_DYNAMIC;

		if (vol_type != sv_vol_type) {
			dbg_err("inconsistent vol_type");
			goto bad;
		}

		if (used_ebs != sv->used_ebs) {
			dbg_err("inconsistent used_ebs");
			goto bad;
		}

		if (data_pad != sv->data_pad) {
			dbg_err("inconsistent data_pad");
			goto bad;
		}
	}

	return 0;

bad:
	/* dump both headers to help diagnose the mismatch, then fail */
	ubi_err("inconsistent VID header at PEB %d", pnum);
	ubi_dbg_dump_vid_hdr(vid_hdr);
	ubi_dbg_dump_sv(sv);
	return -EINVAL;
}
237
/**
 * add_volume - add volume to the scanning information.
 * @si: scanning information
 * @vol_id: ID of the volume to add
 * @pnum: physical eraseblock number
 * @vid_hdr: volume identifier header
 *
 * If the volume corresponding to the @vid_hdr logical eraseblock is already
 * present in the scanning information, this function does nothing. Otherwise
 * it adds corresponding volume to the scanning information. Returns a pointer
 * to the scanning volume object in case of success and a negative error code
 * in case of failure.
 */
static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
					  int pnum,
					  const struct ubi_vid_hdr *vid_hdr)
{
	struct ubi_scan_volume *sv;
	struct rb_node **p = &si->volumes.rb_node, *parent = NULL;

	ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));

	/* Walk the volume RB-tree to look if this volume is already present */
	while (*p) {
		parent = *p;
		sv = rb_entry(parent, struct ubi_scan_volume, rb);

		if (vol_id == sv->vol_id)
			return sv;

		/* NOTE(review): larger IDs descend left, i.e. the tree is
		 * kept in descending order; presumably the lookup code
		 * elsewhere in this file walks it the same way - verify
		 * before changing the comparison direction. */
		if (vol_id > sv->vol_id)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	/* The volume is absent - add it */
	sv = kmalloc(sizeof(struct ubi_scan_volume), GFP_KERNEL);
	if (!sv)
		return ERR_PTR(-ENOMEM);

	/* seed the per-volume info from this first LEB's VID header */
	sv->highest_lnum = sv->leb_count = 0;
	sv->vol_id = vol_id;
	sv->root = RB_ROOT;
	sv->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
	sv->data_pad = be32_to_cpu(vid_hdr->data_pad);
	sv->compat = vid_hdr->compat;
	sv->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
							    : UBI_STATIC_VOLUME;
	if (vol_id > si->highest_vol_id)
		si->highest_vol_id = vol_id;

	rb_link_node(&sv->rb, parent, p);
	rb_insert_color(&sv->rb, &si->volumes);
	si->vols_found += 1;
	dbg_bld("added volume %d", vol_id);
	return sv;
}
296
297/**
298 * compare_lebs - find out which logical eraseblock is newer.
299 * @ubi: UBI device description object
300 * @seb: first logical eraseblock to compare
301 * @pnum: physical eraseblock number of the second logical eraseblock to
302 * compare
303 * @vid_hdr: volume identifier header of the second logical eraseblock
304 *
305 * This function compares 2 copies of a LEB and informs which one is newer. In
306 * case of success this function returns a positive value, in case of failure, a
307 * negative error code is returned. The success return codes use the following
308 * bits:
309 * o bit 0 is cleared: the first PEB (described by @seb) is newer than the
310 * second PEB (described by @pnum and @vid_hdr);
311 * o bit 0 is set: the second PEB is newer;
312 * o bit 1 is cleared: no bit-flips were detected in the newer LEB;
313 * o bit 1 is set: bit-flips were detected in the newer LEB;
314 * o bit 2 is cleared: the older LEB is not corrupted;
315 * o bit 2 is set: the older LEB is corrupted.
316 */
317static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
318 int pnum, const struct ubi_vid_hdr *vid_hdr)
319{
320 void *buf;
321 int len, err, second_is_newer, bitflips = 0, corrupted = 0;
322 uint32_t data_crc, crc;
323 struct ubi_vid_hdr *vh = NULL;
324 unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
325
326 if (sqnum2 == seb->sqnum) {
327 /*
328 * This must be a really ancient UBI image which has been
329 * created before sequence numbers support has been added. At
330 * that times we used 32-bit LEB versions stored in logical
331 * eraseblocks. That was before UBI got into mainline. We do not
332 * support these images anymore. Well, those images still work,
333 * but only if no unclean reboots happened.
334 */
335 ubi_err("unsupported on-flash UBI format\n");
336 return -EINVAL;
337 }
338
339 /* Obviously the LEB with lower sequence counter is older */
340 second_is_newer = !!(sqnum2 > seb->sqnum);
341
342 /*
343 * Now we know which copy is newer. If the copy flag of the PEB with
344 * newer version is not set, then we just return, otherwise we have to
345 * check data CRC. For the second PEB we already have the VID header,
346 * for the first one - we'll need to re-read it from flash.
347 *
348 * Note: this may be optimized so that we wouldn't read twice.
349 */
350
351 if (second_is_newer) {
352 if (!vid_hdr->copy_flag) {
353 /* It is not a copy, so it is newer */
354 dbg_bld("second PEB %d is newer, copy_flag is unset",
355 pnum);
356 return 1;
357 }
358 } else {
359 if (!seb->copy_flag) {
360 /* It is not a copy, so it is newer */
361 dbg_bld("first PEB %d is newer, copy_flag is unset",
362 pnum);
363 return bitflips << 1;
364 }
365
366 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
367 if (!vh)
368 return -ENOMEM;
369
370 pnum = seb->pnum;
371 err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
372 if (err) {
373 if (err == UBI_IO_BITFLIPS)
374 bitflips = 1;
375 else {
376 dbg_err("VID of PEB %d header is bad, but it "
377 "was OK earlier, err %d", pnum, err);
378 if (err > 0)
379 err = -EIO;
380
381 goto out_free_vidh;
382 }
383 }
384
385 vid_hdr = vh;
386 }
387
388 /* Read the data of the copy and check the CRC */
389
390 len = be32_to_cpu(vid_hdr->data_size);
391 buf = vmalloc(len);
392 if (!buf) {
393 err = -ENOMEM;
394 goto out_free_vidh;
395 }
396
397 err = ubi_io_read_data(ubi, buf, pnum, 0, len);
398 if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
399 goto out_free_buf;
400
401 data_crc = be32_to_cpu(vid_hdr->data_crc);
402 crc = crc32(UBI_CRC32_INIT, buf, len);
403 if (crc != data_crc) {
404 dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
405 pnum, crc, data_crc);
406 corrupted = 1;
407 bitflips = 0;
408 second_is_newer = !second_is_newer;
409 } else {
410 dbg_bld("PEB %d CRC is OK", pnum);
411 bitflips = !!err;
412 }
413
414 vfree(buf);
415 ubi_free_vid_hdr(ubi, vh);
416
417 if (second_is_newer)
418 dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
419 else
420 dbg_bld("first PEB %d is newer, copy_flag is set", pnum);
421
422 return second_is_newer | (bitflips << 1) | (corrupted << 2);
423
424out_free_buf:
425 vfree(buf);
426out_free_vidh:
427 ubi_free_vid_hdr(ubi, vh);
428 return err;
429}
430
431/**
432 * ubi_scan_add_used - add physical eraseblock to the scanning information.
433 * @ubi: UBI device description object
434 * @si: scanning information
435 * @pnum: the physical eraseblock number
436 * @ec: erase counter
437 * @vid_hdr: the volume identifier header
438 * @bitflips: if bit-flips were detected when this physical eraseblock was read
439 *
440 * This function adds information about a used physical eraseblock to the
441 * 'used' tree of the corresponding volume. The function is rather complex
442 * because it has to handle cases when this is not the first physical
443 * eraseblock belonging to the same logical eraseblock, and the newer one has
444 * to be picked, while the older one has to be dropped. This function returns
445 * zero in case of success and a negative error code in case of failure.
446 */
447int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
448 int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
449 int bitflips)
450{
451 int err, vol_id, lnum;
452 unsigned long long sqnum;
453 struct ubi_scan_volume *sv;
454 struct ubi_scan_leb *seb;
455 struct rb_node **p, *parent = NULL;
456
457 vol_id = be32_to_cpu(vid_hdr->vol_id);
458 lnum = be32_to_cpu(vid_hdr->lnum);
459 sqnum = be64_to_cpu(vid_hdr->sqnum);
460
461 dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d",
462 pnum, vol_id, lnum, ec, sqnum, bitflips);
463
464 sv = add_volume(si, vol_id, pnum, vid_hdr);
465 if (IS_ERR(sv))
466 return PTR_ERR(sv);
467
468 if (si->max_sqnum < sqnum)
469 si->max_sqnum = sqnum;
470
471 /*
472 * Walk the RB-tree of logical eraseblocks of volume @vol_id to look
473 * if this is the first instance of this logical eraseblock or not.
474 */
475 p = &sv->root.rb_node;
476 while (*p) {
477 int cmp_res;
478
479 parent = *p;
480 seb = rb_entry(parent, struct ubi_scan_leb, u.rb);
481 if (lnum != seb->lnum) {
482 if (lnum < seb->lnum)
483 p = &(*p)->rb_left;
484 else
485 p = &(*p)->rb_right;
486 continue;
487 }
488
489 /*
490 * There is already a physical eraseblock describing the same
491 * logical eraseblock present.
492 */
493
494 dbg_bld("this LEB already exists: PEB %d, sqnum %llu, "
495 "EC %d", seb->pnum, seb->sqnum, seb->ec);
496
497 /*
498 * Make sure that the logical eraseblocks have different
499 * sequence numbers. Otherwise the image is bad.
500 *
501 * However, if the sequence number is zero, we assume it must
502 * be an ancient UBI image from the era when UBI did not have
503 * sequence numbers. We still can attach these images, unless
504 * there is a need to distinguish between old and new
505 * eraseblocks, in which case we'll refuse the image in
506 * 'compare_lebs()'. In other words, we attach old clean
507 * images, but refuse attaching old images with duplicated
508 * logical eraseblocks because there was an unclean reboot.
509 */
510 if (seb->sqnum == sqnum && sqnum != 0) {
511 ubi_err("two LEBs with same sequence number %llu",
512 sqnum);
513 ubi_dbg_dump_seb(seb, 0);
514 ubi_dbg_dump_vid_hdr(vid_hdr);
515 return -EINVAL;
516 }
517
518 /*
519 * Now we have to drop the older one and preserve the newer
520 * one.
521 */
522 cmp_res = compare_lebs(ubi, seb, pnum, vid_hdr);
523 if (cmp_res < 0)
524 return cmp_res;
525
526 if (cmp_res & 1) {
527 /*
528 * This logical eraseblock is newer than the one
529 * found earlier.
530 */
531 err = validate_vid_hdr(vid_hdr, sv, pnum);
532 if (err)
533 return err;
534
535 err = add_to_list(si, seb->pnum, seb->ec, cmp_res & 4,
536 &si->erase);
537 if (err)
538 return err;
539
540 seb->ec = ec;
541 seb->pnum = pnum;
542 seb->scrub = ((cmp_res & 2) || bitflips);
543 seb->copy_flag = vid_hdr->copy_flag;
544 seb->sqnum = sqnum;
545
546 if (sv->highest_lnum == lnum)
547 sv->last_data_size =
548 be32_to_cpu(vid_hdr->data_size);
549
550 return 0;
551 } else {
552 /*
553 * This logical eraseblock is older than the one found
554 * previously.
555 */
556 return add_to_list(si, pnum, ec, cmp_res & 4,
557 &si->erase);
558 }
559 }
560
561 /*
562 * We've met this logical eraseblock for the first time, add it to the
563 * scanning information.
564 */
565
566 err = validate_vid_hdr(vid_hdr, sv, pnum);
567 if (err)
568 return err;
569
570 seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
571 if (!seb)
572 return -ENOMEM;
573
574 seb->ec = ec;
575 seb->pnum = pnum;
576 seb->lnum = lnum;
577 seb->scrub = bitflips;
578 seb->copy_flag = vid_hdr->copy_flag;
579 seb->sqnum = sqnum;
580
581 if (sv->highest_lnum <= lnum) {
582 sv->highest_lnum = lnum;
583 sv->last_data_size = be32_to_cpu(vid_hdr->data_size);
584 }
585
586 sv->leb_count += 1;
587 rb_link_node(&seb->u.rb, parent, p);
588 rb_insert_color(&seb->u.rb, &sv->root);
589 return 0;
590}
591
592/**
593 * ubi_scan_find_sv - find volume in the scanning information.
594 * @si: scanning information
595 * @vol_id: the requested volume ID
596 *
597 * This function returns a pointer to the volume description or %NULL if there
598 * are no data about this volume in the scanning information.
599 */
600struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
601 int vol_id)
602{
603 struct ubi_scan_volume *sv;
604 struct rb_node *p = si->volumes.rb_node;
605
606 while (p) {
607 sv = rb_entry(p, struct ubi_scan_volume, rb);
608
609 if (vol_id == sv->vol_id)
610 return sv;
611
612 if (vol_id > sv->vol_id)
613 p = p->rb_left;
614 else
615 p = p->rb_right;
616 }
617
618 return NULL;
619}
620
621/**
622 * ubi_scan_find_seb - find LEB in the volume scanning information.
623 * @sv: a pointer to the volume scanning information
624 * @lnum: the requested logical eraseblock
625 *
626 * This function returns a pointer to the scanning logical eraseblock or %NULL
627 * if there are no data about it in the scanning volume information.
628 */
629struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv,
630 int lnum)
631{
632 struct ubi_scan_leb *seb;
633 struct rb_node *p = sv->root.rb_node;
634
635 while (p) {
636 seb = rb_entry(p, struct ubi_scan_leb, u.rb);
637
638 if (lnum == seb->lnum)
639 return seb;
640
641 if (lnum > seb->lnum)
642 p = p->rb_left;
643 else
644 p = p->rb_right;
645 }
646
647 return NULL;
648}
649
650/**
651 * ubi_scan_rm_volume - delete scanning information about a volume.
652 * @si: scanning information
653 * @sv: the volume scanning information to delete
654 */
655void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
656{
657 struct rb_node *rb;
658 struct ubi_scan_leb *seb;
659
660 dbg_bld("remove scanning information about volume %d", sv->vol_id);
661
662 while ((rb = rb_first(&sv->root))) {
663 seb = rb_entry(rb, struct ubi_scan_leb, u.rb);
664 rb_erase(&seb->u.rb, &sv->root);
665 list_add_tail(&seb->u.list, &si->erase);
666 }
667
668 rb_erase(&sv->rb, &si->volumes);
669 kfree(sv);
670 si->vols_found -= 1;
671}
672
673/**
674 * ubi_scan_erase_peb - erase a physical eraseblock.
675 * @ubi: UBI device description object
676 * @si: scanning information
677 * @pnum: physical eraseblock number to erase;
678 * @ec: erase counter value to write (%UBI_SCAN_UNKNOWN_EC if it is unknown)
679 *
680 * This function erases physical eraseblock 'pnum', and writes the erase
681 * counter header to it. This function should only be used on UBI device
682 * initialization stages, when the EBA sub-system had not been yet initialized.
683 * This function returns zero in case of success and a negative error code in
684 * case of failure.
685 */
686int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si,
687 int pnum, int ec)
688{
689 int err;
690 struct ubi_ec_hdr *ec_hdr;
691
692 if ((long long)ec >= UBI_MAX_ERASECOUNTER) {
693 /*
694 * Erase counter overflow. Upgrade UBI and use 64-bit
695 * erase counters internally.
696 */
697 ubi_err("erase counter overflow at PEB %d, EC %d", pnum, ec);
698 return -EINVAL;
699 }
700
701 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
702 if (!ec_hdr)
703 return -ENOMEM;
704
705 ec_hdr->ec = cpu_to_be64(ec);
706
707 err = ubi_io_sync_erase(ubi, pnum, 0);
708 if (err < 0)
709 goto out_free;
710
711 err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
712
713out_free:
714 kfree(ec_hdr);
715 return err;
716}
717
718/**
719 * ubi_scan_get_free_peb - get a free physical eraseblock.
720 * @ubi: UBI device description object
721 * @si: scanning information
722 *
723 * This function returns a free physical eraseblock. It is supposed to be
724 * called on the UBI initialization stages when the wear-leveling sub-system is
725 * not initialized yet. This function picks a physical eraseblocks from one of
726 * the lists, writes the EC header if it is needed, and removes it from the
727 * list.
728 *
729 * This function returns scanning physical eraseblock information in case of
730 * success and an error code in case of failure.
731 */
732struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
733 struct ubi_scan_info *si)
734{
735 int err = 0;
736 struct ubi_scan_leb *seb, *tmp_seb;
737
738 if (!list_empty(&si->free)) {
739 seb = list_entry(si->free.next, struct ubi_scan_leb, u.list);
740 list_del(&seb->u.list);
741 dbg_bld("return free PEB %d, EC %d", seb->pnum, seb->ec);
742 return seb;
743 }
744
745 /*
746 * We try to erase the first physical eraseblock from the erase list
747 * and pick it if we succeed, or try to erase the next one if not. And
748 * so forth. We don't want to take care about bad eraseblocks here -
749 * they'll be handled later.
750 */
751 list_for_each_entry_safe(seb, tmp_seb, &si->erase, u.list) {
752 if (seb->ec == UBI_SCAN_UNKNOWN_EC)
753 seb->ec = si->mean_ec;
754
755 err = ubi_scan_erase_peb(ubi, si, seb->pnum, seb->ec+1);
756 if (err)
757 continue;
758
759 seb->ec += 1;
760 list_del(&seb->u.list);
761 dbg_bld("return PEB %d, EC %d", seb->pnum, seb->ec);
762 return seb;
763 }
764
765 ubi_err("no free eraseblocks");
766 return ERR_PTR(-ENOSPC);
767}
768
769/**
770 * check_corruption - check the data area of PEB.
771 * @ubi: UBI device description object
772 * @vid_hrd: the (corrupted) VID header of this PEB
773 * @pnum: the physical eraseblock number to check
774 *
775 * This is a helper function which is used to distinguish between VID header
776 * corruptions caused by power cuts and other reasons. If the PEB contains only
777 * 0xFF bytes in the data area, the VID header is most probably corrupted
778 * because of a power cut (%0 is returned in this case). Otherwise, it was
779 * probably corrupted for some other reasons (%1 is returned in this case). A
780 * negative error code is returned if a read error occurred.
781 *
782 * If the corruption reason was a power cut, UBI can safely erase this PEB.
783 * Otherwise, it should preserve it to avoid possibly destroying important
784 * information.
785 */
786static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
787 int pnum)
788{
789 int err;
790
791 mutex_lock(&ubi->buf_mutex);
792 memset(ubi->peb_buf1, 0x00, ubi->leb_size);
793
794 err = ubi_io_read(ubi, ubi->peb_buf1, pnum, ubi->leb_start,
795 ubi->leb_size);
796 if (err == UBI_IO_BITFLIPS || err == -EBADMSG) {
797 /*
798 * Bit-flips or integrity errors while reading the data area.
799 * It is difficult to say for sure what type of corruption is
800 * this, but presumably a power cut happened while this PEB was
801 * erased, so it became unstable and corrupted, and should be
802 * erased.
803 */
804 err = 0;
805 goto out_unlock;
806 }
807
808 if (err)
809 goto out_unlock;
810
811 if (ubi_check_pattern(ubi->peb_buf1, 0xFF, ubi->leb_size))
812 goto out_unlock;
813
814 ubi_err("PEB %d contains corrupted VID header, and the data does not "
815 "contain all 0xFF, this may be a non-UBI PEB or a severe VID "
816 "header corruption which requires manual inspection", pnum);
817 ubi_dbg_dump_vid_hdr(vid_hdr);
818 dbg_msg("hexdump of PEB %d offset %d, length %d",
819 pnum, ubi->leb_start, ubi->leb_size);
820 ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
821 ubi->peb_buf1, ubi->leb_size, 1);
822 err = 1;
823
824out_unlock:
825 mutex_unlock(&ubi->buf_mutex);
826 return err;
827}
828
829/**
830 * process_eb - read, check UBI headers, and add them to scanning information.
831 * @ubi: UBI device description object
832 * @si: scanning information
833 * @pnum: the physical eraseblock number
834 *
835 * This function returns a zero if the physical eraseblock was successfully
836 * handled and a negative error code in case of failure.
837 */
838static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
839 int pnum)
840{
841 long long uninitialized_var(ec);
842 int err, bitflips = 0, vol_id, ec_err = 0;
843
844 dbg_bld("scan PEB %d", pnum);
845
846 /* Skip bad physical eraseblocks */
847 err = ubi_io_is_bad(ubi, pnum);
848 if (err < 0)
849 return err;
850 else if (err) {
851 /*
852 * FIXME: this is actually duty of the I/O sub-system to
853 * initialize this, but MTD does not provide enough
854 * information.
855 */
856 si->bad_peb_count += 1;
857 return 0;
858 }
859
860 err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
861 if (err < 0)
862 return err;
863 switch (err) {
864 case 0:
865 break;
866 case UBI_IO_BITFLIPS:
867 bitflips = 1;
868 break;
869 case UBI_IO_FF:
870 si->empty_peb_count += 1;
871 return add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, 0,
872 &si->erase);
873 case UBI_IO_FF_BITFLIPS:
874 si->empty_peb_count += 1;
875 return add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, 1,
876 &si->erase);
877 case UBI_IO_BAD_HDR_EBADMSG:
878 case UBI_IO_BAD_HDR:
879 /*
880 * We have to also look at the VID header, possibly it is not
881 * corrupted. Set %bitflips flag in order to make this PEB be
882 * moved and EC be re-created.
883 */
884 ec_err = err;
885 ec = UBI_SCAN_UNKNOWN_EC;
886 bitflips = 1;
887 break;
888 default:
889 ubi_err("'ubi_io_read_ec_hdr()' returned unknown code %d", err);
890 return -EINVAL;
891 }
892
893 if (!ec_err) {
894 int image_seq;
895
896 /* Make sure UBI version is OK */
897 if (ech->version != UBI_VERSION) {
898 ubi_err("this UBI version is %d, image version is %d",
899 UBI_VERSION, (int)ech->version);
900 return -EINVAL;
901 }
902
903 ec = be64_to_cpu(ech->ec);
904 if (ec > UBI_MAX_ERASECOUNTER) {
905 /*
906 * Erase counter overflow. The EC headers have 64 bits
907 * reserved, but we anyway make use of only 31 bit
908 * values, as this seems to be enough for any existing
909 * flash. Upgrade UBI and use 64-bit erase counters
910 * internally.
911 */
912 ubi_err("erase counter overflow, max is %d",
913 UBI_MAX_ERASECOUNTER);
914 ubi_dbg_dump_ec_hdr(ech);
915 return -EINVAL;
916 }
917
918 /*
919 * Make sure that all PEBs have the same image sequence number.
920 * This allows us to detect situations when users flash UBI
921 * images incorrectly, so that the flash has the new UBI image
922 * and leftovers from the old one. This feature was added
923 * relatively recently, and the sequence number was always
924 * zero, because old UBI implementations always set it to zero.
925 * For this reasons, we do not panic if some PEBs have zero
926 * sequence number, while other PEBs have non-zero sequence
927 * number.
928 */
929 image_seq = be32_to_cpu(ech->image_seq);
930 if (!ubi->image_seq && image_seq)
931 ubi->image_seq = image_seq;
932 if (ubi->image_seq && image_seq &&
933 ubi->image_seq != image_seq) {
934 ubi_err("bad image sequence number %d in PEB %d, "
935 "expected %d", image_seq, pnum, ubi->image_seq);
936 ubi_dbg_dump_ec_hdr(ech);
937 return -EINVAL;
938 }
939 }
940
941 /* OK, we've done with the EC header, let's look at the VID header */
942
943 err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
944 if (err < 0)
945 return err;
946 switch (err) {
947 case 0:
948 break;
949 case UBI_IO_BITFLIPS:
950 bitflips = 1;
951 break;
952 case UBI_IO_BAD_HDR_EBADMSG:
953 if (ec_err == UBI_IO_BAD_HDR_EBADMSG)
954 /*
955 * Both EC and VID headers are corrupted and were read
956 * with data integrity error, probably this is a bad
957 * PEB, bit it is not marked as bad yet. This may also
958 * be a result of power cut during erasure.
959 */
960 si->maybe_bad_peb_count += 1;
961 case UBI_IO_BAD_HDR:
962 if (ec_err)
963 /*
964 * Both headers are corrupted. There is a possibility
965 * that this a valid UBI PEB which has corresponding
966 * LEB, but the headers are corrupted. However, it is
967 * impossible to distinguish it from a PEB which just
968 * contains garbage because of a power cut during erase
969 * operation. So we just schedule this PEB for erasure.
970 *
971 * Besides, in case of NOR flash, we deliberately
972 * corrupt both headers because NOR flash erasure is
973 * slow and can start from the end.
974 */
975 err = 0;
976 else
977 /*
978 * The EC was OK, but the VID header is corrupted. We
979 * have to check what is in the data area.
980 */
981 err = check_corruption(ubi, vidh, pnum);
982
983 if (err < 0)
984 return err;
985 else if (!err)
986 /* This corruption is caused by a power cut */
987 err = add_to_list(si, pnum, ec, 1, &si->erase);
988 else
989 /* This is an unexpected corruption */
990 err = add_corrupted(si, pnum, ec);
991 if (err)
992 return err;
993 goto adjust_mean_ec;
994 case UBI_IO_FF_BITFLIPS:
995 err = add_to_list(si, pnum, ec, 1, &si->erase);
996 if (err)
997 return err;
998 goto adjust_mean_ec;
999 case UBI_IO_FF:
1000 if (ec_err)
1001 err = add_to_list(si, pnum, ec, 1, &si->erase);
1002 else
1003 err = add_to_list(si, pnum, ec, 0, &si->free);
1004 if (err)
1005 return err;
1006 goto adjust_mean_ec;
1007 default:
1008 ubi_err("'ubi_io_read_vid_hdr()' returned unknown code %d",
1009 err);
1010 return -EINVAL;
1011 }
1012
1013 vol_id = be32_to_cpu(vidh->vol_id);
1014 if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
1015 int lnum = be32_to_cpu(vidh->lnum);
1016
1017 /* Unsupported internal volume */
1018 switch (vidh->compat) {
1019 case UBI_COMPAT_DELETE:
1020 ubi_msg("\"delete\" compatible internal volume %d:%d"
1021 " found, will remove it", vol_id, lnum);
1022 err = add_to_list(si, pnum, ec, 1, &si->erase);
1023 if (err)
1024 return err;
1025 return 0;
1026
1027 case UBI_COMPAT_RO:
1028 ubi_msg("read-only compatible internal volume %d:%d"
1029 " found, switch to read-only mode",
1030 vol_id, lnum);
1031 ubi->ro_mode = 1;
1032 break;
1033
1034 case UBI_COMPAT_PRESERVE:
1035 ubi_msg("\"preserve\" compatible internal volume %d:%d"
1036 " found", vol_id, lnum);
1037 err = add_to_list(si, pnum, ec, 0, &si->alien);
1038 if (err)
1039 return err;
1040 return 0;
1041
1042 case UBI_COMPAT_REJECT:
1043 ubi_err("incompatible internal volume %d:%d found",
1044 vol_id, lnum);
1045 return -EINVAL;
1046 }
1047 }
1048
1049 if (ec_err)
1050 ubi_warn("valid VID header but corrupted EC header at PEB %d",
1051 pnum);
1052 err = ubi_scan_add_used(ubi, si, pnum, ec, vidh, bitflips);
1053 if (err)
1054 return err;
1055
1056adjust_mean_ec:
1057 if (!ec_err) {
1058 si->ec_sum += ec;
1059 si->ec_count += 1;
1060 if (ec > si->max_ec)
1061 si->max_ec = ec;
1062 if (ec < si->min_ec)
1063 si->min_ec = ec;
1064 }
1065
1066 return 0;
1067}
1068
1069/**
1070 * check_what_we_have - check what PEB were found by scanning.
1071 * @ubi: UBI device description object
1072 * @si: scanning information
1073 *
1074 * This is a helper function which takes a look what PEBs were found by
1075 * scanning, and decides whether the flash is empty and should be formatted and
1076 * whether there are too many corrupted PEBs and we should not attach this
1077 * MTD device. Returns zero if we should proceed with attaching the MTD device,
1078 * and %-EINVAL if we should not.
1079 */
1080static int check_what_we_have(struct ubi_device *ubi, struct ubi_scan_info *si)
1081{
1082 struct ubi_scan_leb *seb;
1083 int max_corr, peb_count;
1084
1085 peb_count = ubi->peb_count - si->bad_peb_count - si->alien_peb_count;
1086 max_corr = peb_count / 20 ?: 8;
1087
1088 /*
1089 * Few corrupted PEBs is not a problem and may be just a result of
1090 * unclean reboots. However, many of them may indicate some problems
1091 * with the flash HW or driver.
1092 */
1093 if (si->corr_peb_count) {
1094 ubi_err("%d PEBs are corrupted and preserved",
1095 si->corr_peb_count);
1096 printk(KERN_ERR "Corrupted PEBs are:");
1097 list_for_each_entry(seb, &si->corr, u.list)
1098 printk(KERN_CONT " %d", seb->pnum);
1099 printk(KERN_CONT "\n");
1100
1101 /*
1102 * If too many PEBs are corrupted, we refuse attaching,
1103 * otherwise, only print a warning.
1104 */
1105 if (si->corr_peb_count >= max_corr) {
1106 ubi_err("too many corrupted PEBs, refusing");
1107 return -EINVAL;
1108 }
1109 }
1110
1111 if (si->empty_peb_count + si->maybe_bad_peb_count == peb_count) {
1112 /*
1113 * All PEBs are empty, or almost all - a couple PEBs look like
1114 * they may be bad PEBs which were not marked as bad yet.
1115 *
1116 * This piece of code basically tries to distinguish between
1117 * the following situations:
1118 *
1119 * 1. Flash is empty, but there are few bad PEBs, which are not
1120 * marked as bad so far, and which were read with error. We
1121 * want to go ahead and format this flash. While formatting,
1122 * the faulty PEBs will probably be marked as bad.
1123 *
1124 * 2. Flash contains non-UBI data and we do not want to format
1125 * it and destroy possibly important information.
1126 */
1127 if (si->maybe_bad_peb_count <= 2) {
1128 si->is_empty = 1;
1129 ubi_msg("empty MTD device detected");
1130 get_random_bytes(&ubi->image_seq,
1131 sizeof(ubi->image_seq));
1132 } else {
1133 ubi_err("MTD device is not UBI-formatted and possibly "
1134 "contains non-UBI data - refusing it");
1135 return -EINVAL;
1136 }
1137
1138 }
1139
1140 return 0;
1141}
1142
1143/**
1144 * ubi_scan - scan an MTD device.
1145 * @ubi: UBI device description object
1146 *
1147 * This function does full scanning of an MTD device and returns complete
1148 * information about it. In case of failure, an error code is returned.
1149 */
1150struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
1151{
1152 int err, pnum;
1153 struct rb_node *rb1, *rb2;
1154 struct ubi_scan_volume *sv;
1155 struct ubi_scan_leb *seb;
1156 struct ubi_scan_info *si;
1157
1158 si = kzalloc(sizeof(struct ubi_scan_info), GFP_KERNEL);
1159 if (!si)
1160 return ERR_PTR(-ENOMEM);
1161
1162 INIT_LIST_HEAD(&si->corr);
1163 INIT_LIST_HEAD(&si->free);
1164 INIT_LIST_HEAD(&si->erase);
1165 INIT_LIST_HEAD(&si->alien);
1166 si->volumes = RB_ROOT;
1167
1168 err = -ENOMEM;
1169 si->scan_leb_slab = kmem_cache_create("ubi_scan_leb_slab",
1170 sizeof(struct ubi_scan_leb),
1171 0, 0, NULL);
1172 if (!si->scan_leb_slab)
1173 goto out_si;
1174
1175 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1176 if (!ech)
1177 goto out_slab;
1178
1179 vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
1180 if (!vidh)
1181 goto out_ech;
1182
1183 for (pnum = 0; pnum < ubi->peb_count; pnum++) {
1184 cond_resched();
1185
1186 dbg_gen("process PEB %d", pnum);
1187 err = process_eb(ubi, si, pnum);
1188 if (err < 0)
1189 goto out_vidh;
1190 }
1191
1192 dbg_msg("scanning is finished");
1193
1194 /* Calculate mean erase counter */
1195 if (si->ec_count)
1196 si->mean_ec = div_u64(si->ec_sum, si->ec_count);
1197
1198 err = check_what_we_have(ubi, si);
1199 if (err)
1200 goto out_vidh;
1201
1202 /*
1203 * In case of unknown erase counter we use the mean erase counter
1204 * value.
1205 */
1206 ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
1207 ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb)
1208 if (seb->ec == UBI_SCAN_UNKNOWN_EC)
1209 seb->ec = si->mean_ec;
1210 }
1211
1212 list_for_each_entry(seb, &si->free, u.list) {
1213 if (seb->ec == UBI_SCAN_UNKNOWN_EC)
1214 seb->ec = si->mean_ec;
1215 }
1216
1217 list_for_each_entry(seb, &si->corr, u.list)
1218 if (seb->ec == UBI_SCAN_UNKNOWN_EC)
1219 seb->ec = si->mean_ec;
1220
1221 list_for_each_entry(seb, &si->erase, u.list)
1222 if (seb->ec == UBI_SCAN_UNKNOWN_EC)
1223 seb->ec = si->mean_ec;
1224
1225 err = paranoid_check_si(ubi, si);
1226 if (err)
1227 goto out_vidh;
1228
1229 ubi_free_vid_hdr(ubi, vidh);
1230 kfree(ech);
1231
1232 return si;
1233
1234out_vidh:
1235 ubi_free_vid_hdr(ubi, vidh);
1236out_ech:
1237 kfree(ech);
1238out_slab:
1239 kmem_cache_destroy(si->scan_leb_slab);
1240out_si:
1241 ubi_scan_destroy_si(si);
1242 return ERR_PTR(err);
1243}
1244
1245/**
1246 * destroy_sv - free the scanning volume information
1247 * @sv: scanning volume information
1248 * @si: scanning information
1249 *
1250 * This function destroys the volume RB-tree (@sv->root) and the scanning
1251 * volume information.
1252 */
1253static void destroy_sv(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
1254{
1255 struct ubi_scan_leb *seb;
1256 struct rb_node *this = sv->root.rb_node;
1257
1258 while (this) {
1259 if (this->rb_left)
1260 this = this->rb_left;
1261 else if (this->rb_right)
1262 this = this->rb_right;
1263 else {
1264 seb = rb_entry(this, struct ubi_scan_leb, u.rb);
1265 this = rb_parent(this);
1266 if (this) {
1267 if (this->rb_left == &seb->u.rb)
1268 this->rb_left = NULL;
1269 else
1270 this->rb_right = NULL;
1271 }
1272
1273 kmem_cache_free(si->scan_leb_slab, seb);
1274 }
1275 }
1276 kfree(sv);
1277}
1278
1279/**
1280 * ubi_scan_destroy_si - destroy scanning information.
1281 * @si: scanning information
1282 */
1283void ubi_scan_destroy_si(struct ubi_scan_info *si)
1284{
1285 struct ubi_scan_leb *seb, *seb_tmp;
1286 struct ubi_scan_volume *sv;
1287 struct rb_node *rb;
1288
1289 list_for_each_entry_safe(seb, seb_tmp, &si->alien, u.list) {
1290 list_del(&seb->u.list);
1291 kmem_cache_free(si->scan_leb_slab, seb);
1292 }
1293 list_for_each_entry_safe(seb, seb_tmp, &si->erase, u.list) {
1294 list_del(&seb->u.list);
1295 kmem_cache_free(si->scan_leb_slab, seb);
1296 }
1297 list_for_each_entry_safe(seb, seb_tmp, &si->corr, u.list) {
1298 list_del(&seb->u.list);
1299 kmem_cache_free(si->scan_leb_slab, seb);
1300 }
1301 list_for_each_entry_safe(seb, seb_tmp, &si->free, u.list) {
1302 list_del(&seb->u.list);
1303 kmem_cache_free(si->scan_leb_slab, seb);
1304 }
1305
1306 /* Destroy the volume RB-tree */
1307 rb = si->volumes.rb_node;
1308 while (rb) {
1309 if (rb->rb_left)
1310 rb = rb->rb_left;
1311 else if (rb->rb_right)
1312 rb = rb->rb_right;
1313 else {
1314 sv = rb_entry(rb, struct ubi_scan_volume, rb);
1315
1316 rb = rb_parent(rb);
1317 if (rb) {
1318 if (rb->rb_left == &sv->rb)
1319 rb->rb_left = NULL;
1320 else
1321 rb->rb_right = NULL;
1322 }
1323
1324 destroy_sv(si, sv);
1325 }
1326 }
1327
1328 kmem_cache_destroy(si->scan_leb_slab);
1329 kfree(si);
1330}
1331
1332#ifdef CONFIG_MTD_UBI_DEBUG
1333
/**
 * paranoid_check_si - check the scanning information.
 * @ubi: UBI device description object
 * @si: scanning information
 *
 * This function returns zero if the scanning information is all right, and a
 * negative error code if not or if an error occurred.
 */
static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
{
	int pnum, err, vols_found = 0;
	struct rb_node *rb1, *rb2;
	struct ubi_scan_volume *sv;
	struct ubi_scan_leb *seb, *last_seb;
	uint8_t *buf;

	/* These self-checks are optional - skip them unless enabled. */
	if (!ubi->dbg->chk_gen)
		return 0;

	/*
	 * At first, check that scanning information is OK.
	 */
	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
		int leb_count = 0;

		cond_resched();

		vols_found += 1;

		/* The volume tree is non-empty, so the device cannot be. */
		if (si->is_empty) {
			ubi_err("bad is_empty flag");
			goto bad_sv;
		}

		if (sv->vol_id < 0 || sv->highest_lnum < 0 ||
		    sv->leb_count < 0 || sv->vol_type < 0 || sv->used_ebs < 0 ||
		    sv->data_pad < 0 || sv->last_data_size < 0) {
			ubi_err("negative values");
			goto bad_sv;
		}

		/* IDs must be in the user range or the internal range. */
		if (sv->vol_id >= UBI_MAX_VOLUMES &&
		    sv->vol_id < UBI_INTERNAL_VOL_START) {
			ubi_err("bad vol_id");
			goto bad_sv;
		}

		if (sv->vol_id > si->highest_vol_id) {
			ubi_err("highest_vol_id is %d, but vol_id %d is there",
				si->highest_vol_id, sv->vol_id);
			goto out;
		}

		if (sv->vol_type != UBI_DYNAMIC_VOLUME &&
		    sv->vol_type != UBI_STATIC_VOLUME) {
			ubi_err("bad vol_type");
			goto bad_sv;
		}

		if (sv->data_pad > ubi->leb_size / 2) {
			ubi_err("bad data_pad");
			goto bad_sv;
		}

		last_seb = NULL;
		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
			cond_resched();

			last_seb = seb;
			leb_count += 1;

			if (seb->pnum < 0 || seb->ec < 0) {
				ubi_err("negative values");
				goto bad_seb;
			}

			/* Every EC must lie in the [min_ec, max_ec] range. */
			if (seb->ec < si->min_ec) {
				ubi_err("bad si->min_ec (%d), %d found",
					si->min_ec, seb->ec);
				goto bad_seb;
			}

			if (seb->ec > si->max_ec) {
				ubi_err("bad si->max_ec (%d), %d found",
					si->max_ec, seb->ec);
				goto bad_seb;
			}

			if (seb->pnum >= ubi->peb_count) {
				ubi_err("too high PEB number %d, total PEBs %d",
					seb->pnum, ubi->peb_count);
				goto bad_seb;
			}

			/* Only static volumes carry a meaningful used_ebs. */
			if (sv->vol_type == UBI_STATIC_VOLUME) {
				if (seb->lnum >= sv->used_ebs) {
					ubi_err("bad lnum or used_ebs");
					goto bad_seb;
				}
			} else {
				if (sv->used_ebs != 0) {
					ubi_err("non-zero used_ebs");
					goto bad_seb;
				}
			}

			if (seb->lnum > sv->highest_lnum) {
				ubi_err("incorrect highest_lnum or lnum");
				goto bad_seb;
			}
		}

		if (sv->leb_count != leb_count) {
			ubi_err("bad leb_count, %d objects in the tree",
				leb_count);
			goto bad_sv;
		}

		if (!last_seb)
			continue;

		seb = last_seb;

		/*
		 * The last in-order tree entry is expected to carry the
		 * volume's highest LEB number.
		 */
		if (seb->lnum != sv->highest_lnum) {
			ubi_err("bad highest_lnum");
			goto bad_seb;
		}
	}

	if (vols_found != si->vols_found) {
		ubi_err("bad si->vols_found %d, should be %d",
			si->vols_found, vols_found);
		goto out;
	}

	/* Check that scanning information is correct */
	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
		last_seb = NULL;
		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
			int vol_type;

			cond_resched();

			last_seb = seb;

			/*
			 * Re-read the VID header from flash and compare it
			 * with the in-memory scanning data.  NOTE(review):
			 * vidh is presumably a scratch VID-header buffer
			 * defined earlier in this file - confirm.
			 */
			err = ubi_io_read_vid_hdr(ubi, seb->pnum, vidh, 1);
			if (err && err != UBI_IO_BITFLIPS) {
				ubi_err("VID header is not OK (%d)", err);
				if (err > 0)
					err = -EIO;
				return err;
			}

			vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
				   UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
			if (sv->vol_type != vol_type) {
				ubi_err("bad vol_type");
				goto bad_vid_hdr;
			}

			if (seb->sqnum != be64_to_cpu(vidh->sqnum)) {
				ubi_err("bad sqnum %llu", seb->sqnum);
				goto bad_vid_hdr;
			}

			if (sv->vol_id != be32_to_cpu(vidh->vol_id)) {
				ubi_err("bad vol_id %d", sv->vol_id);
				goto bad_vid_hdr;
			}

			if (sv->compat != vidh->compat) {
				ubi_err("bad compat %d", vidh->compat);
				goto bad_vid_hdr;
			}

			if (seb->lnum != be32_to_cpu(vidh->lnum)) {
				ubi_err("bad lnum %d", seb->lnum);
				goto bad_vid_hdr;
			}

			if (sv->used_ebs != be32_to_cpu(vidh->used_ebs)) {
				ubi_err("bad used_ebs %d", sv->used_ebs);
				goto bad_vid_hdr;
			}

			if (sv->data_pad != be32_to_cpu(vidh->data_pad)) {
				ubi_err("bad data_pad %d", sv->data_pad);
				goto bad_vid_hdr;
			}
		}

		if (!last_seb)
			continue;

		/* vidh still holds the header of the last LEB visited. */
		if (sv->highest_lnum != be32_to_cpu(vidh->lnum)) {
			ubi_err("bad highest_lnum %d", sv->highest_lnum);
			goto bad_vid_hdr;
		}

		if (sv->last_data_size != be32_to_cpu(vidh->data_size)) {
			ubi_err("bad last_data_size %d", sv->last_data_size);
			goto bad_vid_hdr;
		}
	}

	/*
	 * Make sure that all the physical eraseblocks are in one of the lists
	 * or trees.  buf is a per-PEB "accounted for" flag array.
	 */
	buf = kzalloc(ubi->peb_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Bad PEBs are legitimately absent from all lists - pre-mark them. */
	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		err = ubi_io_is_bad(ubi, pnum);
		if (err < 0) {
			kfree(buf);
			return err;
		} else if (err)
			buf[pnum] = 1;
	}

	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb)
		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb)
			buf[seb->pnum] = 1;

	list_for_each_entry(seb, &si->free, u.list)
		buf[seb->pnum] = 1;

	list_for_each_entry(seb, &si->corr, u.list)
		buf[seb->pnum] = 1;

	list_for_each_entry(seb, &si->erase, u.list)
		buf[seb->pnum] = 1;

	list_for_each_entry(seb, &si->alien, u.list)
		buf[seb->pnum] = 1;

	/* Any PEB still unmarked is referenced by nothing - report it. */
	err = 0;
	for (pnum = 0; pnum < ubi->peb_count; pnum++)
		if (!buf[pnum]) {
			ubi_err("PEB %d is not referred", pnum);
			err = 1;
		}

	kfree(buf);
	if (err)
		goto out;
	return 0;

bad_seb:
	ubi_err("bad scanning information about LEB %d", seb->lnum);
	ubi_dbg_dump_seb(seb, 0);
	ubi_dbg_dump_sv(sv);
	goto out;

bad_sv:
	ubi_err("bad scanning information about volume %d", sv->vol_id);
	ubi_dbg_dump_sv(sv);
	goto out;

bad_vid_hdr:
	ubi_err("bad scanning information about volume %d", sv->vol_id);
	ubi_dbg_dump_sv(sv);
	ubi_dbg_dump_vid_hdr(vidh);

out:
	ubi_dbg_dump_stack();
	return -EINVAL;
}
1604
1605#endif /* CONFIG_MTD_UBI_DEBUG */
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h
new file mode 100644
index 00000000000..d48aef15ab5
--- /dev/null
+++ b/drivers/mtd/ubi/scan.h
@@ -0,0 +1,174 @@
1/*
2 * Copyright (c) International Business Machines Corp., 2006
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Author: Artem Bityutskiy (Битюцкий Артём)
19 */
20
21#ifndef __UBI_SCAN_H__
22#define __UBI_SCAN_H__
23
24/* The erase counter value for this physical eraseblock is unknown */
25#define UBI_SCAN_UNKNOWN_EC (-1)
26
/**
 * struct ubi_scan_leb - scanning information about a physical eraseblock.
 * @ec: erase counter (%UBI_SCAN_UNKNOWN_EC if it is unknown)
 * @pnum: physical eraseblock number
 * @lnum: logical eraseblock number
 * @scrub: if this physical eraseblock needs scrubbing
 * @copy_flag: this LEB is a copy (@copy_flag is set in VID header of this LEB)
 * @sqnum: sequence number
 * @u: union of the RB-tree and @list links
 * @u.rb: link in the per-volume RB-tree of &struct ubi_scan_leb objects
 * @u.list: link in one of the eraseblock lists
 *
 * One object of this type is allocated for each physical eraseblock during
 * scanning.
 */
struct ubi_scan_leb {
	int ec;
	int pnum;
	int lnum;
	unsigned int scrub:1;
	unsigned int copy_flag:1;
	unsigned long long sqnum;
	/*
	 * An eraseblock is either linked into a per-volume RB-tree or queued
	 * on one of the scanning lists, never both, so the two links share
	 * storage (see ubi_scan_move_to_list()).
	 */
	union {
		struct rb_node rb;
		struct list_head list;
	} u;
};
54
/**
 * struct ubi_scan_volume - scanning information about a volume.
 * @vol_id: volume ID
 * @highest_lnum: highest logical eraseblock number in this volume
 * @leb_count: number of logical eraseblocks in this volume
 * @vol_type: volume type
 * @used_ebs: number of used logical eraseblocks in this volume (only for
 *            static volumes)
 * @last_data_size: amount of data in the last logical eraseblock of this
 *                  volume (always equivalent to the usable logical eraseblock
 *                  size in case of dynamic volumes)
 * @data_pad: how many bytes at the end of logical eraseblocks of this volume
 *            are not used (due to volume alignment)
 * @compat: compatibility flags of this volume
 * @rb: link in the volume RB-tree
 * @root: root of the RB-tree containing all the eraseblock belonging to this
 *        volume (&struct ubi_scan_leb objects)
 *
 * One object of this type is allocated for each volume during scanning.
 */
struct ubi_scan_volume {
	int vol_id;
	int highest_lnum;
	int leb_count;
	int vol_type;
	int used_ebs;
	int last_data_size;
	int data_pad;
	int compat;
	struct rb_node rb;
	/* NOTE(review): presumably keyed by LEB number - the scan self-checks
	 * treat the last in-order entry as the highest LEB; confirm against
	 * ubi_scan_add_used() in scan.c. */
	struct rb_root root;
};
87
/**
 * struct ubi_scan_info - UBI scanning information.
 * @volumes: root of the volume RB-tree
 * @corr: list of corrupted physical eraseblocks
 * @free: list of free physical eraseblocks
 * @erase: list of physical eraseblocks which have to be erased
 * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
 *         those belonging to "preserve"-compatible internal volumes)
 * @corr_peb_count: count of PEBs in the @corr list
 * @empty_peb_count: count of PEBs which are presumably empty (contain only
 *                   0xFF bytes)
 * @alien_peb_count: count of PEBs in the @alien list
 * @bad_peb_count: count of bad physical eraseblocks
 * @maybe_bad_peb_count: count of bad physical eraseblocks which are not marked
 *                       as bad yet, but which look like bad
 * @vols_found: number of volumes found during scanning
 * @highest_vol_id: highest volume ID
 * @is_empty: flag indicating whether the MTD device is empty or not
 * @min_ec: lowest erase counter value
 * @max_ec: highest erase counter value
 * @max_sqnum: highest sequence number value
 * @mean_ec: mean erase counter value
 * @ec_sum: a temporary variable used when calculating @mean_ec
 * @ec_count: a temporary variable used when calculating @mean_ec
 * @scan_leb_slab: slab cache for &struct ubi_scan_leb objects
 *
 * This data structure contains the result of scanning and may be used by other
 * UBI sub-systems to build final UBI data structures, further error-recovery
 * and so on.
 */
struct ubi_scan_info {
	struct rb_root volumes;
	struct list_head corr;
	struct list_head free;
	struct list_head erase;
	struct list_head alien;
	int corr_peb_count;
	int empty_peb_count;
	int alien_peb_count;
	int bad_peb_count;
	int maybe_bad_peb_count;
	int vols_found;
	int highest_vol_id;
	int is_empty;
	int min_ec;
	int max_ec;
	unsigned long long max_sqnum;
	int mean_ec;
	uint64_t ec_sum;
	int ec_count;
	/* Backs all &struct ubi_scan_leb allocations; destroyed together with
	 * this object by ubi_scan_destroy_si(). */
	struct kmem_cache *scan_leb_slab;
};
140
141struct ubi_device;
142struct ubi_vid_hdr;
143
/**
 * ubi_scan_move_to_list - move a PEB from the volume RB-tree to a list.
 * @sv: volume scanning information
 * @seb: scanning eraseblock information
 * @list: the list to move to
 *
 * Note: @seb->u is a union, so the eraseblock must be detached from the
 * RB-tree before its list link is written - the order of the two calls below
 * is significant.
 */
static inline void ubi_scan_move_to_list(struct ubi_scan_volume *sv,
					 struct ubi_scan_leb *seb,
					 struct list_head *list)
{
	rb_erase(&seb->u.rb, &sv->root);
	list_add_tail(&seb->u.list, list);
}
158
159int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
160 int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
161 int bitflips);
162struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
163 int vol_id);
164struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv,
165 int lnum);
166void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv);
167struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
168 struct ubi_scan_info *si);
169int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si,
170 int pnum, int ec);
171struct ubi_scan_info *ubi_scan(struct ubi_device *ubi);
172void ubi_scan_destroy_si(struct ubi_scan_info *si);
173
174#endif /* !__UBI_SCAN_H__ */