Diffstat (limited to 'drivers/mtd/devices')
-rw-r--r--  drivers/mtd/devices/tegra_nand.c  1802
-rw-r--r--  drivers/mtd/devices/tegra_nand.h   148
2 files changed, 1950 insertions, 0 deletions

diff --git a/drivers/mtd/devices/tegra_nand.c b/drivers/mtd/devices/tegra_nand.c
new file mode 100644
index 00000000000..c8a3e7090b9
--- /dev/null
+++ b/drivers/mtd/devices/tegra_nand.c
@@ -0,0 +1,1802 @@
1 | /* | ||
2 | * drivers/mtd/devices/tegra_nand.c | ||
3 | * | ||
4 | * Copyright (C) 2010 Google, Inc. | ||
5 | * Author: Dima Zavin <dima@android.com> | ||
6 | * Colin Cross <ccross@android.com> | ||
7 | * | ||
8 | * Copyright (C) 2010-2011 Nvidia Graphics Pvt. Ltd. | ||
9 | * http://www.nvidia.com | ||
10 | * | ||
11 | * This software is licensed under the terms of the GNU General Public | ||
12 | * License version 2, as published by the Free Software Foundation, and | ||
13 | * may be copied, distributed, and modified under those terms. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | * GNU General Public License for more details. | ||
19 | * | ||
20 | * Derived from: drivers/mtd/nand/nand_base.c | ||
21 | * drivers/mtd/nand/pxa3xx.c | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #include <linux/delay.h> | ||
26 | #include <linux/dma-mapping.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | #include <linux/io.h> | ||
30 | #include <linux/module.h> | ||
31 | #include <linux/mutex.h> | ||
32 | #include <linux/mtd/nand.h> | ||
33 | #include <linux/mtd/mtd.h> | ||
34 | #include <linux/mtd/partitions.h> | ||
35 | #include <linux/platform_device.h> | ||
36 | #include <linux/types.h> | ||
37 | #include <linux/clk.h> | ||
38 | #include <linux/slab.h> | ||
39 | #include <linux/gpio.h> | ||
40 | |||
41 | #include <mach/nand.h> | ||
42 | |||
43 | #include "tegra_nand.h" | ||
44 | |||
45 | #define DRIVER_NAME "tegra_nand" | ||
46 | #define DRIVER_DESC "Nvidia Tegra NAND Flash Controller driver" | ||
47 | |||
48 | #define MAX_DMA_SZ SZ_64K | ||
49 | #define ECC_BUF_SZ SZ_1K | ||
50 | |||
51 | /* FIXME: is this right?! | ||
52 | * NvRM code says it should be 128 bytes, but that seems awfully small | ||
53 | */ | ||
54 | |||
55 | /*#define TEGRA_NAND_DEBUG | ||
56 | #define TEGRA_NAND_DEBUG_PEDANTIC*/ | ||
57 | |||
58 | #ifdef TEGRA_NAND_DEBUG | ||
59 | #define TEGRA_DBG(fmt, args...) \ | ||
60 | do { pr_info(fmt, ##args); } while (0) | ||
61 | #else | ||
62 | #define TEGRA_DBG(fmt, args...) | ||
63 | #endif | ||
64 | |||
65 | /* TODO: will vary with devices, move into appropriate device-specific header */ | ||
66 | #define SCAN_TIMING_VAL 0x3f0bd214 | ||
67 | #define SCAN_TIMING2_VAL 0xb | ||
68 | |||
69 | #define TIMEOUT (2 * HZ) | ||
70 | /* TODO: pull in the register defs (fields, masks, etc) from Nvidia files | ||
71 | * so we don't have to redefine them */ | ||
72 | |||
73 | static const char *part_probes[] = { "cmdlinepart", NULL, }; | ||
74 | |||
75 | struct tegra_nand_chip { | ||
76 | spinlock_t lock; | ||
77 | uint32_t chipsize; | ||
78 | int num_chips; | ||
79 | int curr_chip; | ||
80 | |||
81 | /* addr >> chip_shift == chip number */ | ||
82 | uint32_t chip_shift; | ||
83 | /* (addr >> page_shift) & page_mask == page number within chip */ | ||
84 | uint32_t page_shift; | ||
85 | uint32_t page_mask; | ||
86 | /* column within page */ | ||
87 | uint32_t column_mask; | ||
88 | /* addr >> block_shift == block number (across the whole mtd dev, not | ||
89 | * just a single chip). */ | ||
90 | uint32_t block_shift; | ||
91 | |||
92 | void *priv; | ||
93 | }; | ||
94 | |||
95 | struct tegra_nand_info { | ||
96 | struct tegra_nand_chip chip; | ||
97 | struct mtd_info mtd; | ||
98 | struct tegra_nand_platform *plat; | ||
99 | struct device *dev; | ||
100 | struct mtd_partition *parts; | ||
101 | |||
102 | /* synchronizes access to the actual NAND controller */ | ||
103 | struct mutex lock; | ||
104 | /* partial_unaligned_rw_buffer is a temporary buffer used when | ||
105 | reading unaligned data from NAND pages, or when the data to be | ||
106 | read is less than the NAND page size. | ||
107 | */ | ||
108 | uint8_t *partial_unaligned_rw_buffer; | ||
109 | |||
110 | void *oob_dma_buf; | ||
111 | dma_addr_t oob_dma_addr; | ||
112 | /* ecc error vector info (offset into page and data mask to apply) */ | ||
113 | void *ecc_buf; | ||
114 | dma_addr_t ecc_addr; | ||
115 | /* ecc error status (page number, err_cnt) */ | ||
116 | uint32_t *ecc_errs; | ||
117 | uint32_t num_ecc_errs; | ||
118 | uint32_t max_ecc_errs; | ||
119 | spinlock_t ecc_lock; | ||
120 | |||
121 | uint32_t command_reg; | ||
122 | uint32_t config_reg; | ||
123 | uint32_t dmactrl_reg; | ||
124 | |||
125 | struct completion cmd_complete; | ||
126 | struct completion dma_complete; | ||
127 | |||
128 | /* bad block bitmap: 1 == good, 0 == bad/unknown */ | ||
129 | unsigned long *bb_bitmap; | ||
130 | |||
131 | struct clk *clk; | ||
132 | uint32_t is_data_bus_width_16; | ||
133 | uint32_t device_id; | ||
134 | uint32_t vendor_id; | ||
135 | uint32_t dev_parms; | ||
136 | uint32_t num_bad_blocks; | ||
137 | }; | ||
138 | #define MTD_TO_INFO(mtd) container_of((mtd), struct tegra_nand_info, mtd) | ||
139 | |||
140 | /* 64 byte oob block info for large page (== 2KB) device | ||
141 | * | ||
142 | * OOB flash layout for Tegra with Reed-Solomon 4 symbol correct ECC: | ||
143 | * Skipped bytes(4) | ||
144 | * Main area Ecc(36) | ||
145 | * Tag data(20) | ||
146 | * Tag data Ecc(4) | ||
147 | * | ||
148 | * Yaffs2 will use 16 tag bytes. | ||
149 | */ | ||
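/* Byte accounting for the 64-byte OOB layout described above:
 * 4 (skipped) + 36 (main-area ECC) + 20 (tag data) + 4 (tag ECC) = 64 bytes.
 */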
150 | |||
151 | static struct nand_ecclayout tegra_nand_oob_64 = { | ||
152 | .eccbytes = 36, | ||
153 | .eccpos = { | ||
154 | 4, 5, 6, 7, 8, 9, 10, 11, 12, | ||
155 | 13, 14, 15, 16, 17, 18, 19, 20, 21, | ||
156 | 22, 23, 24, 25, 26, 27, 28, 29, 30, | ||
157 | 31, 32, 33, 34, 35, 36, 37, 38, 39, | ||
158 | }, | ||
159 | .oobavail = 20, | ||
160 | .oobfree = { | ||
161 | {.offset = 40, | ||
162 | .length = 20, | ||
163 | }, | ||
164 | }, | ||
165 | }; | ||
166 | |||
167 | static struct nand_ecclayout tegra_nand_oob_128 = { | ||
168 | .eccbytes = 72, | ||
169 | .eccpos = { | ||
170 | 4, 5, 6, 7, 8, 9, 10, 11, 12, | ||
171 | 13, 14, 15, 16, 17, 18, 19, 20, 21, | ||
172 | 22, 23, 24, 25, 26, 27, 28, 29, 30, | ||
173 | 31, 32, 33, 34, 35, 36, 37, 38, 39, | ||
174 | 40, 41, 42, 43, 44, 45, 46, 47, 48, | ||
175 | 49, 50, 51, 52, 53, 54, 55, 56, 57, | ||
176 | 58, 59, 60, 61, 62, 63, 64, 65, 66, | ||
177 | /* The eccpos array only holds 64 entries, so the remaining byte | ||
178 | * positions are commented out here. Since the driver uses hardware | ||
179 | * ECC, this is not a problem. | ||
180 | */ | ||
181 | /*67, 68, 69, 70, 71, 72, 73, 74, 75, */ | ||
182 | }, | ||
183 | .oobavail = 48, | ||
184 | .oobfree = { | ||
185 | {.offset = 76, | ||
186 | .length = 48, | ||
187 | }, | ||
188 | }, | ||
189 | }; | ||
190 | |||
191 | static struct nand_flash_dev *find_nand_flash_device(int dev_id) | ||
192 | { | ||
193 | struct nand_flash_dev *dev = &nand_flash_ids[0]; | ||
194 | |||
195 | while (dev->name && dev->id != dev_id) | ||
196 | dev++; | ||
197 | return dev->name ? dev : NULL; | ||
198 | } | ||
199 | |||
200 | static struct nand_manufacturers *find_nand_flash_vendor(int vendor_id) | ||
201 | { | ||
202 | struct nand_manufacturers *vendor = &nand_manuf_ids[0]; | ||
203 | |||
204 | while (vendor->id && vendor->id != vendor_id) | ||
205 | vendor++; | ||
206 | return vendor->id ? vendor : NULL; | ||
207 | } | ||
208 | |||
209 | #define REG_NAME(name) { name, #name } | ||
210 | static struct { | ||
211 | uint32_t addr; | ||
212 | char *name; | ||
213 | } reg_names[] = { | ||
214 | REG_NAME(COMMAND_REG), | ||
215 | REG_NAME(STATUS_REG), | ||
216 | REG_NAME(ISR_REG), | ||
217 | REG_NAME(IER_REG), | ||
218 | REG_NAME(CONFIG_REG), | ||
219 | REG_NAME(TIMING_REG), | ||
220 | REG_NAME(RESP_REG), | ||
221 | REG_NAME(TIMING2_REG), | ||
222 | REG_NAME(CMD_REG1), | ||
223 | REG_NAME(CMD_REG2), | ||
224 | REG_NAME(ADDR_REG1), | ||
225 | REG_NAME(ADDR_REG2), | ||
226 | REG_NAME(DMA_MST_CTRL_REG), | ||
227 | REG_NAME(DMA_CFG_A_REG), | ||
228 | REG_NAME(DMA_CFG_B_REG), | ||
229 | REG_NAME(FIFO_CTRL_REG), | ||
230 | REG_NAME(DATA_BLOCK_PTR_REG), | ||
231 | REG_NAME(TAG_PTR_REG), | ||
232 | REG_NAME(ECC_PTR_REG), | ||
233 | REG_NAME(DEC_STATUS_REG), | ||
234 | REG_NAME(HWSTATUS_CMD_REG), | ||
235 | REG_NAME(HWSTATUS_MASK_REG), | ||
236 | {0, NULL}, | ||
237 | }; | ||
238 | |||
239 | #undef REG_NAME | ||
240 | |||
241 | static int dump_nand_regs(void) | ||
242 | { | ||
243 | int i = 0; | ||
244 | |||
245 | TEGRA_DBG("%s: dumping registers\n", __func__); | ||
246 | while (reg_names[i].name != NULL) { | ||
247 | TEGRA_DBG("%s = 0x%08x\n", reg_names[i].name, | ||
248 | readl(reg_names[i].addr)); | ||
249 | i++; | ||
250 | } | ||
251 | TEGRA_DBG("%s: end of reg dump\n", __func__); | ||
252 | return 1; | ||
253 | } | ||
254 | |||
255 | static inline void enable_ints(struct tegra_nand_info *info, uint32_t mask) | ||
256 | { | ||
257 | (void)info; | ||
258 | writel(readl(IER_REG) | mask, IER_REG); | ||
259 | } | ||
260 | |||
261 | static inline void disable_ints(struct tegra_nand_info *info, uint32_t mask) | ||
262 | { | ||
263 | (void)info; | ||
264 | writel(readl(IER_REG) & ~mask, IER_REG); | ||
265 | } | ||
266 | |||
267 | static inline void | ||
268 | split_addr(struct tegra_nand_info *info, loff_t offset, int *chipnr, | ||
269 | uint32_t *page, uint32_t *column) | ||
270 | { | ||
271 | *chipnr = (int)(offset >> info->chip.chip_shift); | ||
272 | *page = (offset >> info->chip.page_shift) & info->chip.page_mask; | ||
273 | *column = offset & info->chip.column_mask; | ||
274 | } | ||
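/* Worked example (illustrative; assumes a hypothetical part with 2KiB pages
 * and a 1GiB chip, i.e. page_shift == 11, chip_shift == 30,
 * column_mask == 0x7ff, page_mask == 0x7ffff):
 *   offset 0x40001234 -> chipnr = 1, page = 0x2, column = 0x234.
 */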
275 | |||
276 | static irqreturn_t tegra_nand_irq(int irq, void *dev_id) | ||
277 | { | ||
278 | struct tegra_nand_info *info = dev_id; | ||
279 | uint32_t isr; | ||
280 | uint32_t ier; | ||
281 | uint32_t dma_ctrl; | ||
282 | uint32_t tmp; | ||
283 | |||
284 | isr = readl(ISR_REG); | ||
285 | ier = readl(IER_REG); | ||
286 | dma_ctrl = readl(DMA_MST_CTRL_REG); | ||
287 | #ifdef DEBUG_DUMP_IRQ | ||
288 | pr_info("IRQ: ISR=0x%08x IER=0x%08x DMA_IS=%d DMA_IE=%d\n", | ||
289 | isr, ier, !!(dma_ctrl & (1 << 20)), !!(dma_ctrl & (1 << 28))); | ||
290 | #endif | ||
291 | if (isr & ISR_CMD_DONE) { | ||
292 | if (likely(!(readl(COMMAND_REG) & COMMAND_GO))) | ||
293 | complete(&info->cmd_complete); | ||
294 | else | ||
295 | pr_err("tegra_nand_irq: Spurious cmd done irq!\n"); | ||
296 | } | ||
297 | |||
298 | if (isr & ISR_ECC_ERR) { | ||
299 | /* always want to read the decode status so xfers don't stall. */ | ||
300 | tmp = readl(DEC_STATUS_REG); | ||
301 | |||
302 | /* was ECC check actually enabled */ | ||
303 | if ((ier & IER_ECC_ERR)) { | ||
304 | unsigned long flags; | ||
305 | spin_lock_irqsave(&info->ecc_lock, flags); | ||
306 | info->ecc_errs[info->num_ecc_errs++] = tmp; | ||
307 | spin_unlock_irqrestore(&info->ecc_lock, flags); | ||
308 | } | ||
309 | } | ||
310 | |||
311 | if ((dma_ctrl & DMA_CTRL_IS_DMA_DONE) && | ||
312 | (dma_ctrl & DMA_CTRL_IE_DMA_DONE)) { | ||
313 | complete(&info->dma_complete); | ||
314 | writel(dma_ctrl, DMA_MST_CTRL_REG); | ||
315 | } | ||
316 | |||
317 | if ((isr & ISR_UND) && (ier & IER_UND)) | ||
318 | pr_err("%s: fifo underrun.\n", __func__); | ||
319 | |||
320 | if ((isr & ISR_OVR) && (ier & IER_OVR)) | ||
321 | pr_err("%s: fifo overrun.\n", __func__); | ||
322 | |||
323 | /* clear ALL interrupts?! */ | ||
324 | writel(isr & 0xfffc, ISR_REG); | ||
325 | |||
326 | return IRQ_HANDLED; | ||
327 | } | ||
328 | |||
329 | static inline int tegra_nand_is_cmd_done(struct tegra_nand_info *info) | ||
330 | { | ||
331 | return (readl(COMMAND_REG) & COMMAND_GO) ? 0 : 1; | ||
332 | } | ||
333 | |||
334 | static int tegra_nand_wait_cmd_done(struct tegra_nand_info *info) | ||
335 | { | ||
336 | uint32_t timeout = TIMEOUT; /* TODO: make this realistic */ | ||
337 | int ret; | ||
338 | |||
339 | ret = wait_for_completion_timeout(&info->cmd_complete, timeout); | ||
340 | |||
341 | #ifdef TEGRA_NAND_DEBUG_PEDANTIC | ||
342 | BUG_ON(!ret && dump_nand_regs()); | ||
343 | #endif | ||
344 | |||
345 | return ret ? 0 : -ETIMEDOUT; | ||
346 | } | ||
347 | |||
348 | static inline void select_chip(struct tegra_nand_info *info, int chipnr) | ||
349 | { | ||
350 | BUG_ON(chipnr != -1 && chipnr >= info->plat->max_chips); | ||
351 | info->chip.curr_chip = chipnr; | ||
352 | } | ||
353 | |||
354 | static void cfg_hwstatus_mon(struct tegra_nand_info *info) | ||
355 | { | ||
356 | uint32_t val; | ||
357 | |||
358 | val = (HWSTATUS_RDSTATUS_MASK(1) | | ||
359 | HWSTATUS_RDSTATUS_EXP_VAL(0) | | ||
360 | HWSTATUS_RBSY_MASK(NAND_STATUS_READY) | | ||
361 | HWSTATUS_RBSY_EXP_VAL(NAND_STATUS_READY)); | ||
362 | writel(NAND_CMD_STATUS, HWSTATUS_CMD_REG); | ||
363 | writel(val, HWSTATUS_MASK_REG); | ||
364 | } | ||
365 | |||
366 | /* Tells the NAND controller to initiate the command. */ | ||
367 | static int tegra_nand_go(struct tegra_nand_info *info) | ||
368 | { | ||
369 | BUG_ON(!tegra_nand_is_cmd_done(info)); | ||
370 | |||
371 | INIT_COMPLETION(info->cmd_complete); | ||
372 | writel(info->command_reg | COMMAND_GO, COMMAND_REG); | ||
373 | |||
374 | if (unlikely(tegra_nand_wait_cmd_done(info))) { | ||
375 | /* TODO: abort command if needed? */ | ||
376 | pr_err("%s: Timeout while waiting for command\n", __func__); | ||
377 | return -ETIMEDOUT; | ||
378 | } | ||
379 | |||
380 | /* TODO: maybe wait for dma here? */ | ||
381 | return 0; | ||
382 | } | ||
383 | |||
384 | static void tegra_nand_prep_readid(struct tegra_nand_info *info) | ||
385 | { | ||
386 | info->command_reg = | ||
387 | (COMMAND_CLE | COMMAND_ALE | COMMAND_PIO | COMMAND_RX | | ||
388 | COMMAND_ALE_BYTE_SIZE(0) | COMMAND_TRANS_SIZE(3) | | ||
389 | (COMMAND_CE(info->chip.curr_chip))); | ||
390 | writel(NAND_CMD_READID, CMD_REG1); | ||
391 | writel(0, CMD_REG2); | ||
392 | writel(0, ADDR_REG1); | ||
393 | writel(0, ADDR_REG2); | ||
394 | writel(0, CONFIG_REG); | ||
395 | } | ||
396 | |||
397 | static int | ||
398 | tegra_nand_cmd_readid(struct tegra_nand_info *info, uint32_t *chip_id) | ||
399 | { | ||
400 | int err; | ||
401 | |||
402 | #ifdef TEGRA_NAND_DEBUG_PEDANTIC | ||
403 | BUG_ON(info->chip.curr_chip == -1); | ||
404 | #endif | ||
405 | |||
406 | tegra_nand_prep_readid(info); | ||
407 | err = tegra_nand_go(info); | ||
408 | if (err != 0) | ||
409 | return err; | ||
410 | |||
411 | *chip_id = readl(RESP_REG); | ||
412 | return 0; | ||
413 | } | ||
414 | |||
415 | /* assumes right locks are held */ | ||
416 | static int nand_cmd_get_status(struct tegra_nand_info *info, uint32_t *status) | ||
417 | { | ||
418 | int err; | ||
419 | |||
420 | info->command_reg = (COMMAND_CLE | COMMAND_PIO | COMMAND_RX | | ||
421 | COMMAND_RBSY_CHK | | ||
422 | (COMMAND_CE(info->chip.curr_chip))); | ||
423 | writel(NAND_CMD_STATUS, CMD_REG1); | ||
424 | writel(0, CMD_REG2); | ||
425 | writel(0, ADDR_REG1); | ||
426 | writel(0, ADDR_REG2); | ||
427 | writel(CONFIG_COM_BSY, CONFIG_REG); | ||
428 | |||
429 | err = tegra_nand_go(info); | ||
430 | if (err != 0) | ||
431 | return err; | ||
432 | |||
433 | *status = readl(RESP_REG) & 0xff; | ||
434 | return 0; | ||
435 | } | ||
436 | |||
437 | /* must be called with lock held */ | ||
438 | static int check_block_isbad(struct mtd_info *mtd, loff_t offs) | ||
439 | { | ||
440 | struct tegra_nand_info *info = MTD_TO_INFO(mtd); | ||
441 | uint32_t block = offs >> info->chip.block_shift; | ||
442 | int chipnr; | ||
443 | uint32_t page; | ||
444 | uint32_t column; | ||
445 | int ret = 0; | ||
446 | int i; | ||
447 | |||
448 | if (info->bb_bitmap[BIT_WORD(block)] & BIT_MASK(block)) | ||
449 | return 0; | ||
450 | |||
451 | offs &= ~(mtd->erasesize - 1); | ||
452 | |||
453 | if (info->is_data_bus_width_16) | ||
454 | writel(CONFIG_COM_BSY | CONFIG_BUS_WIDTH, CONFIG_REG); | ||
455 | else | ||
456 | writel(CONFIG_COM_BSY, CONFIG_REG); | ||
457 | |||
458 | split_addr(info, offs, &chipnr, &page, &column); | ||
459 | select_chip(info, chipnr); | ||
460 | |||
461 | column = mtd->writesize & 0xffff; /* force to be the offset of OOB */ | ||
462 | |||
463 | /* check the first two pages of the block */ | ||
464 | if (info->is_data_bus_width_16) | ||
465 | column = column >> 1; | ||
466 | for (i = 0; i < 2; ++i) { | ||
467 | info->command_reg = | ||
468 | COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE | | ||
469 | COMMAND_ALE | COMMAND_ALE_BYTE_SIZE(4) | COMMAND_RX | | ||
470 | COMMAND_PIO | COMMAND_TRANS_SIZE(1) | COMMAND_A_VALID | | ||
471 | COMMAND_RBSY_CHK | COMMAND_SEC_CMD; | ||
472 | writel(NAND_CMD_READ0, CMD_REG1); | ||
473 | writel(NAND_CMD_READSTART, CMD_REG2); | ||
474 | |||
475 | writel(column | ((page & 0xffff) << 16), ADDR_REG1); | ||
476 | writel((page >> 16) & 0xff, ADDR_REG2); | ||
477 | |||
478 | /* ... poison me ... */ | ||
479 | writel(0xaa55aa55, RESP_REG); | ||
480 | ret = tegra_nand_go(info); | ||
481 | if (ret != 0) { | ||
482 | pr_info("baaaaaad\n"); | ||
483 | goto out; | ||
484 | } | ||
485 | |||
486 | if ((readl(RESP_REG) & 0xffff) != 0xffff) { | ||
487 | ret = 1; | ||
488 | goto out; | ||
489 | } | ||
490 | |||
491 | /* Note: The assumption here is that we cannot cross chip | ||
492 | * boundary, since we are only looking at the first 2 pages in | ||
493 | * a block, i.e. erasesize > writesize ALWAYS */ | ||
494 | page++; | ||
495 | } | ||
496 | |||
497 | out: | ||
498 | /* update the bitmap if the block is good */ | ||
499 | if (ret == 0) | ||
500 | set_bit(block, info->bb_bitmap); | ||
501 | return ret; | ||
502 | } | ||
503 | |||
504 | static int tegra_nand_block_isbad(struct mtd_info *mtd, loff_t offs) | ||
505 | { | ||
506 | struct tegra_nand_info *info = MTD_TO_INFO(mtd); | ||
507 | int ret; | ||
508 | |||
509 | if (offs >= mtd->size) | ||
510 | return -EINVAL; | ||
511 | |||
512 | mutex_lock(&info->lock); | ||
513 | ret = check_block_isbad(mtd, offs); | ||
514 | mutex_unlock(&info->lock); | ||
515 | |||
516 | #if 0 | ||
517 | if (ret > 0) | ||
518 | pr_info("block @ 0x%llx is bad.\n", offs); | ||
519 | else if (ret < 0) | ||
520 | pr_err("error checking block @ 0x%llx for badness.\n", offs); | ||
521 | #endif | ||
522 | |||
523 | return ret; | ||
524 | } | ||
525 | |||
526 | static int tegra_nand_block_markbad(struct mtd_info *mtd, loff_t offs) | ||
527 | { | ||
528 | struct tegra_nand_info *info = MTD_TO_INFO(mtd); | ||
529 | uint32_t block = offs >> info->chip.block_shift; | ||
530 | int chipnr; | ||
531 | uint32_t page; | ||
532 | uint32_t column; | ||
533 | int ret = 0; | ||
534 | int i; | ||
535 | |||
536 | if (offs >= mtd->size) | ||
537 | return -EINVAL; | ||
538 | |||
539 | pr_info("tegra_nand: setting block %d bad\n", block); | ||
540 | |||
541 | mutex_lock(&info->lock); | ||
542 | offs &= ~(mtd->erasesize - 1); | ||
543 | |||
544 | /* mark the block bad in our bitmap */ | ||
545 | clear_bit(block, info->bb_bitmap); | ||
546 | mtd->ecc_stats.badblocks++; | ||
547 | |||
548 | if (info->is_data_bus_width_16) | ||
549 | writel(CONFIG_COM_BSY | CONFIG_BUS_WIDTH, CONFIG_REG); | ||
550 | else | ||
551 | writel(CONFIG_COM_BSY, CONFIG_REG); | ||
552 | |||
553 | split_addr(info, offs, &chipnr, &page, &column); | ||
554 | select_chip(info, chipnr); | ||
555 | |||
556 | column = mtd->writesize & 0xffff; /* force to be the offset of OOB */ | ||
557 | if (info->is_data_bus_width_16) | ||
558 | column = column >> 1; | ||
559 | /* write to the first two pages in the block */ | ||
560 | for (i = 0; i < 2; ++i) { | ||
561 | info->command_reg = | ||
562 | COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE | | ||
563 | COMMAND_ALE | COMMAND_ALE_BYTE_SIZE(4) | COMMAND_TX | | ||
564 | COMMAND_PIO | COMMAND_TRANS_SIZE(1) | COMMAND_A_VALID | | ||
565 | COMMAND_RBSY_CHK | COMMAND_AFT_DAT | COMMAND_SEC_CMD; | ||
566 | writel(NAND_CMD_SEQIN, CMD_REG1); | ||
567 | writel(NAND_CMD_PAGEPROG, CMD_REG2); | ||
568 | |||
569 | writel(column | ((page & 0xffff) << 16), ADDR_REG1); | ||
570 | writel((page >> 16) & 0xff, ADDR_REG2); | ||
571 | |||
572 | writel(0x0, RESP_REG); | ||
573 | ret = tegra_nand_go(info); | ||
574 | if (ret != 0) | ||
575 | goto out; | ||
576 | |||
577 | /* TODO: check if the program op worked? */ | ||
578 | page++; | ||
579 | } | ||
580 | |||
581 | out: | ||
582 | mutex_unlock(&info->lock); | ||
583 | return ret; | ||
584 | } | ||
585 | |||
586 | static int tegra_nand_erase(struct mtd_info *mtd, struct erase_info *instr) | ||
587 | { | ||
588 | struct tegra_nand_info *info = MTD_TO_INFO(mtd); | ||
589 | uint32_t num_blocks; | ||
590 | uint32_t offs; | ||
591 | int chipnr; | ||
592 | uint32_t page; | ||
593 | uint32_t column; | ||
594 | uint32_t status = 0; | ||
595 | |||
596 | TEGRA_DBG("tegra_nand_erase: addr=0x%08llx len=%lld\n", instr->addr, | ||
597 | instr->len); | ||
598 | |||
599 | if ((instr->addr + instr->len) > mtd->size) { | ||
600 | pr_err("tegra_nand_erase: Can't erase past end of device\n"); | ||
601 | instr->state = MTD_ERASE_FAILED; | ||
602 | return -EINVAL; | ||
603 | } | ||
604 | |||
605 | if (instr->addr & (mtd->erasesize - 1)) { | ||
606 | pr_err("tegra_nand_erase: addr=0x%08llx not block-aligned\n", | ||
607 | instr->addr); | ||
608 | instr->state = MTD_ERASE_FAILED; | ||
609 | return -EINVAL; | ||
610 | } | ||
611 | |||
612 | if (instr->len & (mtd->erasesize - 1)) { | ||
613 | pr_err("tegra_nand_erase: len=%lld not block-aligned\n", | ||
614 | instr->len); | ||
615 | instr->state = MTD_ERASE_FAILED; | ||
616 | return -EINVAL; | ||
617 | } | ||
618 | |||
619 | instr->fail_addr = 0xffffffff; | ||
620 | |||
621 | mutex_lock(&info->lock); | ||
622 | |||
623 | instr->state = MTD_ERASING; | ||
624 | |||
625 | offs = instr->addr; | ||
626 | num_blocks = instr->len >> info->chip.block_shift; | ||
627 | |||
628 | select_chip(info, -1); | ||
629 | |||
630 | while (num_blocks--) { | ||
631 | split_addr(info, offs, &chipnr, &page, &column); | ||
632 | if (chipnr != info->chip.curr_chip) | ||
633 | select_chip(info, chipnr); | ||
634 | TEGRA_DBG("tegra_nand_erase: addr=0x%08x, page=0x%08x\n", offs, | ||
635 | page); | ||
636 | |||
637 | if (check_block_isbad(mtd, offs)) { | ||
638 | pr_info("%s: skipping bad block @ 0x%08x\n", __func__, | ||
639 | offs); | ||
640 | goto next_block; | ||
641 | } | ||
642 | |||
643 | info->command_reg = | ||
644 | COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE | | ||
645 | COMMAND_ALE | COMMAND_ALE_BYTE_SIZE(2) | | ||
646 | COMMAND_RBSY_CHK | COMMAND_SEC_CMD; | ||
647 | writel(NAND_CMD_ERASE1, CMD_REG1); | ||
648 | writel(NAND_CMD_ERASE2, CMD_REG2); | ||
649 | |||
650 | writel(page & 0xffffff, ADDR_REG1); | ||
651 | writel(0, ADDR_REG2); | ||
652 | writel(CONFIG_COM_BSY, CONFIG_REG); | ||
653 | |||
654 | if (tegra_nand_go(info) != 0) { | ||
655 | instr->fail_addr = offs; | ||
656 | goto out_err; | ||
657 | } | ||
658 | |||
659 | /* TODO: do we want a timeout here? */ | ||
660 | if ((nand_cmd_get_status(info, &status) != 0) || | ||
661 | (status & NAND_STATUS_FAIL) || | ||
662 | ((status & NAND_STATUS_READY) != NAND_STATUS_READY)) { | ||
663 | instr->fail_addr = offs; | ||
664 | pr_info("%s: erase failed @ 0x%08x (stat=0x%08x)\n", | ||
665 | __func__, offs, status); | ||
666 | goto out_err; | ||
667 | } | ||
668 | next_block: | ||
669 | offs += mtd->erasesize; | ||
670 | } | ||
671 | |||
672 | instr->state = MTD_ERASE_DONE; | ||
673 | mutex_unlock(&info->lock); | ||
674 | mtd_erase_callback(instr); | ||
675 | return 0; | ||
676 | |||
677 | out_err: | ||
678 | instr->state = MTD_ERASE_FAILED; | ||
679 | mutex_unlock(&info->lock); | ||
680 | return -EIO; | ||
681 | } | ||
682 | |||
683 | static inline void dump_mtd_oob_ops(struct mtd_oob_ops *ops) | ||
684 | { | ||
685 | pr_info("%s: oob_ops: mode=%s len=0x%x ooblen=0x%x " | ||
686 | "ooboffs=0x%x dat=0x%p oob=0x%p\n", __func__, | ||
687 | (ops->mode == MTD_OOB_AUTO ? "MTD_OOB_AUTO" : | ||
688 | (ops->mode == | ||
689 | MTD_OOB_PLACE ? "MTD_OOB_PLACE" : "MTD_OOB_RAW")), ops->len, | ||
690 | ops->ooblen, ops->ooboffs, ops->datbuf, ops->oobbuf); | ||
691 | } | ||
692 | |||
693 | static int | ||
694 | tegra_nand_read(struct mtd_info *mtd, loff_t from, size_t len, | ||
695 | size_t *retlen, uint8_t *buf) | ||
696 | { | ||
697 | struct mtd_oob_ops ops; | ||
698 | int ret; | ||
699 | |||
700 | pr_debug("%s: read: from=0x%llx len=0x%x\n", __func__, from, len); | ||
701 | ops.mode = MTD_OOB_AUTO; | ||
702 | ops.len = len; | ||
703 | ops.datbuf = buf; | ||
704 | ops.oobbuf = NULL; | ||
705 | ret = mtd->read_oob(mtd, from, &ops); | ||
706 | *retlen = ops.retlen; | ||
707 | return ret; | ||
708 | } | ||
709 | |||
710 | static void | ||
711 | correct_ecc_errors_on_blank_page(struct tegra_nand_info *info, u8 *datbuf, | ||
712 | u8 *oobbuf, unsigned int a_len, | ||
713 | unsigned int b_len) | ||
714 | { | ||
715 | int i; | ||
716 | int all_ff = 1; | ||
717 | unsigned long flags; | ||
718 | |||
719 | spin_lock_irqsave(&info->ecc_lock, flags); | ||
720 | if (info->num_ecc_errs) { | ||
721 | if (datbuf) { | ||
722 | for (i = 0; i < a_len; i++) | ||
723 | if (datbuf[i] != 0xFF) | ||
724 | all_ff = 0; | ||
725 | } | ||
726 | if (oobbuf) { | ||
727 | for (i = 0; i < b_len; i++) | ||
728 | if (oobbuf[i] != 0xFF) | ||
729 | all_ff = 0; | ||
730 | } | ||
731 | if (all_ff) | ||
732 | info->num_ecc_errs = 0; | ||
733 | } | ||
734 | spin_unlock_irqrestore(&info->ecc_lock, flags); | ||
735 | } | ||
736 | |||
737 | static void update_ecc_counts(struct tegra_nand_info *info, int check_oob) | ||
738 | { | ||
739 | unsigned long flags; | ||
740 | int i; | ||
741 | |||
742 | spin_lock_irqsave(&info->ecc_lock, flags); | ||
743 | for (i = 0; i < info->num_ecc_errs; ++i) { | ||
744 | /* correctable */ | ||
745 | info->mtd.ecc_stats.corrected += | ||
746 | DEC_STATUS_ERR_CNT(info->ecc_errs[i]); | ||
747 | |||
748 | /* uncorrectable */ | ||
749 | if (info->ecc_errs[i] & DEC_STATUS_ECC_FAIL_A) | ||
750 | info->mtd.ecc_stats.failed++; | ||
751 | if (check_oob && (info->ecc_errs[i] & DEC_STATUS_ECC_FAIL_B)) | ||
752 | info->mtd.ecc_stats.failed++; | ||
753 | } | ||
754 | info->num_ecc_errs = 0; | ||
755 | spin_unlock_irqrestore(&info->ecc_lock, flags); | ||
756 | } | ||
757 | |||
758 | static inline void clear_regs(struct tegra_nand_info *info) | ||
759 | { | ||
760 | info->command_reg = 0; | ||
761 | info->config_reg = 0; | ||
762 | info->dmactrl_reg = 0; | ||
763 | } | ||
764 | |||
765 | static void | ||
766 | prep_transfer_dma(struct tegra_nand_info *info, int rx, int do_ecc, | ||
767 | uint32_t page, uint32_t column, dma_addr_t data_dma, | ||
768 | uint32_t data_len, dma_addr_t oob_dma, uint32_t oob_len) | ||
769 | { | ||
770 | uint32_t tag_sz = oob_len; | ||
771 | |||
772 | uint32_t page_size_sel = (info->mtd.writesize >> 11) + 2; | ||
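/* The expression above evaluates to 3 for 2KiB pages and 4 for 4KiB pages,
 * which appears to be the controller's CONFIG_PAGE_SIZE_SEL encoding used
 * further below for those page sizes. */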
773 | #if 0 | ||
774 | pr_info("%s: rx=%d ecc=%d page=%d col=%d data_dma=0x%x " | ||
775 | "data_len=0x%08x oob_dma=0x%x ooblen=%d\n", __func__, | ||
776 | rx, do_ecc, page, column, data_dma, data_len, oob_dma, oob_len); | ||
777 | #endif | ||
778 | |||
779 | info->command_reg = | ||
780 | COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE | COMMAND_ALE | | ||
781 | COMMAND_ALE_BYTE_SIZE(4) | COMMAND_SEC_CMD | COMMAND_RBSY_CHK | | ||
782 | COMMAND_TRANS_SIZE(8); | ||
783 | |||
784 | info->config_reg = (CONFIG_PIPELINE_EN | CONFIG_EDO_MODE | | ||
785 | CONFIG_COM_BSY); | ||
786 | if (info->is_data_bus_width_16) | ||
787 | info->config_reg |= CONFIG_BUS_WIDTH; | ||
788 | info->dmactrl_reg = (DMA_CTRL_DMA_GO | | ||
789 | DMA_CTRL_DMA_PERF_EN | DMA_CTRL_IE_DMA_DONE | | ||
790 | DMA_CTRL_IS_DMA_DONE | DMA_CTRL_BURST_SIZE(4)); | ||
791 | |||
792 | if (rx) { | ||
793 | if (do_ecc) | ||
794 | info->config_reg |= CONFIG_HW_ERR_CORRECTION; | ||
795 | info->command_reg |= COMMAND_RX; | ||
796 | info->dmactrl_reg |= DMA_CTRL_REUSE_BUFFER; | ||
797 | writel(NAND_CMD_READ0, CMD_REG1); | ||
798 | writel(NAND_CMD_READSTART, CMD_REG2); | ||
799 | } else { | ||
800 | info->command_reg |= (COMMAND_TX | COMMAND_AFT_DAT); | ||
801 | info->dmactrl_reg |= DMA_CTRL_DIR; /* DMA_RD == TX */ | ||
802 | writel(NAND_CMD_SEQIN, CMD_REG1); | ||
803 | writel(NAND_CMD_PAGEPROG, CMD_REG2); | ||
804 | } | ||
805 | |||
806 | if (data_len) { | ||
807 | if (do_ecc) | ||
808 | info->config_reg |= CONFIG_HW_ECC | CONFIG_ECC_SEL; | ||
809 | info->config_reg |= | ||
810 | CONFIG_PAGE_SIZE_SEL(page_size_sel) | CONFIG_TVALUE(0) | | ||
811 | CONFIG_SKIP_SPARE | CONFIG_SKIP_SPARE_SEL(0); | ||
812 | info->command_reg |= COMMAND_A_VALID; | ||
813 | info->dmactrl_reg |= DMA_CTRL_DMA_EN_A; | ||
814 | writel(DMA_CFG_BLOCK_SIZE(data_len - 1), DMA_CFG_A_REG); | ||
815 | writel(data_dma, DATA_BLOCK_PTR_REG); | ||
816 | } else { | ||
817 | column = info->mtd.writesize; | ||
818 | if (do_ecc) | ||
819 | column += info->mtd.ecclayout->oobfree[0].offset; | ||
820 | writel(0, DMA_CFG_A_REG); | ||
821 | writel(0, DATA_BLOCK_PTR_REG); | ||
822 | } | ||
823 | |||
824 | if (oob_len) { | ||
825 | if (do_ecc) { | ||
826 | oob_len = info->mtd.oobavail; | ||
827 | tag_sz = info->mtd.oobavail; | ||
828 | tag_sz += 4; /* size of tag ecc */ | ||
829 | if (rx) | ||
830 | oob_len += 4; /* size of tag ecc */ | ||
831 | info->config_reg |= CONFIG_ECC_EN_TAG; | ||
832 | } | ||
833 | if (data_len && rx) | ||
834 | oob_len += 4; /* num of skipped bytes */ | ||
835 | |||
836 | info->command_reg |= COMMAND_B_VALID; | ||
837 | info->config_reg |= CONFIG_TAG_BYTE_SIZE(tag_sz - 1); | ||
838 | info->dmactrl_reg |= DMA_CTRL_DMA_EN_B; | ||
839 | writel(DMA_CFG_BLOCK_SIZE(oob_len - 1), DMA_CFG_B_REG); | ||
840 | writel(oob_dma, TAG_PTR_REG); | ||
841 | } else { | ||
842 | writel(0, DMA_CFG_B_REG); | ||
843 | writel(0, TAG_PTR_REG); | ||
844 | } | ||
845 | /* For a 16-bit bus we need to divide the column number by 2 */ | ||
846 | if (info->is_data_bus_width_16) | ||
847 | column = column >> 1; | ||
848 | writel((column & 0xffff) | ((page & 0xffff) << 16), ADDR_REG1); | ||
849 | writel((page >> 16) & 0xff, ADDR_REG2); | ||
850 | } | ||
851 | |||
852 | static dma_addr_t | ||
853 | tegra_nand_dma_map(struct device *dev, void *addr, size_t size, | ||
854 | enum dma_data_direction dir) | ||
855 | { | ||
856 | struct page *page; | ||
857 | unsigned long offset = (unsigned long)addr & ~PAGE_MASK; | ||
858 | if (virt_addr_valid(addr)) | ||
859 | page = virt_to_page(addr); | ||
860 | else { | ||
861 | if (WARN_ON(size + offset > PAGE_SIZE)) | ||
862 | return ~0; | ||
863 | page = vmalloc_to_page(addr); | ||
864 | } | ||
865 | return dma_map_page(dev, page, offset, size, dir); | ||
866 | } | ||
867 | |||
868 | static ssize_t show_vendor_id(struct device *dev, struct device_attribute *attr, | ||
869 | char *buf) | ||
870 | { | ||
871 | struct tegra_nand_info *info = dev_get_drvdata(dev); | ||
872 | return sprintf(buf, "0x%x\n", info->vendor_id); | ||
873 | } | ||
874 | |||
875 | static DEVICE_ATTR(vendor_id, S_IRUSR, show_vendor_id, NULL); | ||
876 | |||
877 | static ssize_t show_device_id(struct device *dev, struct device_attribute *attr, | ||
878 | char *buf) | ||
879 | { | ||
880 | struct tegra_nand_info *info = dev_get_drvdata(dev); | ||
881 | return sprintf(buf, "0x%x\n", info->device_id); | ||
882 | } | ||
883 | |||
884 | static DEVICE_ATTR(device_id, S_IRUSR, show_device_id, NULL); | ||
885 | |||
886 | static ssize_t show_flash_size(struct device *dev, | ||
887 | struct device_attribute *attr, char *buf) | ||
888 | { | ||
889 | struct tegra_nand_info *info = dev_get_drvdata(dev); | ||
890 | struct mtd_info *mtd = &info->mtd; | ||
891 | return sprintf(buf, "%llu bytes\n", mtd->size); | ||
892 | } | ||
893 | |||
894 | static DEVICE_ATTR(flash_size, S_IRUSR, show_flash_size, NULL); | ||
895 | |||
896 | static ssize_t show_num_bad_blocks(struct device *dev, | ||
897 | struct device_attribute *attr, char *buf) | ||
898 | { | ||
899 | struct tegra_nand_info *info = dev_get_drvdata(dev); | ||
900 | return sprintf(buf, "%d\n", info->num_bad_blocks); | ||
901 | } | ||
902 | |||
903 | static DEVICE_ATTR(num_bad_blocks, S_IRUSR, show_num_bad_blocks, NULL); | ||
904 | |||
905 | static ssize_t show_bb_bitmap(struct device *dev, struct device_attribute *attr, | ||
906 | char *buf) | ||
907 | { | ||
908 | struct tegra_nand_info *info = dev_get_drvdata(dev); | ||
909 | struct mtd_info *mtd = &info->mtd; | ||
910 | int num_blocks = mtd->size >> info->chip.block_shift, i, ret = 0, size = | ||
911 | 0; | ||
912 | |||
913 | for (i = 0; i < num_blocks / (8 * sizeof(unsigned long)); i++) { | ||
914 | size = sprintf(buf, "0x%lx\n", info->bb_bitmap[i]); | ||
915 | ret += size; | ||
916 | buf += size; | ||
917 | } | ||
918 | return ret; | ||
919 | } | ||
920 | |||
921 | static DEVICE_ATTR(bb_bitmap, S_IRUSR, show_bb_bitmap, NULL); | ||
922 | |||
923 | /* | ||
924 | * Independent of the mode, we read the main data and the OOB data from | ||
925 | * the oobfree areas specified in nand_ecclayout. | ||
926 | * This function also checks the partial_unaligned_rw_buffer buffer pool: | ||
927 | * if the address is already present and is not 'unused' it uses the | ||
928 | * buffered data, otherwise it falls back to DMA. | ||
929 | */ | ||
930 | static int | ||
931 | do_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) | ||
932 | { | ||
933 | struct tegra_nand_info *info = MTD_TO_INFO(mtd); | ||
934 | struct mtd_ecc_stats old_ecc_stats; | ||
935 | int chipnr; | ||
936 | uint32_t page; | ||
937 | uint32_t column; | ||
938 | uint8_t *datbuf = ops->datbuf; | ||
939 | uint8_t *oobbuf = ops->oobbuf; | ||
940 | uint32_t ooblen = oobbuf ? ops->ooblen : 0; | ||
941 | uint32_t oobsz; | ||
942 | uint32_t page_count; | ||
943 | int err; | ||
944 | int unaligned = from & info->chip.column_mask; | ||
945 | uint32_t len = datbuf ? ((ops->len) + unaligned) : 0; | ||
946 | int do_ecc = 1; | ||
947 | dma_addr_t datbuf_dma_addr = 0; | ||
948 | |||
949 | #if 0 | ||
950 | dump_mtd_oob_ops(ops); | ||
951 | #endif | ||
952 | ops->retlen = 0; | ||
953 | ops->oobretlen = 0; | ||
954 | from = from - unaligned; | ||
955 | |||
956 | /* Don't care about the MTD_OOB_* mode field; always use oobavail and ECC. */ | ||
957 | oobsz = mtd->oobavail; | ||
958 | if (unlikely(ops->oobbuf && ops->ooblen > oobsz)) { | ||
959 | pr_err("%s: can't read OOB from multiple pages (%d > %d)\n", | ||
960 | __func__, ops->ooblen, oobsz); | ||
961 | return -EINVAL; | ||
962 | } else if (ops->oobbuf && !len) { | ||
963 | page_count = 1; | ||
964 | } else { | ||
965 | page_count = | ||
966 | (uint32_t) ((len + mtd->writesize - 1) / mtd->writesize); | ||
967 | } | ||
968 | |||
969 | mutex_lock(&info->lock); | ||
970 | |||
971 | memcpy(&old_ecc_stats, &mtd->ecc_stats, sizeof(old_ecc_stats)); | ||
972 | |||
973 | if (do_ecc) { | ||
974 | enable_ints(info, IER_ECC_ERR); | ||
975 | writel(info->ecc_addr, ECC_PTR_REG); | ||
976 | } else | ||
977 | disable_ints(info, IER_ECC_ERR); | ||
978 | |||
979 | split_addr(info, from, &chipnr, &page, &column); | ||
980 | select_chip(info, chipnr); | ||
981 | |||
982 | /* reset it to point back to beginning of page */ | ||
983 | from -= column; | ||
984 | |||
985 | while (page_count--) { | ||
986 | int a_len = min(mtd->writesize - column, len); | ||
987 | int b_len = min(oobsz, ooblen); | ||
988 | int temp_len = 0; | ||
989 | char *temp_buf = NULL; | ||
990 | /* Take care when the read is less than the page size. | ||
991 | * Otherwise there will be a kernel panic due to a DMA timeout. */ | ||
992 | if (((a_len < mtd->writesize) && len) || unaligned) { | ||
993 | temp_len = a_len; | ||
994 | a_len = mtd->writesize; | ||
995 | temp_buf = datbuf; | ||
996 | datbuf = info->partial_unaligned_rw_buffer; | ||
997 | } | ||
998 | #if 0 | ||
999 | pr_info("%s: chip:=%d page=%d col=%d\n", __func__, chipnr, | ||
1000 | page, column); | ||
1001 | #endif | ||
1002 | |||
1003 | clear_regs(info); | ||
1004 | if (datbuf) | ||
1005 | datbuf_dma_addr = | ||
1006 | tegra_nand_dma_map(info->dev, datbuf, a_len, | ||
1007 | DMA_FROM_DEVICE); | ||
1008 | |||
1009 | prep_transfer_dma(info, 1, do_ecc, page, column, | ||
1010 | datbuf_dma_addr, a_len, info->oob_dma_addr, | ||
1011 | b_len); | ||
1012 | writel(info->config_reg, CONFIG_REG); | ||
1013 | writel(info->dmactrl_reg, DMA_MST_CTRL_REG); | ||
1014 | |||
1015 | INIT_COMPLETION(info->dma_complete); | ||
1016 | err = tegra_nand_go(info); | ||
1017 | if (err != 0) | ||
1018 | goto out_err; | ||
1019 | |||
1020 | if (!wait_for_completion_timeout(&info->dma_complete, TIMEOUT)) { | ||
1021 | pr_err("%s: dma completion timeout\n", __func__); | ||
1022 | dump_nand_regs(); | ||
1023 | err = -ETIMEDOUT; | ||
1024 | goto out_err; | ||
1025 | } | ||
1026 | |||
1027 | /*pr_info("tegra_read_oob: DMA complete\n"); */ | ||
1028 | |||
1029 | /* if we are here, transfer is done */ | ||
1030 | if (datbuf) | ||
1031 | dma_unmap_page(info->dev, datbuf_dma_addr, a_len, | ||
1032 | DMA_FROM_DEVICE); | ||
1033 | |||
1034 | if (oobbuf) { | ||
1035 | uint32_t ofs = datbuf && oobbuf ? 4 : 0; /* skipped bytes */ | ||
1036 | memcpy(oobbuf, info->oob_dma_buf + ofs, b_len); | ||
1037 | } | ||
1038 | |||
1039 | correct_ecc_errors_on_blank_page(info, datbuf, oobbuf, a_len, | ||
1040 | b_len); | ||
1041 | /* Take care when the read is less than the page size */ | ||
1042 | if (temp_len) { | ||
1043 | memcpy(temp_buf, datbuf + unaligned, | ||
1044 | temp_len - unaligned); | ||
1045 | a_len = temp_len; | ||
1046 | datbuf = temp_buf; | ||
1047 | } | ||
1048 | if (datbuf) { | ||
1049 | len -= a_len; | ||
1050 | datbuf += a_len - unaligned; | ||
1051 | ops->retlen += a_len - unaligned; | ||
1052 | } | ||
1053 | |||
1054 | if (oobbuf) { | ||
1055 | ooblen -= b_len; | ||
1056 | oobbuf += b_len; | ||
1057 | ops->oobretlen += b_len; | ||
1058 | } | ||
1059 | |||
1060 | unaligned = 0; | ||
1061 | update_ecc_counts(info, oobbuf != NULL); | ||
1062 | |||
1063 | if (!page_count) | ||
1064 | break; | ||
1065 | |||
1066 | from += mtd->writesize; | ||
1067 | column = 0; | ||
1068 | |||
1069 | split_addr(info, from, &chipnr, &page, &column); | ||
1070 | if (chipnr != info->chip.curr_chip) | ||
1071 | select_chip(info, chipnr); | ||
1072 | } | ||
1073 | |||
1074 | disable_ints(info, IER_ECC_ERR); | ||
1075 | |||
1076 | if (mtd->ecc_stats.failed != old_ecc_stats.failed) | ||
1077 | err = -EBADMSG; | ||
1078 | else if (mtd->ecc_stats.corrected != old_ecc_stats.corrected) | ||
1079 | err = -EUCLEAN; | ||
1080 | else | ||
1081 | err = 0; | ||
1082 | |||
1083 | mutex_unlock(&info->lock); | ||
1084 | return err; | ||
1085 | |||
1086 | out_err: | ||
1087 | ops->retlen = 0; | ||
1088 | ops->oobretlen = 0; | ||
1089 | |||
1090 | disable_ints(info, IER_ECC_ERR); | ||
1091 | mutex_unlock(&info->lock); | ||
1092 | return err; | ||
1093 | } | ||
1094 | |||
1095 | /* just does some parameter checking and calls do_read_oob */ | ||
1096 | static int | ||
1097 | tegra_nand_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) | ||
1098 | { | ||
1099 | if (ops->datbuf && unlikely((from + ops->len) > mtd->size)) { | ||
1100 | pr_err("%s: Can't read past end of device.\n", __func__); | ||
1101 | return -EINVAL; | ||
1102 | } | ||
1103 | |||
1104 | if (unlikely(ops->oobbuf && !ops->ooblen)) { | ||
1105 | pr_err("%s: Reading 0 bytes from OOB is meaningless\n", | ||
1106 | __func__); | ||
1107 | return -EINVAL; | ||
1108 | } | ||
1109 | |||
1110 | if (unlikely(ops->mode != MTD_OOB_AUTO)) { | ||
1111 | if (ops->oobbuf && ops->datbuf) { | ||
1112 | pr_err("%s: can't read OOB + Data in non-AUTO mode.\n", | ||
1113 | __func__); | ||
1114 | return -EINVAL; | ||
1115 | } | ||
1116 | if ((ops->mode == MTD_OOB_RAW) && !ops->datbuf) { | ||
1117 | pr_err("%s: Raw mode only supports reading data area.\n", | ||
1118 | __func__); | ||
1119 | return -EINVAL; | ||
1120 | } | ||
1121 | } | ||
1122 | |||
1123 | return do_read_oob(mtd, from, ops); | ||
1124 | } | ||
1125 | |||
1126 | static int | ||
1127 | tegra_nand_write(struct mtd_info *mtd, loff_t to, size_t len, | ||
1128 | size_t *retlen, const uint8_t *buf) | ||
1129 | { | ||
1130 | struct mtd_oob_ops ops; | ||
1131 | int ret; | ||
1132 | |||
1133 | pr_debug("%s: write: to=0x%llx len=0x%x\n", __func__, to, len); | ||
1134 | ops.mode = MTD_OOB_AUTO; | ||
1135 | ops.len = len; | ||
1136 | ops.datbuf = (uint8_t *) buf; | ||
1137 | ops.oobbuf = NULL; | ||
1138 | ret = mtd->write_oob(mtd, to, &ops); | ||
1139 | *retlen = ops.retlen; | ||
1140 | return ret; | ||
1141 | } | ||
1142 | |||
1143 | static int | ||
1144 | do_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) | ||
1145 | { | ||
1146 | struct tegra_nand_info *info = MTD_TO_INFO(mtd); | ||
1147 | int chipnr; | ||
1148 | uint32_t page; | ||
1149 | uint32_t column; | ||
1150 | uint8_t *datbuf = ops->datbuf; | ||
1151 | uint8_t *oobbuf = ops->oobbuf; | ||
1152 | uint32_t len = datbuf ? ops->len : 0; | ||
1153 | uint32_t ooblen = oobbuf ? ops->ooblen : 0; | ||
1154 | uint32_t oobsz; | ||
1155 | uint32_t page_count; | ||
1156 | int err; | ||
1157 | int do_ecc = 1; | ||
1158 | dma_addr_t datbuf_dma_addr = 0; | ||
1159 | |||
1160 | #if 0 | ||
1161 | dump_mtd_oob_ops(ops); | ||
1162 | #endif | ||
1163 | |||
1164 | ops->retlen = 0; | ||
1165 | ops->oobretlen = 0; | ||
1166 | |||
1167 | if (!ops->len) | ||
1168 | return 0; | ||
1169 | |||
1170 | oobsz = mtd->oobavail; | ||
1171 | |||
1172 | if (unlikely(ops->oobbuf && ops->ooblen > oobsz)) { | ||
1173 | pr_err("%s: can't write OOB to multiple pages (%d > %d)\n", | ||
1174 | __func__, ops->ooblen, oobsz); | ||
1175 | return -EINVAL; | ||
1176 | } else if (ops->oobbuf && !len) { | ||
1177 | page_count = 1; | ||
1178 | } else | ||
1179 | page_count = | ||
1180 | max((uint32_t) (ops->len / mtd->writesize), (uint32_t) 1); | ||
1181 | |||
1182 | mutex_lock(&info->lock); | ||
1183 | |||
1184 | split_addr(info, to, &chipnr, &page, &column); | ||
1185 | select_chip(info, chipnr); | ||
1186 | |||
1187 | while (page_count--) { | ||
1188 | int a_len = min(mtd->writesize, len); | ||
1189 | int b_len = min(oobsz, ooblen); | ||
1190 | int temp_len = 0; | ||
1191 | char *temp_buf = NULL; | ||
1192 | /* Take care when the write is less than the page size. Otherwise | ||
1193 | * there will be a kernel panic due to a DMA timeout. */ | ||
1194 | if ((a_len < mtd->writesize) && len) { | ||
1195 | temp_len = a_len; | ||
1196 | a_len = mtd->writesize; | ||
1197 | temp_buf = datbuf; | ||
1198 | datbuf = info->partial_unaligned_rw_buffer; | ||
1199 | memset(datbuf, 0xff, a_len); | ||
1200 | memcpy(datbuf, temp_buf, temp_len); | ||
1201 | } | ||
1202 | |||
1203 | if (datbuf) | ||
1204 | datbuf_dma_addr = | ||
1205 | tegra_nand_dma_map(info->dev, datbuf, a_len, | ||
1206 | DMA_TO_DEVICE); | ||
1207 | if (oobbuf) | ||
1208 | memcpy(info->oob_dma_buf, oobbuf, b_len); | ||
1209 | |||
1210 | clear_regs(info); | ||
1211 | prep_transfer_dma(info, 0, do_ecc, page, column, | ||
1212 | datbuf_dma_addr, a_len, info->oob_dma_addr, | ||
1213 | b_len); | ||
1214 | |||
1215 | writel(info->config_reg, CONFIG_REG); | ||
1216 | writel(info->dmactrl_reg, DMA_MST_CTRL_REG); | ||
1217 | |||
1218 | INIT_COMPLETION(info->dma_complete); | ||
1219 | err = tegra_nand_go(info); | ||
1220 | if (err != 0) | ||
1221 | goto out_err; | ||
1222 | |||
1223 | if (!wait_for_completion_timeout(&info->dma_complete, TIMEOUT)) { | ||
1224 | pr_err("%s: dma completion timeout\n", __func__); | ||
1225 | dump_nand_regs(); | ||
1226 | goto out_err; | ||
1227 | } | ||
1228 | if (temp_len) { | ||
1229 | a_len = temp_len; | ||
1230 | datbuf = temp_buf; | ||
1231 | } | ||
1232 | |||
1233 | if (datbuf) { | ||
1234 | dma_unmap_page(info->dev, datbuf_dma_addr, a_len, | ||
1235 | DMA_TO_DEVICE); | ||
1236 | len -= a_len; | ||
1237 | datbuf += a_len; | ||
1238 | ops->retlen += a_len; | ||
1239 | } | ||
1240 | if (oobbuf) { | ||
1241 | ooblen -= b_len; | ||
1242 | oobbuf += b_len; | ||
1243 | ops->oobretlen += b_len; | ||
1244 | } | ||
1245 | |||
1246 | if (!page_count) | ||
1247 | break; | ||
1248 | |||
1249 | to += mtd->writesize; | ||
1250 | column = 0; | ||
1251 | |||
1252 | split_addr(info, to, &chipnr, &page, &column); | ||
1253 | if (chipnr != info->chip.curr_chip) | ||
1254 | select_chip(info, chipnr); | ||
1255 | } | ||
1256 | |||
1257 | mutex_unlock(&info->lock); | ||
1258 | return err; | ||
1259 | |||
1260 | out_err: | ||
1261 | ops->retlen = 0; | ||
1262 | ops->oobretlen = 0; | ||
1263 | |||
1264 | mutex_unlock(&info->lock); | ||
1265 | return err; | ||
1266 | } | ||
1267 | |||
1268 | static int | ||
1269 | tegra_nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) | ||
1270 | { | ||
1271 | struct tegra_nand_info *info = MTD_TO_INFO(mtd); | ||
1272 | |||
1273 | if (unlikely(to & info->chip.column_mask)) { | ||
1274 | pr_err("%s: Unaligned write (to 0x%llx) not supported\n", | ||
1275 | __func__, to); | ||
1276 | return -EINVAL; | ||
1277 | } | ||
1278 | |||
1279 | if (unlikely(ops->oobbuf && !ops->ooblen)) { | ||
1280 | pr_err("%s: Writing 0 bytes to OOB is meaningless\n", __func__); | ||
1281 | return -EINVAL; | ||
1282 | } | ||
1283 | |||
1284 | return do_write_oob(mtd, to, ops); | ||
1285 | } | ||
1286 | |||
1287 | static int tegra_nand_suspend(struct mtd_info *mtd) | ||
1288 | { | ||
1289 | return 0; | ||
1290 | } | ||
1291 | |||
1292 | static void | ||
1293 | set_chip_timing(struct tegra_nand_info *info, uint32_t vendor_id, | ||
1294 | uint32_t dev_id, uint32_t fourth_id_field) | ||
1295 | { | ||
1296 | struct tegra_nand_chip_parms *chip_parms = NULL; | ||
1297 | uint32_t tmp; | ||
1298 | int i = 0; | ||
1299 | unsigned long nand_clk_freq_khz = clk_get_rate(info->clk) / 1000; | ||
1300 | for (i = 0; i < info->plat->nr_chip_parms; i++) | ||
1301 | if (info->plat->chip_parms[i].vendor_id == vendor_id && | ||
1302 | info->plat->chip_parms[i].device_id == dev_id && | ||
1303 | info->plat->chip_parms[i].read_id_fourth_byte == | ||
1304 | fourth_id_field) | ||
1305 | chip_parms = &info->plat->chip_parms[i]; | ||
1306 | |||
1307 | if (!chip_parms) { | ||
1308 | pr_warn("WARNING:tegra_nand: timing for vendor-id: " | ||
1309 | "%x device-id: %x fourth-id-field: %x not found. Using bootloader timing\n", | ||
1310 | vendor_id, dev_id, fourth_id_field); | ||
1311 | return; | ||
1312 | } | ||
1313 | /* TODO: Handle the change of frequency if DVFS is enabled */ | ||
1314 | #define CNT(t) (((((t) * nand_clk_freq_khz) + 1000000 - 1) / 1000000) - 1) | ||
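/* CNT() converts a timing parameter in nanoseconds into controller clock
 * cycles, rounded up, minus one (the hardware presumably counts N+1 cycles).
 * E.g. with a hypothetical 100 MHz NAND clock (nand_clk_freq_khz == 100000),
 * CNT(25) = ceil(25 * 100000 / 1000000) - 1 = 3 - 1 = 2.
 */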
1315 | tmp = (TIMING_TRP_RESP(CNT(chip_parms->timing.trp_resp)) | | ||
1316 | TIMING_TWB(CNT(chip_parms->timing.twb)) | | ||
1317 | TIMING_TCR_TAR_TRR(CNT(chip_parms->timing.tcr_tar_trr)) | | ||
1318 | TIMING_TWHR(CNT(chip_parms->timing.twhr)) | | ||
1319 | TIMING_TCS(CNT(chip_parms->timing.tcs)) | | ||
1320 | TIMING_TWH(CNT(chip_parms->timing.twh)) | | ||
1321 | TIMING_TWP(CNT(chip_parms->timing.twp)) | | ||
1322 | TIMING_TRH(CNT(chip_parms->timing.trh)) | | ||
1323 | TIMING_TRP(CNT(chip_parms->timing.trp))); | ||
1324 | writel(tmp, TIMING_REG); | ||
1325 | writel(TIMING2_TADL(CNT(chip_parms->timing.tadl)), TIMING2_REG); | ||
1326 | #undef CNT | ||
1327 | } | ||
1328 | |||
1329 | static void tegra_nand_resume(struct mtd_info *mtd) | ||
1330 | { | ||
1331 | struct tegra_nand_info *info = MTD_TO_INFO(mtd); | ||
1332 | |||
1333 | cfg_hwstatus_mon(info); | ||
1334 | |||
1335 | /* clear all pending interrupts */ | ||
1336 | writel(readl(ISR_REG), ISR_REG); | ||
1337 | |||
1338 | /* clear dma interrupt */ | ||
1339 | writel(DMA_CTRL_IS_DMA_DONE, DMA_MST_CTRL_REG); | ||
1340 | |||
1341 | /* enable interrupts */ | ||
1342 | disable_ints(info, 0xffffffff); | ||
1343 | enable_ints(info, | ||
1344 | IER_ERR_TRIG_VAL(4) | IER_UND | IER_OVR | IER_CMD_DONE | | ||
1345 | IER_ECC_ERR | IER_GIE); | ||
1346 | |||
1347 | writel(0, CONFIG_REG); | ||
1348 | |||
1349 | set_chip_timing(info, info->vendor_id, | ||
1350 | info->device_id, info->dev_parms); | ||
1351 | |||
1352 | return; | ||
1353 | } | ||
1354 | |||
1355 | static int scan_bad_blocks(struct tegra_nand_info *info) | ||
1356 | { | ||
1357 | struct mtd_info *mtd = &info->mtd; | ||
1358 | int num_blocks = mtd->size >> info->chip.block_shift; | ||
1359 | uint32_t block; | ||
1360 | int is_bad = 0; | ||
1361 | info->num_bad_blocks = 0; | ||
1362 | |||
1363 | for (block = 0; block < num_blocks; ++block) { | ||
1364 | /* make sure the bit is cleared, meaning it's bad/unknown before | ||
1365 | * we check. */ | ||
1366 | clear_bit(block, info->bb_bitmap); | ||
1367 | is_bad = mtd->block_isbad(mtd, block << info->chip.block_shift); | ||
1368 | |||
1369 | if (is_bad == 0) | ||
1370 | set_bit(block, info->bb_bitmap); | ||
1371 | else if (is_bad > 0) { | ||
1372 | info->num_bad_blocks++; | ||
1373 | pr_debug("block 0x%08x is bad.\n", block); | ||
1374 | } else { | ||
1375 | pr_err("Fatal error (%d) while scanning for " | ||
1376 | "bad blocks\n", is_bad); | ||
1377 | return is_bad; | ||
1378 | } | ||
1379 | } | ||
1380 | return 0; | ||
1381 | } | ||
1382 | |||
1383 | /* Scans for nand flash devices, identifies them, and fills in the | ||
1384 | * device info. */ | ||
1385 | static int tegra_nand_scan(struct mtd_info *mtd, int maxchips) | ||
1386 | { | ||
1387 | struct tegra_nand_info *info = MTD_TO_INFO(mtd); | ||
1388 | struct nand_flash_dev *dev_info; | ||
1389 | struct nand_manufacturers *vendor_info; | ||
1390 | uint32_t tmp; | ||
1391 | uint32_t dev_id; | ||
1392 | uint32_t vendor_id; | ||
1393 | uint32_t dev_parms; | ||
1394 | uint32_t mlc_parms; | ||
1395 | int cnt; | ||
1396 | int err = 0; | ||
1397 | |||
1398 | writel(SCAN_TIMING_VAL, TIMING_REG); | ||
1399 | writel(SCAN_TIMING2_VAL, TIMING2_REG); | ||
1400 | writel(0, CONFIG_REG); | ||
1401 | |||
1402 | select_chip(info, 0); | ||
1403 | err = tegra_nand_cmd_readid(info, &tmp); | ||
1404 | if (err != 0) | ||
1405 | goto out_error; | ||
1406 | |||
1407 | vendor_id = tmp & 0xff; | ||
1408 | dev_id = (tmp >> 8) & 0xff; | ||
1409 | mlc_parms = (tmp >> 16) & 0xff; | ||
1410 | dev_parms = (tmp >> 24) & 0xff; | ||
1411 | |||
1412 | dev_info = find_nand_flash_device(dev_id); | ||
1413 | if (dev_info == NULL) { | ||
1414 | pr_err("%s: unknown flash device id (0x%02x) found.\n", | ||
1415 | __func__, dev_id); | ||
1416 | err = -ENODEV; | ||
1417 | goto out_error; | ||
1418 | } | ||
1419 | |||
1420 | vendor_info = find_nand_flash_vendor(vendor_id); | ||
1421 | if (vendor_info == NULL) { | ||
1422 | pr_err("%s: unknown flash vendor id (0x%02x) found.\n", | ||
1423 | __func__, vendor_id); | ||
1424 | err = -ENODEV; | ||
1425 | goto out_error; | ||
1426 | } | ||
1427 | |||
1428 | /* loop through and see if we can find more devices */ | ||
1429 | for (cnt = 1; cnt < info->plat->max_chips; ++cnt) { | ||
1430 | select_chip(info, cnt); | ||
1431 | /* TODO: figure out what to do about errors here */ | ||
1432 | err = tegra_nand_cmd_readid(info, &tmp); | ||
1433 | if (err != 0) | ||
1434 | goto out_error; | ||
1435 | if ((dev_id != ((tmp >> 8) & 0xff)) || | ||
1436 | (vendor_id != (tmp & 0xff))) | ||
1437 | break; | ||
1438 | } | ||
1439 | |||
1440 | pr_info("%s: %d NAND chip(s) found (vend=0x%02x, dev=0x%02x) (%s %s)\n", | ||
1441 | DRIVER_NAME, cnt, vendor_id, dev_id, vendor_info->name, | ||
1442 | dev_info->name); | ||
1443 | info->vendor_id = vendor_id; | ||
1444 | info->device_id = dev_id; | ||
1445 | info->dev_parms = dev_parms; | ||
1446 | info->chip.num_chips = cnt; | ||
1447 | info->chip.chipsize = dev_info->chipsize << 20; | ||
1448 | mtd->size = info->chip.num_chips * info->chip.chipsize; | ||
1449 | |||
1450 | /* format of 4th id byte returned by READ ID | ||
1451 | * bit 7 = rsvd | ||
1452 | * bit 6 = bus width. 1 == 16bit, 0 == 8bit | ||
1453 | * bits 5:4 = data block size. 64kb * (2^val) | ||
1454 | * bit 3 = rsvd | ||
1455 | * bit 2 = spare area size / 512 bytes. 0 == 8bytes, 1 == 16bytes | ||
1456 | * bits 1:0 = page size. 1kb * (2^val) | ||
1457 | */ | ||
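/* Illustrative decode (hypothetical value, not from a specific part):
 * dev_parms == 0x15 -> page size 1KB << 1 = 2KB, spare 16 bytes per 512
 * bytes (64 bytes of OOB for a 2KB page), erase block 64KB << 1 = 128KB,
 * 8-bit bus.
 */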
1458 | |||
1459 | /* page_size */ | ||
1460 | tmp = dev_parms & 0x3; | ||
1461 | mtd->writesize = 1024 << tmp; | ||
1462 | info->chip.column_mask = mtd->writesize - 1; | ||
1463 | |||
1464 | if (mtd->writesize > 4096) { | ||
1465 | pr_err("%s: Large page devices with pagesize > 4kb are NOT " | ||
1466 | "supported\n", __func__); | ||
1467 | goto out_error; | ||
1468 | } else if (mtd->writesize < 2048) { | ||
1469 | pr_err("%s: Small page devices are NOT supported\n", __func__); | ||
1470 | goto out_error; | ||
1471 | } | ||
1472 | |||
1473 | /* spare area, must be at least 64 bytes */ | ||
1474 | tmp = (dev_parms >> 2) & 0x1; | ||
1475 | tmp = (8 << tmp) * (mtd->writesize / 512); | ||
1476 | if (tmp < 64) { | ||
1477 | pr_err("%s: Spare area (%d bytes) too small\n", __func__, tmp); | ||
1478 | goto out_error; | ||
1479 | } | ||
1480 | mtd->oobsize = tmp; | ||
1481 | |||
1482 | /* data block size (erase size) (w/o spare) */ | ||
1483 | tmp = (dev_parms >> 4) & 0x3; | ||
1484 | mtd->erasesize = (64 * 1024) << tmp; | ||
1485 | info->chip.block_shift = ffs(mtd->erasesize) - 1; | ||
1486 | /* bus width of the nand chip 8/16 */ | ||
1487 | tmp = (dev_parms >> 6) & 0x1; | ||
1488 | info->is_data_bus_width_16 = tmp; | ||
1489 | /* used to select the appropriate chip/page in case multiple devices | ||
1490 | * are connected */ | ||
1491 | info->chip.chip_shift = ffs(info->chip.chipsize) - 1; | ||
1492 | info->chip.page_shift = ffs(mtd->writesize) - 1; | ||
1493 | info->chip.page_mask = | ||
1494 | (info->chip.chipsize >> info->chip.page_shift) - 1; | ||
1495 | |||
1496 | /* now fill in the rest of the mtd fields */ | ||
1497 | if (mtd->oobsize == 64) | ||
1498 | mtd->ecclayout = &tegra_nand_oob_64; | ||
1499 | else | ||
1500 | mtd->ecclayout = &tegra_nand_oob_128; | ||
1501 | |||
1502 | mtd->oobavail = mtd->ecclayout->oobavail; | ||
1503 | mtd->type = MTD_NANDFLASH; | ||
1504 | mtd->flags = MTD_CAP_NANDFLASH; | ||
1505 | |||
1506 | mtd->erase = tegra_nand_erase; | ||
1507 | mtd->lock = NULL; | ||
1508 | mtd->point = NULL; | ||
1509 | mtd->unpoint = NULL; | ||
1510 | mtd->read = tegra_nand_read; | ||
1511 | mtd->write = tegra_nand_write; | ||
1512 | mtd->read_oob = tegra_nand_read_oob; | ||
1513 | mtd->write_oob = tegra_nand_write_oob; | ||
1514 | |||
1515 | mtd->resume = tegra_nand_resume; | ||
1516 | mtd->suspend = tegra_nand_suspend; | ||
1517 | mtd->block_isbad = tegra_nand_block_isbad; | ||
1518 | mtd->block_markbad = tegra_nand_block_markbad; | ||
1519 | |||
1520 | set_chip_timing(info, vendor_id, dev_id, dev_parms); | ||
1521 | |||
1522 | return 0; | ||
1523 | |||
1524 | out_error: | ||
1525 | pr_err("%s: NAND device scan aborted due to error(s).\n", __func__); | ||
1526 | return err; | ||
1527 | } | ||
1528 | |||
1529 | static int __devinit tegra_nand_probe(struct platform_device *pdev) | ||
1530 | { | ||
1531 | struct tegra_nand_platform *plat = pdev->dev.platform_data; | ||
1532 | struct tegra_nand_info *info = NULL; | ||
1533 | struct tegra_nand_chip *chip = NULL; | ||
1534 | struct mtd_info *mtd = NULL; | ||
1535 | int err = 0; | ||
1536 | uint64_t num_erase_blocks; | ||
1537 | |||
1538 | pr_debug("%s: probing (%p)\n", __func__, pdev); | ||
1539 | |||
1540 | if (!plat) { | ||
1541 | pr_err("%s: no platform device info\n", __func__); | ||
1542 | return -EINVAL; | ||
1543 | } else if (!plat->chip_parms) { | ||
1544 | pr_err("%s: no platform nand parms\n", __func__); | ||
1545 | return -EINVAL; | ||
1546 | } | ||
1547 | |||
1548 | info = kzalloc(sizeof(struct tegra_nand_info), GFP_KERNEL); | ||
1549 | if (!info) { | ||
1550 | pr_err("%s: no memory for flash info\n", __func__); | ||
1551 | return -ENOMEM; | ||
1552 | } | ||
1553 | |||
1554 | info->dev = &pdev->dev; | ||
1555 | info->plat = plat; | ||
1556 | |||
1557 | platform_set_drvdata(pdev, info); | ||
1558 | |||
1559 | init_completion(&info->cmd_complete); | ||
1560 | init_completion(&info->dma_complete); | ||
1561 | |||
1562 | mutex_init(&info->lock); | ||
1563 | spin_lock_init(&info->ecc_lock); | ||
1564 | |||
1565 | chip = &info->chip; | ||
1566 | chip->priv = &info->mtd; | ||
1567 | chip->curr_chip = -1; | ||
1568 | |||
1569 | mtd = &info->mtd; | ||
1570 | mtd->name = dev_name(&pdev->dev); | ||
1571 | mtd->priv = &info->chip; | ||
1572 | mtd->owner = THIS_MODULE; | ||
1573 | |||
1574 | /* HACK: allocate a dma buffer to hold 1 page oob data */ | ||
1575 | info->oob_dma_buf = dma_alloc_coherent(NULL, 128, | ||
1576 | &info->oob_dma_addr, GFP_KERNEL); | ||
1577 | if (!info->oob_dma_buf) { | ||
1578 | err = -ENOMEM; | ||
1579 | goto out_free_info; | ||
1580 | } | ||
1581 | |||
1582 | /* this will store the ecc error vector info */ | ||
1583 | info->ecc_buf = dma_alloc_coherent(NULL, ECC_BUF_SZ, &info->ecc_addr, | ||
1584 | GFP_KERNEL); | ||
1585 | if (!info->ecc_buf) { | ||
1586 | err = -ENOMEM; | ||
1587 | goto out_free_dma_buf; | ||
1588 | } | ||
1589 | |||
1590 | /* grab the irq */ | ||
1591 | if (!(pdev->resource[0].flags & IORESOURCE_IRQ)) { | ||
1592 | pr_err("NAND IRQ resource not defined\n"); | ||
1593 | err = -EINVAL; | ||
1594 | goto out_free_ecc_buf; | ||
1595 | } | ||
1596 | |||
1597 | err = request_irq(pdev->resource[0].start, tegra_nand_irq, | ||
1598 | IRQF_SHARED, DRIVER_NAME, info); | ||
1599 | if (err) { | ||
1600 | pr_err("Unable to request IRQ %d (%d)\n", | ||
1601 | pdev->resource[0].start, err); | ||
1602 | goto out_free_ecc_buf; | ||
1603 | } | ||
1604 | |||
1605 | /* TODO: configure pinmux here?? */ | ||
1606 | info->clk = clk_get(&pdev->dev, NULL); | ||
1607 | |||
1608 | if (IS_ERR(info->clk)) { | ||
1609 | err = PTR_ERR(info->clk); | ||
1610 | goto out_free_irq; | ||
1611 | } | ||
1612 | err = clk_enable(info->clk); | ||
1613 | if (err != 0) | ||
1614 | goto out_free_irq; | ||
1615 | |||
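/*
 * Board-level write protect: if the platform supplies a WP GPIO, drive it
 * high here. This presumes the pin gates an active-low WP# input on the
 * flash, so raising it releases write protection for erase/program use.
 */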
1616 | if (plat->wp_gpio) { | ||
1617 | gpio_request(plat->wp_gpio, "nand_wp"); | ||
1618 | tegra_gpio_enable(plat->wp_gpio); | ||
1619 | gpio_direction_output(plat->wp_gpio, 1); | ||
1620 | } | ||
1621 | |||
1622 | cfg_hwstatus_mon(info); | ||
1623 | |||
1624 | /* clear all pending interrupts */ | ||
1625 | writel(readl(ISR_REG), ISR_REG); | ||
1626 | |||
1627 | /* clear dma interrupt */ | ||
1628 | writel(DMA_CTRL_IS_DMA_DONE, DMA_MST_CTRL_REG); | ||
1629 | |||
1630 | /* mask every interrupt source, then enable only the events the ISR handles */ | ||
1631 | disable_ints(info, 0xffffffff); | ||
1632 | enable_ints(info, | ||
1633 | IER_ERR_TRIG_VAL(4) | IER_UND | IER_OVR | IER_CMD_DONE | | ||
1634 | IER_ECC_ERR | IER_GIE); | ||
1635 | |||
1636 | if (tegra_nand_scan(mtd, plat->max_chips)) { | ||
1637 | err = -ENXIO; | ||
1638 | goto out_dis_irq; | ||
1639 | } | ||
1640 | pr_info("%s: NVIDIA Tegra NAND controller @ base=0x%08x irq=%d.\n", | ||
1641 | DRIVER_NAME, TEGRA_NAND_PHYS, pdev->resource[0].start); | ||
1642 | |||
1643 | /* allocate memory to hold the ecc error info */ | ||
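/* worst case is presumably one ECC error record per page of a single MAX_DMA_SZ transfer */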
1644 | info->max_ecc_errs = MAX_DMA_SZ / mtd->writesize; | ||
1645 | info->ecc_errs = kmalloc(info->max_ecc_errs * sizeof(uint32_t), | ||
1646 | GFP_KERNEL); | ||
1647 | if (!info->ecc_errs) { | ||
1648 | err = -ENOMEM; | ||
1649 | goto out_dis_irq; | ||
1650 | } | ||
1651 | |||
1652 | /* alloc the bad block bitmap */ | ||
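/* one bit per erase block; do_div() is needed because mtd->size is 64-bit */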
1653 | num_erase_blocks = mtd->size; | ||
1654 | do_div(num_erase_blocks, mtd->erasesize); | ||
1655 | info->bb_bitmap = kzalloc(BITS_TO_LONGS(num_erase_blocks) * | ||
1656 | sizeof(unsigned long), GFP_KERNEL); | ||
1657 | if (!info->bb_bitmap) { | ||
1658 | err = -ENOMEM; | ||
1659 | goto out_free_ecc; | ||
1660 | } | ||
1661 | |||
1662 | err = scan_bad_blocks(info); | ||
1663 | if (err != 0) | ||
1664 | goto out_free_bbbmap; | ||
1665 | |||
1666 | #if 0 | ||
1667 | dump_nand_regs(); | ||
1668 | #endif | ||
1669 | |||
1670 | err = parse_mtd_partitions(mtd, part_probes, &info->parts, 0); | ||
1671 | if (err > 0) | ||
1672 | err = mtd_device_register(mtd, info->parts, err); | ||
1673 | else if (plat->parts) | ||
1674 | err = mtd_device_register(mtd, plat->parts, plat->nr_parts); | ||
1675 | else | ||
1676 | err = mtd_device_register(mtd, NULL, 0); | ||
1677 | if (err != 0) | ||
1678 | goto out_free_bbbmap; | ||
1679 | |||
1680 | dev_set_drvdata(&pdev->dev, info); | ||
1681 | |||
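/* page-sized bounce buffer, presumably used to stage non-page-aligned reads and writes */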
1682 | info->partial_unaligned_rw_buffer = kzalloc(mtd->writesize, GFP_KERNEL); | ||
1683 | if (!info->partial_unaligned_rw_buffer) { | ||
1684 | err = -ENOMEM; | ||
1685 | goto out_free_bbbmap; | ||
1686 | } | ||
1687 | |||
1688 | err = device_create_file(&pdev->dev, &dev_attr_device_id); | ||
1689 | if (err != 0) | ||
1690 | goto out_free_rw_buffer; | ||
1691 | |||
1692 | err = device_create_file(&pdev->dev, &dev_attr_vendor_id); | ||
1693 | if (err != 0) | ||
1694 | goto err_nand_sysfs_vendorid_failed; | ||
1695 | |||
1696 | err = device_create_file(&pdev->dev, &dev_attr_flash_size); | ||
1697 | if (err != 0) | ||
1698 | goto err_nand_sysfs_flash_size_failed; | ||
1699 | |||
1700 | err = device_create_file(&pdev->dev, &dev_attr_num_bad_blocks); | ||
1701 | if (err != 0) | ||
1702 | goto err_nand_sysfs_num_bad_blocks_failed; | ||
1703 | |||
1704 | err = device_create_file(&pdev->dev, &dev_attr_bb_bitmap); | ||
1705 | if (err != 0) | ||
1706 | goto err_nand_sysfs_bb_bitmap_failed; | ||
1707 | |||
1708 | pr_debug("%s: probe done.\n", __func__); | ||
1709 | return 0; | ||
1710 | |||
1711 | err_nand_sysfs_bb_bitmap_failed: | ||
1712 | device_remove_file(&pdev->dev, &dev_attr_num_bad_blocks); | ||
1713 | |||
1714 | err_nand_sysfs_num_bad_blocks_failed: | ||
1715 | device_remove_file(&pdev->dev, &dev_attr_flash_size); | ||
1716 | |||
1717 | err_nand_sysfs_flash_size_failed: | ||
1718 | device_remove_file(&pdev->dev, &dev_attr_vendor_id); | ||
1719 | |||
1720 | err_nand_sysfs_vendorid_failed: | ||
1721 | device_remove_file(&pdev->dev, &dev_attr_device_id); | ||
1722 | |||
1723 | out_free_rw_buffer: | ||
1724 | kfree(info->partial_unaligned_rw_buffer); | ||
1725 | |||
1726 | out_free_bbbmap: | ||
1727 | kfree(info->bb_bitmap); | ||
1728 | |||
1729 | out_free_ecc: | ||
1730 | kfree(info->ecc_errs); | ||
1731 | |||
1732 | out_dis_irq: | ||
1733 | disable_ints(info, 0xffffffff); | ||
1734 | out_free_irq: | ||
1735 | free_irq(pdev->resource[0].start, info); | ||
1736 | out_free_ecc_buf: | ||
1737 | dma_free_coherent(NULL, ECC_BUF_SZ, info->ecc_buf, info->ecc_addr); | ||
1738 | |||
1739 | out_free_dma_buf: | ||
1740 | dma_free_coherent(NULL, 128, info->oob_dma_buf, info->oob_dma_addr); | ||
1741 | |||
1742 | out_free_info: | ||
1743 | platform_set_drvdata(pdev, NULL); | ||
1744 | kfree(info); | ||
1745 | |||
1746 | return err; | ||
1747 | } | ||
1748 | |||
1749 | static int __devexit tegra_nand_remove(struct platform_device *pdev) | ||
1750 | { | ||
1751 | struct tegra_nand_info *info = dev_get_drvdata(&pdev->dev); | ||
1752 | |||
1753 | dev_set_drvdata(&pdev->dev, NULL); | ||
1754 | if (info) { | ||
1755 | mtd_device_unregister(&info->mtd); | ||
1756 | free_irq(pdev->resource[0].start, info); | ||
1757 | kfree(info->bb_bitmap); | ||
1758 | kfree(info->ecc_errs); | ||
1759 | kfree(info->partial_unaligned_rw_buffer); | ||
1760 | |||
1761 | device_remove_file(&pdev->dev, &dev_attr_device_id); | ||
1762 | device_remove_file(&pdev->dev, &dev_attr_vendor_id); | ||
1763 | device_remove_file(&pdev->dev, &dev_attr_flash_size); | ||
1764 | device_remove_file(&pdev->dev, &dev_attr_num_bad_blocks); | ||
1765 | device_remove_file(&pdev->dev, &dev_attr_bb_bitmap); | ||
1766 | |||
1767 | dma_free_coherent(NULL, ECC_BUF_SZ, info->ecc_buf, | ||
1768 | info->ecc_addr); | ||
1769 | /* size must match the 128-byte allocation made in tegra_nand_probe() */ | ||
1770 | dma_free_coherent(NULL, 128, info->oob_dma_buf, info->oob_dma_addr); | ||
1771 | kfree(info); | ||
1772 | } | ||
1773 | |||
1774 | return 0; | ||
1775 | } | ||
1776 | |||
1777 | static struct platform_driver tegra_nand_driver = { | ||
1778 | .probe = tegra_nand_probe, | ||
1779 | .remove = __devexit_p(tegra_nand_remove), | ||
1780 | .suspend = NULL, | ||
1781 | .resume = NULL, | ||
1782 | .driver = { | ||
1783 | .name = "tegra_nand", | ||
1784 | .owner = THIS_MODULE, | ||
1785 | }, | ||
1786 | }; | ||
1787 | |||
1788 | static int __init tegra_nand_init(void) | ||
1789 | { | ||
1790 | return platform_driver_register(&tegra_nand_driver); | ||
1791 | } | ||
1792 | |||
1793 | static void __exit tegra_nand_exit(void) | ||
1794 | { | ||
1795 | platform_driver_unregister(&tegra_nand_driver); | ||
1796 | } | ||
1797 | |||
1798 | module_init(tegra_nand_init); | ||
1799 | module_exit(tegra_nand_exit); | ||
1800 | |||
1801 | MODULE_LICENSE("GPL"); | ||
1802 | MODULE_DESCRIPTION(DRIVER_DESC); | ||
diff --git a/drivers/mtd/devices/tegra_nand.h b/drivers/mtd/devices/tegra_nand.h
new file mode 100644
index 00000000000..339d6cc7330
--- /dev/null
+++ b/drivers/mtd/devices/tegra_nand.h
@@ -0,0 +1,148 @@ | |||
1 | /* | ||
2 | * drivers/mtd/devices/tegra_nand.h | ||
3 | * | ||
4 | * Copyright (C) 2010 Google, Inc. | ||
5 | * Author: Dima Zavin <dima@android.com> | ||
6 | * Colin Cross <ccross@android.com> | ||
7 | * | ||
8 | * This software is licensed under the terms of the GNU General Public | ||
9 | * License version 2, as published by the Free Software Foundation, and | ||
10 | * may be copied, distributed, and modified under those terms. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | #ifndef __MTD_DEV_TEGRA_NAND_H | ||
20 | #define __MTD_DEV_TEGRA_NAND_H | ||
21 | |||
22 | #include <mach/io.h> | ||
23 | |||
24 | #define __BITMASK0(len) ((1U << (len)) - 1) | ||
25 | #define __BITMASK(start, len) (__BITMASK0(len) << (start)) | ||
26 | #define REG_BIT(bit) (1U << (bit)) | ||
27 | #define REG_FIELD(val, start, len) (((val) & __BITMASK0(len)) << (start)) | ||
28 | #define REG_FIELD_MASK(start, len) (~(__BITMASK((start), (len)))) | ||
29 | #define REG_GET_FIELD(val, start, len) (((val) >> (start)) & __BITMASK0(len)) | ||
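/*
 * Example: REG_FIELD(0x3, 16, 3) places the value 3 in the 3-bit field at
 * bits [18:16]; REG_GET_FIELD(reg, 16, 3) reads that field back out.
 */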
30 | |||
31 | /* Tegra NAND controller registers (virtual addresses via the IO_TO_VIRT mapping of the physical base) */ | ||
32 | #define TEGRA_NAND_PHYS 0x70008000 | ||
33 | #define TEGRA_NAND_BASE IO_TO_VIRT(TEGRA_NAND_PHYS) | ||
34 | #define COMMAND_REG (TEGRA_NAND_BASE + 0x00) | ||
35 | #define STATUS_REG (TEGRA_NAND_BASE + 0x04) | ||
36 | #define ISR_REG (TEGRA_NAND_BASE + 0x08) | ||
37 | #define IER_REG (TEGRA_NAND_BASE + 0x0c) | ||
38 | #define CONFIG_REG (TEGRA_NAND_BASE + 0x10) | ||
39 | #define TIMING_REG (TEGRA_NAND_BASE + 0x14) | ||
40 | #define RESP_REG (TEGRA_NAND_BASE + 0x18) | ||
41 | #define TIMING2_REG (TEGRA_NAND_BASE + 0x1c) | ||
42 | #define CMD_REG1 (TEGRA_NAND_BASE + 0x20) | ||
43 | #define CMD_REG2 (TEGRA_NAND_BASE + 0x24) | ||
44 | #define ADDR_REG1 (TEGRA_NAND_BASE + 0x28) | ||
45 | #define ADDR_REG2 (TEGRA_NAND_BASE + 0x2c) | ||
46 | #define DMA_MST_CTRL_REG (TEGRA_NAND_BASE + 0x30) | ||
47 | #define DMA_CFG_A_REG (TEGRA_NAND_BASE + 0x34) | ||
48 | #define DMA_CFG_B_REG (TEGRA_NAND_BASE + 0x38) | ||
49 | #define FIFO_CTRL_REG (TEGRA_NAND_BASE + 0x3c) | ||
50 | #define DATA_BLOCK_PTR_REG (TEGRA_NAND_BASE + 0x40) | ||
51 | #define TAG_PTR_REG (TEGRA_NAND_BASE + 0x44) | ||
52 | #define ECC_PTR_REG (TEGRA_NAND_BASE + 0x48) | ||
53 | #define DEC_STATUS_REG (TEGRA_NAND_BASE + 0x4c) | ||
54 | #define HWSTATUS_CMD_REG (TEGRA_NAND_BASE + 0x50) | ||
55 | #define HWSTATUS_MASK_REG (TEGRA_NAND_BASE + 0x54) | ||
56 | #define LL_CONFIG_REG (TEGRA_NAND_BASE + 0x58) | ||
57 | #define LL_PTR_REG (TEGRA_NAND_BASE + 0x5c) | ||
58 | #define LL_STATUS_REG (TEGRA_NAND_BASE + 0x60) | ||
59 | |||
60 | /* nand_command bits */ | ||
61 | #define COMMAND_GO REG_BIT(31) | ||
62 | #define COMMAND_CLE REG_BIT(30) | ||
63 | #define COMMAND_ALE REG_BIT(29) | ||
64 | #define COMMAND_PIO REG_BIT(28) | ||
65 | #define COMMAND_TX REG_BIT(27) | ||
66 | #define COMMAND_RX REG_BIT(26) | ||
67 | #define COMMAND_SEC_CMD REG_BIT(25) | ||
68 | #define COMMAND_AFT_DAT REG_BIT(24) | ||
69 | #define COMMAND_TRANS_SIZE(val) REG_FIELD((val), 20, 4) | ||
70 | #define COMMAND_A_VALID REG_BIT(19) | ||
71 | #define COMMAND_B_VALID REG_BIT(18) | ||
72 | #define COMMAND_RD_STATUS_CHK REG_BIT(17) | ||
73 | #define COMMAND_RBSY_CHK REG_BIT(16) | ||
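/* COMMAND_CE(n) sets bit (8 + n), i.e. one bit per chip-enable line for n = 0..7 */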
74 | #define COMMAND_CE(val) REG_BIT(8 + ((val) & 0x7)) | ||
75 | #define COMMAND_CLE_BYTE_SIZE(val) REG_FIELD((val), 4, 2) | ||
76 | #define COMMAND_ALE_BYTE_SIZE(val) REG_FIELD((val), 0, 4) | ||
77 | |||
78 | /* nand isr bits */ | ||
79 | #define ISR_UND REG_BIT(7) | ||
80 | #define ISR_OVR REG_BIT(6) | ||
81 | #define ISR_CMD_DONE REG_BIT(5) | ||
82 | #define ISR_ECC_ERR REG_BIT(4) | ||
83 | |||
84 | /* nand ier bits */ | ||
85 | #define IER_ERR_TRIG_VAL(val) REG_FIELD((val), 16, 4) | ||
86 | #define IER_UND REG_BIT(7) | ||
87 | #define IER_OVR REG_BIT(6) | ||
88 | #define IER_CMD_DONE REG_BIT(5) | ||
89 | #define IER_ECC_ERR REG_BIT(4) | ||
90 | #define IER_GIE REG_BIT(0) | ||
91 | |||
92 | /* nand config bits */ | ||
93 | #define CONFIG_HW_ECC REG_BIT(31) | ||
94 | #define CONFIG_ECC_SEL REG_BIT(30) | ||
95 | #define CONFIG_HW_ERR_CORRECTION REG_BIT(29) | ||
96 | #define CONFIG_PIPELINE_EN REG_BIT(28) | ||
97 | #define CONFIG_ECC_EN_TAG REG_BIT(27) | ||
98 | #define CONFIG_TVALUE(val) REG_FIELD((val), 24, 2) | ||
99 | #define CONFIG_SKIP_SPARE REG_BIT(23) | ||
100 | #define CONFIG_COM_BSY REG_BIT(22) | ||
101 | #define CONFIG_BUS_WIDTH REG_BIT(21) | ||
102 | #define CONFIG_EDO_MODE REG_BIT(19) | ||
103 | #define CONFIG_PAGE_SIZE_SEL(val) REG_FIELD((val), 16, 3) | ||
104 | #define CONFIG_SKIP_SPARE_SEL(val) REG_FIELD((val), 14, 2) | ||
105 | #define CONFIG_TAG_BYTE_SIZE(val) REG_FIELD((val), 0, 8) | ||
106 | |||
107 | /* nand timing bits */ | ||
108 | #define TIMING_TRP_RESP(val) REG_FIELD((val), 28, 4) | ||
109 | #define TIMING_TWB(val) REG_FIELD((val), 24, 4) | ||
110 | #define TIMING_TCR_TAR_TRR(val) REG_FIELD((val), 20, 4) | ||
111 | #define TIMING_TWHR(val) REG_FIELD((val), 16, 4) | ||
112 | #define TIMING_TCS(val) REG_FIELD((val), 14, 2) | ||
113 | #define TIMING_TWH(val) REG_FIELD((val), 12, 2) | ||
114 | #define TIMING_TWP(val) REG_FIELD((val), 8, 4) | ||
115 | #define TIMING_TRH(val) REG_FIELD((val), 4, 2) | ||
116 | #define TIMING_TRP(val) REG_FIELD((val), 0, 4) | ||
117 | |||
118 | /* nand timing2 bits */ | ||
119 | #define TIMING2_TADL(val) REG_FIELD((val), 0, 4) | ||
120 | |||
121 | /* nand dma_mst_ctrl bits */ | ||
122 | #define DMA_CTRL_DMA_GO REG_BIT(31) | ||
123 | #define DMA_CTRL_DIR REG_BIT(30) | ||
124 | #define DMA_CTRL_DMA_PERF_EN REG_BIT(29) | ||
125 | #define DMA_CTRL_IE_DMA_DONE REG_BIT(28) | ||
126 | #define DMA_CTRL_REUSE_BUFFER REG_BIT(27) | ||
127 | #define DMA_CTRL_BURST_SIZE(val) REG_FIELD((val), 24, 3) | ||
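/* status flag; tegra_nand_probe() clears it by writing it back to DMA_MST_CTRL_REG */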
128 | #define DMA_CTRL_IS_DMA_DONE REG_BIT(20) | ||
129 | #define DMA_CTRL_DMA_EN_A REG_BIT(2) | ||
130 | #define DMA_CTRL_DMA_EN_B REG_BIT(1) | ||
131 | |||
132 | /* nand dma_cfg_a/cfg_b bits */ | ||
133 | #define DMA_CFG_BLOCK_SIZE(val) REG_FIELD((val), 0, 16) | ||
134 | |||
135 | /* nand dec_status bits */ | ||
136 | #define DEC_STATUS_ERR_PAGE_NUM(val) REG_GET_FIELD((val), 24, 8) | ||
137 | #define DEC_STATUS_ERR_CNT(val) REG_GET_FIELD((val), 16, 8) | ||
138 | #define DEC_STATUS_ECC_FAIL_A REG_BIT(1) | ||
139 | #define DEC_STATUS_ECC_FAIL_B REG_BIT(0) | ||
140 | |||
141 | /* nand hwstatus_mask bits */ | ||
142 | #define HWSTATUS_RDSTATUS_MASK(val) REG_FIELD((val), 24, 8) | ||
143 | #define HWSTATUS_RDSTATUS_EXP_VAL(val) REG_FIELD((val), 16, 8) | ||
144 | #define HWSTATUS_RBSY_MASK(val) REG_FIELD((val), 8, 8) | ||
145 | #define HWSTATUS_RBSY_EXP_VAL(val) REG_FIELD((val), 0, 8) | ||
146 | |||
147 | #endif | ||
148 | |||