aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/staging
diff options
context:
space:
mode:
authorDavid Woodhouse <David.Woodhouse@intel.com>2010-06-08 13:08:32 -0400
committerDavid Woodhouse <David.Woodhouse@intel.com>2010-06-08 13:08:32 -0400
commit8ae4f63623c6a6d164e28d6ac327cf8287b0a24d (patch)
tree1e6d7427245dcb0cdc8605fe2b0ae518b28d2574 /drivers/staging
parentf39b56f6d067786b6c37c9a20776d1c0eb405a49 (diff)
spectra: Move to drivers/staging
It'll take some work before this is really shippable. Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/staging')
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/spectra/Kconfig40
-rw-r--r--drivers/staging/spectra/Makefile11
-rw-r--r--drivers/staging/spectra/README29
-rw-r--r--drivers/staging/spectra/ffsdefs.h58
-rw-r--r--drivers/staging/spectra/ffsport.c827
-rw-r--r--drivers/staging/spectra/ffsport.h84
-rw-r--r--drivers/staging/spectra/flash.c4731
-rw-r--r--drivers/staging/spectra/flash.h198
-rw-r--r--drivers/staging/spectra/lld.c339
-rw-r--r--drivers/staging/spectra/lld.h111
-rw-r--r--drivers/staging/spectra/lld_cdma.c910
-rw-r--r--drivers/staging/spectra/lld_cdma.h123
-rw-r--r--drivers/staging/spectra/lld_emu.c780
-rw-r--r--drivers/staging/spectra/lld_emu.h51
-rw-r--r--drivers/staging/spectra/lld_mtd.c687
-rw-r--r--drivers/staging/spectra/lld_mtd.h51
-rw-r--r--drivers/staging/spectra/lld_nand.c2601
-rw-r--r--drivers/staging/spectra/lld_nand.h131
-rw-r--r--drivers/staging/spectra/nand_regs.h619
-rw-r--r--drivers/staging/spectra/spectraswconfig.h82
22 files changed, 12466 insertions, 0 deletions
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 7696a664f8a..79cf5f72095 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -97,6 +97,8 @@ source "drivers/staging/octeon/Kconfig"
97 97
98source "drivers/staging/serqt_usb2/Kconfig" 98source "drivers/staging/serqt_usb2/Kconfig"
99 99
100source "drivers/staging/spectra/Kconfig"
101
100source "drivers/staging/quatech_usb2/Kconfig" 102source "drivers/staging/quatech_usb2/Kconfig"
101 103
102source "drivers/staging/vt6655/Kconfig" 104source "drivers/staging/vt6655/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index ea2e70e2fed..401049ef01d 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_R8187SE) += rtl8187se/
22obj-$(CONFIG_RTL8192SU) += rtl8192su/ 22obj-$(CONFIG_RTL8192SU) += rtl8192su/
23obj-$(CONFIG_RTL8192U) += rtl8192u/ 23obj-$(CONFIG_RTL8192U) += rtl8192u/
24obj-$(CONFIG_RTL8192E) += rtl8192e/ 24obj-$(CONFIG_RTL8192E) += rtl8192e/
25obj-$(CONFIG_MRST_NAND) += spectra/
25obj-$(CONFIG_TRANZPORT) += frontier/ 26obj-$(CONFIG_TRANZPORT) += frontier/
26obj-$(CONFIG_DREAM) += dream/ 27obj-$(CONFIG_DREAM) += dream/
27obj-$(CONFIG_POHMELFS) += pohmelfs/ 28obj-$(CONFIG_POHMELFS) += pohmelfs/
diff --git a/drivers/staging/spectra/Kconfig b/drivers/staging/spectra/Kconfig
new file mode 100644
index 00000000000..4bed96f6883
--- /dev/null
+++ b/drivers/staging/spectra/Kconfig
@@ -0,0 +1,40 @@
1
2menuconfig MRST_NAND
3 tristate "Moorestown NAND Flash controller"
4 depends on BLOCK
5 default n
6 ---help---
7 Enable the driver for the NAND Flash controller in Intel Moorestown
8 Platform
9
10choice
11 prompt "Compile for"
12 depends on MRST_NAND
13 default MRST_NAND_HW
14
15config MRST_NAND_HW
16 bool "Actual hardware mode"
17 help
18 Driver communicates with the actual hardware's register interface.
19 in DMA mode.
20
21config MRST_NAND_MTD
22 bool "Linux MTD mode"
23 depends on MTD
24 help
25 Driver communicates with the kernel MTD subsystem instead of its own
26 built-in hardware driver.
27
28config MRST_NAND_EMU
29 bool "RAM emulator testing"
30 help
31 Driver emulates Flash on a RAM buffer and / or disk file. Useful to test the behavior of FTL layer.
32
33endchoice
34
35config MRST_NAND_HW_DMA
36 bool
37 default n
38 depends on MRST_NAND_HW
39 help
40 Use DMA for native hardware interface.
diff --git a/drivers/staging/spectra/Makefile b/drivers/staging/spectra/Makefile
new file mode 100644
index 00000000000..2a949038533
--- /dev/null
+++ b/drivers/staging/spectra/Makefile
@@ -0,0 +1,11 @@
1#
2# Makefile of Intel Moorestown NAND controller driver
3#
4
5obj-$(CONFIG_MRST_NAND) += spectra.o
6spectra-y := ffsport.o flash.o lld.o
7spectra-$(CONFIG_MRST_NAND_HW) += lld_nand.o
8spectra-$(CONFIG_MRST_NAND_HW_DMA) += lld_cdma.o
9spectra-$(CONFIG_MRST_NAND_EMU) += lld_emu.o
10spectra-$(CONFIG_MRST_NAND_MTD) += lld_mtd.o
11
diff --git a/drivers/staging/spectra/README b/drivers/staging/spectra/README
new file mode 100644
index 00000000000..ecba559b899
--- /dev/null
+++ b/drivers/staging/spectra/README
@@ -0,0 +1,29 @@
1This is a driver for NAND controller of Intel Moorestown platform.
2
This driver is a standalone Linux block device driver; it acts as if it were a normal hard disk.
It includes three layers:
5 block layer interface - file ffsport.c
    Flash Translation Layer (FTL) - file flash.c (implements the NAND Flash Translation Layer, including address mapping, garbage collection, wear-leveling and so on)
7 Low level layer - file lld_nand.c/lld_cdma.c/lld_emu.c (which implements actual controller hardware registers access)
8
This driver can be built as a module or built-in.
10
11Dependency:
This driver depends on the IA firmware of the Intel Moorestown platform:
it needs the IA firmware to create the block table the first time.
To validate this driver code without the IA firmware, change the
macro AUTO_FORMAT_FLASH from 0 to 1 in file spectraswconfig.h; the
driver will then erase the whole NAND flash and create a new block table.
17
18TODO:
19 - Enable Command DMA feature support
20 - lower the memory footprint
21 - Remove most of the unnecessary global variables
 - Change all the uppercase variable / function names to lowercase
23 - Some other misc bugs
24
25Please send patches to:
26 Greg Kroah-Hartman <gregkh@suse.de>
27
28And Cc to: Gao Yunpeng <yunpeng.gao@intel.com>
29
diff --git a/drivers/staging/spectra/ffsdefs.h b/drivers/staging/spectra/ffsdefs.h
new file mode 100644
index 00000000000..a9e9cd233d2
--- /dev/null
+++ b/drivers/staging/spectra/ffsdefs.h
@@ -0,0 +1,58 @@
1/*
2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
/*
 * Shared constants for the spectra driver: generic pass/fail codes,
 * FTL/LLD command opcodes, and command-completion event codes.
 */
#ifndef _FFSDEFS_
#define _FFSDEFS_

/* Generic status values used throughout the driver. */
#define CLEAR 0			/*use this to clear a field instead of "fail"*/
#define SET   1			/*use this to set a field instead of "pass"*/
#define FAIL 1			/*failed flag*/
#define PASS 0			/*success flag*/
#define ERR -1			/*error flag*/

/* Low-level-driver command opcodes (see lld*.c). */
#define ERASE_CMD		10
#define WRITE_MAIN_CMD		11
#define READ_MAIN_CMD		12
#define WRITE_SPARE_CMD		13
#define READ_SPARE_CMD		14
#define WRITE_MAIN_SPARE_CMD	15
#define READ_MAIN_SPARE_CMD	16
#define MEMCOPY_CMD		17
#define DUMMY_CMD		99

/* Event codes reported on command completion. */
#define EVENT_PASS				0x00
#define EVENT_CORRECTABLE_DATA_ERROR_FIXED	0x01
#define EVENT_UNCORRECTABLE_DATA_ERROR		0x02
#define EVENT_TIME_OUT				0x03
#define EVENT_PROGRAM_FAILURE			0x04
#define EVENT_ERASE_FAILURE			0x05
#define EVENT_MEMCOPY_FAILURE			0x06
#define EVENT_FAIL				0x07

#define EVENT_NONE				0x22
#define EVENT_DMA_CMD_COMP			0x77
#define EVENT_ECC_TRANSACTION_DONE		0x88
#define EVENT_DMA_CMD_FAIL			0x99

/* Per-command completion status values. */
#define CMD_PASS	0
#define CMD_FAIL	1
#define CMD_ABORT	2
#define CMD_NOT_DONE	3

#endif /* _FFSDEFS_ */
diff --git a/drivers/staging/spectra/ffsport.c b/drivers/staging/spectra/ffsport.c
new file mode 100644
index 00000000000..3c3565d4054
--- /dev/null
+++ b/drivers/staging/spectra/ffsport.c
@@ -0,0 +1,827 @@
1/*
2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
20#include "ffsport.h"
21#include "flash.h"
22#include <linux/interrupt.h>
23#include <linux/delay.h>
24#include <linux/blkdev.h>
25#include <linux/wait.h>
26#include <linux/mutex.h>
27#include <linux/kthread.h>
28#include <linux/log2.h>
29#include <linux/init.h>
30
31/**** Helper functions used for Div, Remainder operation on u64 ****/
32
33/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
34* Function: GLOB_Calc_Used_Bits
35* Inputs: Power of 2 number
36* Outputs: Number of Used Bits
37* 0, if the argument is 0
38* Description: Calculate the number of bits used by a given power of 2 number
39* Number can be upto 32 bit
40*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
41int GLOB_Calc_Used_Bits(u32 n)
42{
43 int tot_bits = 0;
44
45 if (n >= 1 << 16) {
46 n >>= 16;
47 tot_bits += 16;
48 }
49
50 if (n >= 1 << 8) {
51 n >>= 8;
52 tot_bits += 8;
53 }
54
55 if (n >= 1 << 4) {
56 n >>= 4;
57 tot_bits += 4;
58 }
59
60 if (n >= 1 << 2) {
61 n >>= 2;
62 tot_bits += 2;
63 }
64
65 if (n >= 1 << 1)
66 tot_bits += 1;
67
68 return ((n == 0) ? (0) : tot_bits);
69}
70
71/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
72* Function: GLOB_u64_Div
73* Inputs: Number of u64
74* A power of 2 number as Division
75* Outputs: Quotient of the Divisor operation
76* Description: It divides the address by divisor by using bit shift operation
77* (essentially without explicitely using "/").
78* Divisor is a power of 2 number and Divided is of u64
79*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
80u64 GLOB_u64_Div(u64 addr, u32 divisor)
81{
82 return (u64)(addr >> GLOB_Calc_Used_Bits(divisor));
83}
84
85/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
86* Function: GLOB_u64_Remainder
87* Inputs: Number of u64
88* Divisor Type (1 -PageAddress, 2- BlockAddress)
89* Outputs: Remainder of the Division operation
90* Description: It calculates the remainder of a number (of u64) by
91* divisor(power of 2 number ) by using bit shifting and multiply
92* operation(essentially without explicitely using "/").
93*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
94u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)
95{
96 u64 result = 0;
97
98 if (divisor_type == 1) { /* Remainder -- Page */
99 result = (addr >> DeviceInfo.nBitsInPageDataSize);
100 result = result * DeviceInfo.wPageDataSize;
101 } else if (divisor_type == 2) { /* Remainder -- Block */
102 result = (addr >> DeviceInfo.nBitsInBlockDataSize);
103 result = result * DeviceInfo.wBlockDataSize;
104 }
105
106 result = addr - result;
107
108 return result;
109}
110
111#define NUM_DEVICES 1
112#define PARTITIONS 8
113
114#define GLOB_SBD_NAME "nd"
115#define GLOB_SBD_IRQ_NUM (29)
116#define GLOB_VERSION "driver version 20091110"
117
118#define GLOB_SBD_IOCTL_GC (0x7701)
119#define GLOB_SBD_IOCTL_WL (0x7702)
120#define GLOB_SBD_IOCTL_FORMAT (0x7703)
121#define GLOB_SBD_IOCTL_ERASE_FLASH (0x7704)
122#define GLOB_SBD_IOCTL_FLUSH_CACHE (0x7705)
123#define GLOB_SBD_IOCTL_COPY_BLK_TABLE (0x7706)
124#define GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE (0x7707)
125#define GLOB_SBD_IOCTL_GET_NAND_INFO (0x7708)
126#define GLOB_SBD_IOCTL_WRITE_DATA (0x7709)
127#define GLOB_SBD_IOCTL_READ_DATA (0x770A)
128
129static int reserved_mb = 0;
130module_param(reserved_mb, int, 0);
131MODULE_PARM_DESC(reserved_mb, "Reserved space for OS image, in MiB (default 25 MiB)");
132
133int nand_debug_level;
134module_param(nand_debug_level, int, 0644);
135MODULE_PARM_DESC(nand_debug_level, "debug level value: 1-3");
136
137MODULE_LICENSE("GPL");
138
/* Per-device state for one spectra block device. */
struct spectra_nand_dev {
	struct pci_dev *dev;
	u64 size;			/* usable capacity in bytes, reserved blocks excluded
					 * (computed in SBD_setup_device()) */
	u16 users;
	spinlock_t qlock;		/* request-queue lock passed to blk_init_queue() */
	void __iomem *ioaddr;	/* Mapped address */
	struct request_queue *queue;
	struct task_struct *thread;	/* worker: spectra_trans_thread() */
	struct gendisk *gd;
	u8 *tmp_buf;			/* one-page bounce buffer for partial-page I/O */
};
150
151
152static int GLOB_SBD_majornum;
153
154static char *GLOB_version = GLOB_VERSION;
155
156static struct spectra_nand_dev nand_device[NUM_DEVICES];
157
158static struct mutex spectra_lock;
159
160static int res_blks_os = 1;
161
162struct spectra_indentfy_dev_tag IdentifyDeviceData;
163
164static int force_flush_cache(void)
165{
166 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
167 __FILE__, __LINE__, __func__);
168
169 if (ERR == GLOB_FTL_Flush_Cache()) {
170 printk(KERN_ERR "Fail to Flush FTL Cache!\n");
171 return -EFAULT;
172 }
173#if CMD_DMA
174 if (glob_ftl_execute_cmds())
175 return -EIO;
176 else
177 return 0;
178#endif
179 return 0;
180}
181
/* User-space argument for the page read/write ioctls. */
struct ioctl_rw_page_info {
	u8 *data;		/* user buffer, one flash page of data */
	unsigned int page;	/* page index within the device */
};
186
187static int ioctl_read_page_data(unsigned long arg)
188{
189 u8 *buf;
190 struct ioctl_rw_page_info info;
191 int result = PASS;
192
193 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
194 return -EFAULT;
195
196 buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
197 if (!buf) {
198 printk(KERN_ERR "ioctl_read_page_data: "
199 "failed to allocate memory\n");
200 return -ENOMEM;
201 }
202
203 mutex_lock(&spectra_lock);
204 result = GLOB_FTL_Page_Read(buf,
205 (u64)info.page * IdentifyDeviceData.PageDataSize);
206 mutex_unlock(&spectra_lock);
207
208 if (copy_to_user((void __user *)info.data, buf,
209 IdentifyDeviceData.PageDataSize)) {
210 printk(KERN_ERR "ioctl_read_page_data: "
211 "failed to copy user data\n");
212 kfree(buf);
213 return -EFAULT;
214 }
215
216 kfree(buf);
217 return result;
218}
219
220static int ioctl_write_page_data(unsigned long arg)
221{
222 u8 *buf;
223 struct ioctl_rw_page_info info;
224 int result = PASS;
225
226 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
227 return -EFAULT;
228
229 buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
230 if (!buf) {
231 printk(KERN_ERR "ioctl_write_page_data: "
232 "failed to allocate memory\n");
233 return -ENOMEM;
234 }
235
236 if (copy_from_user(buf, (void __user *)info.data,
237 IdentifyDeviceData.PageDataSize)) {
238 printk(KERN_ERR "ioctl_write_page_data: "
239 "failed to copy user data\n");
240 kfree(buf);
241 return -EFAULT;
242 }
243
244 mutex_lock(&spectra_lock);
245 result = GLOB_FTL_Page_Write(buf,
246 (u64)info.page * IdentifyDeviceData.PageDataSize);
247 mutex_unlock(&spectra_lock);
248
249 kfree(buf);
250 return result;
251}
252
253/* Return how many blocks should be reserved for bad block replacement */
254static int get_res_blk_num_bad_blk(void)
255{
256 return IdentifyDeviceData.wDataBlockNum / 10;
257}
258
259/* Return how many blocks should be reserved for OS image */
260static int get_res_blk_num_os(void)
261{
262 u32 res_blks, blk_size;
263
264 blk_size = IdentifyDeviceData.PageDataSize *
265 IdentifyDeviceData.PagesPerBlock;
266
267 res_blks = (reserved_mb * 1024 * 1024) / blk_size;
268
269 if ((res_blks < 1) || (res_blks >= IdentifyDeviceData.wDataBlockNum))
270 res_blks = 1; /* Reserved 1 block for block table */
271
272 return res_blks;
273}
274
/*
 * prepare_flush callback for blk_queue_ordered(): tags @rq as a
 * block-layer flush command so do_transfer() recognizes it
 * (REQ_TYPE_LINUX_BLOCK / REQ_LB_OP_FLUSH) and flushes the FTL cache.
 */
static void SBD_prepare_flush(struct request_queue *q, struct request *rq)
{
	rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
	/* rq->timeout = 5 * HZ; */
	rq->cmd[0] = REQ_LB_OP_FLUSH;
}
281
/* Transfer a full request. */
/*
 * do_transfer - service one block-layer request through the FTL
 * @tr:  the spectra device the request belongs to
 * @req: the request to service
 *
 * Translates 512-byte sectors to NAND-page accesses.  A request is
 * split into an unaligned leading chunk (rsect/tsect), whole pages,
 * and an unaligned trailing chunk; partial pages are bounced through
 * tr->tmp_buf, and partial writes do a read-merge-write of the page.
 * Flush requests (tagged by SBD_prepare_flush()) flush the FTL cache.
 *
 * Returns 0 on success, -EIO on any failure.  Called from
 * spectra_trans_thread() with spectra_lock held.
 */
static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
{
	u64 start_addr, addr;
	u32 logical_start_sect, hd_start_sect;
	u32 nsect, hd_sects;
	u32 rsect, tsect = 0;
	char *buf;
	u32 ratio = IdentifyDeviceData.PageDataSize >> 9; /* sectors per page */

	start_addr = (u64)(blk_rq_pos(req)) << 9;
	/* Add a big enough offset to prevent the OS Image from
	 * being accessed or damaged by file system */
	start_addr += IdentifyDeviceData.PageDataSize *
		IdentifyDeviceData.PagesPerBlock *
		res_blks_os;

	if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
			req->cmd[0] == REQ_LB_OP_FLUSH) {
		if (force_flush_cache()) /* Fail to flush cache */
			return -EIO;
		else
			return 0;
	}

	if (!blk_fs_request(req))
		return -EIO;

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(tr->gd)) {
		printk(KERN_ERR "Spectra error: request over the NAND "
			"capacity!sector %d, current_nr_sectors %d, "
			"while capacity is %d\n",
			(int)blk_rq_pos(req),
			blk_rq_cur_sectors(req),
			(int)get_capacity(tr->gd));
		return -EIO;
	}

	/* Split into leading partial page, whole pages, trailing partial. */
	logical_start_sect = start_addr >> 9;
	hd_start_sect = logical_start_sect / ratio;
	rsect = logical_start_sect - hd_start_sect * ratio;

	addr = (u64)hd_start_sect * ratio * 512;
	buf = req->buffer;
	nsect = blk_rq_cur_sectors(req);

	if (rsect)
		tsect = (ratio - rsect) < nsect ? (ratio - rsect) : nsect;

	switch (rq_data_dir(req)) {
	case READ:
		/* Read the first NAND page */
		if (rsect) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
			memcpy(buf, tr->tmp_buf + (rsect << 9), tsect << 9);
			addr += IdentifyDeviceData.PageDataSize;
			buf += tsect << 9;
			nsect -= tsect;
		}

		/* Read the other NAND pages */
		for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
			if (GLOB_FTL_Page_Read(buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += IdentifyDeviceData.PageDataSize;
		}

		/* Read the last NAND pages */
		if (nsect % ratio) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
			memcpy(buf, tr->tmp_buf, (nsect % ratio) << 9);
		}
#if CMD_DMA
		if (glob_ftl_execute_cmds())
			return -EIO;
		else
			return 0;
#endif
		return 0;

	case WRITE:
		/* Write the first NAND page.  Partial page: read the
		 * existing page, merge in the new sectors, write back. */
		if (rsect) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
			memcpy(tr->tmp_buf + (rsect << 9), buf, tsect << 9);
			if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += tsect << 9;
			nsect -= tsect;
		}

		/* Write the other NAND pages */
		for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
			if (GLOB_FTL_Page_Write(buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += IdentifyDeviceData.PageDataSize;
		}

		/* Write the last NAND pages (read-merge-write again). */
		if (nsect % ratio) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
			memcpy(tr->tmp_buf, buf, (nsect % ratio) << 9);
			if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
					__FILE__, __LINE__);
				return -EIO;
			}
		}
#if CMD_DMA
		if (glob_ftl_execute_cmds())
			return -EIO;
		else
			return 0;
#endif
		return 0;

	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return -EIO;
	}
}
431
/* This function is copied from drivers/mtd/mtd_blkdevs.c */
/*
 * Worker thread: fetches requests from the block queue and runs them
 * through do_transfer() under spectra_lock, so the (possibly sleeping)
 * FTL work happens in process context.  Woken by GLOB_SBD_request().
 */
static int spectra_trans_thread(void *arg)
{
	struct spectra_nand_dev *tr = arg;
	struct request_queue *rq = tr->queue;
	struct request *req = NULL;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC;

	spin_lock_irq(rq->queue_lock);
	while (!kthread_should_stop()) {
		int res;

		if (!req) {
			req = blk_fetch_request(rq);
			if (!req) {
				/* Queue empty: sleep until the request
				 * function wakes us up again. */
				set_current_state(TASK_INTERRUPTIBLE);
				spin_unlock_irq(rq->queue_lock);
				schedule();
				spin_lock_irq(rq->queue_lock);
				continue;
			}
		}

		/* Drop the queue lock while doing the transfer. */
		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&spectra_lock);
		res = do_transfer(tr, req);
		mutex_unlock(&spectra_lock);

		spin_lock_irq(rq->queue_lock);

		/* Keep the same request while it still has chunks left. */
		if (!__blk_end_request_cur(req, res))
			req = NULL;
	}

	if (req)
		__blk_end_request_all(req, -EIO);

	spin_unlock_irq(rq->queue_lock);

	return 0;
}
476
477
478/* Request function that "handles clustering". */
479static void GLOB_SBD_request(struct request_queue *rq)
480{
481 struct spectra_nand_dev *pdev = rq->queuedata;
482 wake_up_process(pdev->thread);
483}
484
485static int GLOB_SBD_open(struct block_device *bdev, fmode_t mode)
486
487{
488 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
489 __FILE__, __LINE__, __func__);
490 return 0;
491}
492
493static int GLOB_SBD_release(struct gendisk *disk, fmode_t mode)
494{
495 int ret;
496
497 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
498 __FILE__, __LINE__, __func__);
499
500 mutex_lock(&spectra_lock);
501 ret = force_flush_cache();
502 mutex_unlock(&spectra_lock);
503
504 return 0;
505}
506
507static int GLOB_SBD_getgeo(struct block_device *bdev, struct hd_geometry *geo)
508{
509 geo->heads = 4;
510 geo->sectors = 16;
511 geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
512
513 nand_dbg_print(NAND_DBG_DEBUG,
514 "heads: %d, sectors: %d, cylinders: %d\n",
515 geo->heads, geo->sectors, geo->cylinders);
516
517 return 0;
518}
519
/*
 * GLOB_SBD_ioctl - handle the driver-private GLOB_SBD_IOCTL_* commands
 *
 * Triggers FTL maintenance (GC, wear leveling, format, cache flush),
 * copies FTL tables or device info to user space, or reads/writes a
 * single flash page.  Returns 0 on success (the page read/write
 * commands return the FTL PASS/FAIL result), -EFAULT on FTL or
 * user-copy failures, -ENOTTY for unknown commands.
 */
int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	int ret;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	switch (cmd) {
	case GLOB_SBD_IOCTL_GC:
		nand_dbg_print(NAND_DBG_DEBUG,
			"Spectra IOCTL: Garbage Collection "
			"being performed\n");
		if (PASS != GLOB_FTL_Garbage_Collection())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_WL:
		nand_dbg_print(NAND_DBG_DEBUG,
			"Spectra IOCTL: Static Wear Leveling "
			"being performed\n");
		if (PASS != GLOB_FTL_Wear_Leveling())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_FORMAT:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Flash format "
			"being performed\n");
		if (PASS != GLOB_FTL_Flash_Format())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_FLUSH_CACHE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Cache flush "
			"being performed\n");
		mutex_lock(&spectra_lock);
		ret = force_flush_cache();
		mutex_unlock(&spectra_lock);
		return ret;

	case GLOB_SBD_IOCTL_COPY_BLK_TABLE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			"Copy block table\n");
		if (copy_to_user((void __user *)arg,
			get_blk_table_start_addr(),
			get_blk_table_len()))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			"Copy wear leveling table\n");
		if (copy_to_user((void __user *)arg,
			get_wear_leveling_table_start_addr(),
			get_wear_leveling_table_len()))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_GET_NAND_INFO:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			"Get NAND info\n");
		if (copy_to_user((void __user *)arg, &IdentifyDeviceData,
			sizeof(IdentifyDeviceData)))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_WRITE_DATA:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			"Write one page data\n");
		return ioctl_write_page_data(arg);

	case GLOB_SBD_IOCTL_READ_DATA:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			"Read one page data\n");
		return ioctl_read_page_data(arg);
	}

	return -ENOTTY;
}
599
/* Block device operations; hooked into each gendisk in SBD_setup_device(). */
static struct block_device_operations GLOB_SBD_ops = {
	.owner = THIS_MODULE,
	.open = GLOB_SBD_open,
	.release = GLOB_SBD_release,
	.locked_ioctl = GLOB_SBD_ioctl,
	.getgeo = GLOB_SBD_getgeo,
};
607
608static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
609{
610 int res_blks;
611 u32 sects;
612
613 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
614 __FILE__, __LINE__, __func__);
615
616 memset(dev, 0, sizeof(struct spectra_nand_dev));
617
618 nand_dbg_print(NAND_DBG_WARN, "Reserved %d blocks "
619 "for OS image, %d blocks for bad block replacement.\n",
620 get_res_blk_num_os(),
621 get_res_blk_num_bad_blk());
622
623 res_blks = get_res_blk_num_bad_blk() + get_res_blk_num_os();
624
625 dev->size = (u64)IdentifyDeviceData.PageDataSize *
626 IdentifyDeviceData.PagesPerBlock *
627 (IdentifyDeviceData.wDataBlockNum - res_blks);
628
629 res_blks_os = get_res_blk_num_os();
630
631 spin_lock_init(&dev->qlock);
632
633 dev->tmp_buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
634 if (!dev->tmp_buf) {
635 printk(KERN_ERR "Failed to kmalloc memory in %s Line %d, exit.\n",
636 __FILE__, __LINE__);
637 goto out_vfree;
638 }
639
640 dev->queue = blk_init_queue(GLOB_SBD_request, &dev->qlock);
641 if (dev->queue == NULL) {
642 printk(KERN_ERR
643 "Spectra: Request queue could not be initialized."
644 " Aborting\n ");
645 goto out_vfree;
646 }
647 dev->queue->queuedata = dev;
648
649 /* As Linux block layer doens't support >4KB hardware sector, */
650 /* Here we force report 512 byte hardware sector size to Kernel */
651 blk_queue_logical_block_size(dev->queue, 512);
652
653 blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH,
654 SBD_prepare_flush);
655
656 dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
657 if (IS_ERR(dev->thread)) {
658 blk_cleanup_queue(dev->queue);
659 unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
660 return PTR_ERR(dev->thread);
661 }
662
663 dev->gd = alloc_disk(PARTITIONS);
664 if (!dev->gd) {
665 printk(KERN_ERR
666 "Spectra: Could not allocate disk. Aborting \n ");
667 goto out_vfree;
668 }
669 dev->gd->major = GLOB_SBD_majornum;
670 dev->gd->first_minor = which * PARTITIONS;
671 dev->gd->fops = &GLOB_SBD_ops;
672 dev->gd->queue = dev->queue;
673 dev->gd->private_data = dev;
674 snprintf(dev->gd->disk_name, 32, "%s%c", GLOB_SBD_NAME, which + 'a');
675
676 sects = dev->size >> 9;
677 nand_dbg_print(NAND_DBG_WARN, "Capacity sects: %d\n", sects);
678 set_capacity(dev->gd, sects);
679
680 add_disk(dev->gd);
681
682 return 0;
683out_vfree:
684 return -ENOMEM;
685}
686
687/*
688static ssize_t show_nand_block_num(struct device *dev,
689 struct device_attribute *attr, char *buf)
690{
691 return snprintf(buf, PAGE_SIZE, "%d\n",
692 (int)IdentifyDeviceData.wDataBlockNum);
693}
694
695static ssize_t show_nand_pages_per_block(struct device *dev,
696 struct device_attribute *attr, char *buf)
697{
698 return snprintf(buf, PAGE_SIZE, "%d\n",
699 (int)IdentifyDeviceData.PagesPerBlock);
700}
701
702static ssize_t show_nand_page_size(struct device *dev,
703 struct device_attribute *attr, char *buf)
704{
705 return snprintf(buf, PAGE_SIZE, "%d\n",
706 (int)IdentifyDeviceData.PageDataSize);
707}
708
709static DEVICE_ATTR(nand_block_num, 0444, show_nand_block_num, NULL);
710static DEVICE_ATTR(nand_pages_per_block, 0444, show_nand_pages_per_block, NULL);
711static DEVICE_ATTR(nand_page_size, 0444, show_nand_page_size, NULL);
712
713static void create_sysfs_entry(struct device *dev)
714{
715 if (device_create_file(dev, &dev_attr_nand_block_num))
716 printk(KERN_ERR "Spectra: "
717 "failed to create sysfs entry nand_block_num.\n");
718 if (device_create_file(dev, &dev_attr_nand_pages_per_block))
719 printk(KERN_ERR "Spectra: "
720 "failed to create sysfs entry nand_pages_per_block.\n");
721 if (device_create_file(dev, &dev_attr_nand_page_size))
722 printk(KERN_ERR "Spectra: "
723 "failed to create sysfs entry nand_page_size.\n");
724}
725*/
726
/*
 * Module init: register the block major, bring up the flash device and
 * the FTL, then create the block devices.  On failure, already-acquired
 * FTL/flash resources are released in reverse order.
 */
static int GLOB_SBD_init(void)
{
	int i;

	/* Set debug output level (0~3) here. 3 is most verbose */
	printk(KERN_ALERT "Spectra: %s\n", GLOB_version);

	mutex_init(&spectra_lock);

	/* 0 asks the block layer to pick a free major number. */
	GLOB_SBD_majornum = register_blkdev(0, GLOB_SBD_NAME);
	if (GLOB_SBD_majornum <= 0) {
		printk(KERN_ERR "Unable to get the major %d for Spectra",
			GLOB_SBD_majornum);
		return -EBUSY;
	}

	if (PASS != GLOB_FTL_Flash_Init()) {
		printk(KERN_ERR "Spectra: Unable to Initialize Flash Device. "
			"Aborting\n");
		goto out_flash_register;
	}

	/* create_sysfs_entry(&dev->dev); */

	if (PASS != GLOB_FTL_IdentifyDevice(&IdentifyDeviceData)) {
		printk(KERN_ERR "Spectra: Unable to Read Flash Device. "
			"Aborting\n");
		goto out_flash_register;
	} else {
		nand_dbg_print(NAND_DBG_WARN, "In GLOB_SBD_init: "
			"Num blocks=%d, pagesperblock=%d, "
			"pagedatasize=%d, ECCBytesPerSector=%d\n",
			(int)IdentifyDeviceData.NumBlocks,
			(int)IdentifyDeviceData.PagesPerBlock,
			(int)IdentifyDeviceData.PageDataSize,
			(int)IdentifyDeviceData.wECCBytesPerSector);
	}

	printk(KERN_ALERT "Spectra: searching block table, please wait ...\n");
	if (GLOB_FTL_Init() != PASS) {
		printk(KERN_ERR "Spectra: Unable to Initialize FTL Layer. "
			"Aborting\n");
		goto out_ftl_flash_register;
	}
	printk(KERN_ALERT "Spectra: block table has been found.\n");

	for (i = 0; i < NUM_DEVICES; i++)
		if (SBD_setup_device(&nand_device[i], i) == -ENOMEM)
			goto out_ftl_flash_register;

	nand_dbg_print(NAND_DBG_DEBUG,
		       "Spectra: module loaded with major number %d\n",
		       GLOB_SBD_majornum);

	return 0;

out_ftl_flash_register:
	GLOB_FTL_Cache_Release();
out_flash_register:
	GLOB_FTL_Flash_Release();
	unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
	printk(KERN_ERR "Spectra: Module load failed.\n");

	return -ENOMEM;
}
792
/*
 * Module exit: tear down the disks and queues, unregister the major,
 * flush the FTL cache a final time, and release the FTL and flash
 * layers.
 */
static void __exit GLOB_SBD_exit(void)
{
	int i;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	for (i = 0; i < NUM_DEVICES; i++) {
		struct spectra_nand_dev *dev = &nand_device[i];
		if (dev->gd) {
			del_gendisk(dev->gd);
			put_disk(dev->gd);
		}
		if (dev->queue)
			blk_cleanup_queue(dev->queue);
		kfree(dev->tmp_buf);
	}

	unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);

	mutex_lock(&spectra_lock);
	force_flush_cache();
	mutex_unlock(&spectra_lock);

	GLOB_FTL_Cache_Release();

	GLOB_FTL_Flash_Release();

	nand_dbg_print(NAND_DBG_DEBUG,
		       "Spectra FTL module (major number %d) unloaded.\n",
		       GLOB_SBD_majornum);
}
825
826module_init(GLOB_SBD_init);
827module_exit(GLOB_SBD_exit);
diff --git a/drivers/staging/spectra/ffsport.h b/drivers/staging/spectra/ffsport.h
new file mode 100644
index 00000000000..6c5d90c5343
--- /dev/null
+++ b/drivers/staging/spectra/ffsport.h
@@ -0,0 +1,84 @@
1/*
2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
#ifndef _FFSPORT_
#define _FFSPORT_

#include "ffsdefs.h"

/* Structure-packing helpers; only the GCC spellings are provided, so
 * building with a non-GNU compiler leaves these undefined. */
#if defined __GNUC__
#define PACKED
#define PACKED_GNU __attribute__ ((packed))
#define UNALIGNED
#endif

#include <linux/semaphore.h>
#include <linux/string.h>	/* for strcpy(), stricmp(), etc */
#include <linux/mm.h>		/* for kmalloc(), kfree() */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>

#include <linux/kernel.h>	/* printk() */
#include <linux/fs.h>		/* everything... */
#include <linux/errno.h>	/* error codes */
#include <linux/types.h>	/* size_t */
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/pci.h>
#include "flash.h"

/* NOTE(review): VERBOSE is defined unconditionally here, so the empty
 * nand_dbg_print() fallback below is effectively dead code. */
#define VERBOSE 1

/* Debug verbosity levels: a message is emitted when its level is <=
 * the runtime nand_debug_level (higher level == chattier). */
#define NAND_DBG_WARN 1
#define NAND_DBG_DEBUG 2
#define NAND_DBG_TRACE 3

extern int nand_debug_level;

#ifdef VERBOSE
#define nand_dbg_print(level, args...) \
	do { \
		if (level <= nand_debug_level) \
			printk(KERN_ALERT args); \
	} while (0)
#else
#define nand_dbg_print(level, args...)
#endif

/* Byte-swap helpers: identity on little-endian builds, full swap when
 * SUPPORT_BIG_ENDIAN is configured. */
#ifdef SUPPORT_BIG_ENDIAN
#define INVERTUINT16(w) ((u16)(((u16)(w)) << 8) | \
	(u16)((u16)(w) >> 8))

#define INVERTUINT32(dw) (((u32)(dw) << 24) | \
	(((u32)(dw) << 8) & 0x00ff0000) | \
	(((u32)(dw) >> 8) & 0x0000ff00) | \
	((u32)(dw) >> 24))
#else
#define INVERTUINT16(w) w
#define INVERTUINT32(dw) dw
#endif

/* Helpers implemented in ffsport.c: bit-width of n, and 64-bit
 * division/remainder wrappers. */
extern int GLOB_Calc_Used_Bits(u32 n);
extern u64 GLOB_u64_Div(u64 addr, u32 divisor);
extern u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type);

#endif /* _FFSPORT_ */
diff --git a/drivers/staging/spectra/flash.c b/drivers/staging/spectra/flash.c
new file mode 100644
index 00000000000..134aa5166a8
--- /dev/null
+++ b/drivers/staging/spectra/flash.c
@@ -0,0 +1,4731 @@
1/*
2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
20#include <linux/fs.h>
21#include <linux/slab.h>
22
23#include "flash.h"
24#include "ffsdefs.h"
25#include "lld.h"
26#include "lld_nand.h"
27#if CMD_DMA
28#include "lld_cdma.h"
29#endif
30
31#define BLK_FROM_ADDR(addr) ((u32)(addr >> DeviceInfo.nBitsInBlockDataSize))
32#define PAGE_FROM_ADDR(addr, Block) ((u16)((addr - (u64)Block * \
33 DeviceInfo.wBlockDataSize) >> DeviceInfo.nBitsInPageDataSize))
34
35#define IS_SPARE_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
36 BAD_BLOCK) && SPARE_BLOCK == (pbt[blk] & SPARE_BLOCK))
37
38#define IS_DATA_BLOCK(blk) (0 == (pbt[blk] & BAD_BLOCK))
39
40#define IS_DISCARDED_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
41 BAD_BLOCK) && DISCARD_BLOCK == (pbt[blk] & DISCARD_BLOCK))
42
43#define IS_BAD_BLOCK(blk) (BAD_BLOCK == (pbt[blk] & BAD_BLOCK))
44
45#if DEBUG_BNDRY
46void debug_boundary_lineno_error(int chnl, int limit, int no,
47 int lineno, char *filename)
48{
49 if (chnl >= limit)
50 printk(KERN_ERR "Boundary Check Fail value %d >= limit %d, "
51 "at %s:%d. Other info:%d. Aborting...\n",
52 chnl, limit, filename, lineno, no);
53}
54/* static int globalmemsize; */
55#endif
56
57static u16 FTL_Cache_If_Hit(u64 dwPageAddr);
58static int FTL_Cache_Read(u64 dwPageAddr);
59static void FTL_Cache_Read_Page(u8 *pData, u64 dwPageAddr,
60 u16 cache_blk);
61static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr,
62 u8 cache_blk, u16 flag);
63static int FTL_Cache_Write(void);
64static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr);
65static void FTL_Calculate_LRU(void);
66static u32 FTL_Get_Block_Index(u32 wBlockNum);
67
68static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
69 u8 BT_Tag, u16 *Page);
70static int FTL_Read_Block_Table(void);
71static int FTL_Write_Block_Table(int wForce);
72static int FTL_Write_Block_Table_Data(void);
73static int FTL_Check_Block_Table(int wOldTable);
74static int FTL_Static_Wear_Leveling(void);
75static u32 FTL_Replace_Block_Table(void);
76static int FTL_Write_IN_Progress_Block_Table_Page(void);
77
78static u32 FTL_Get_Page_Num(u64 length);
79static u64 FTL_Get_Physical_Block_Addr(u64 blk_addr);
80
81static u32 FTL_Replace_OneBlock(u32 wBlockNum,
82 u32 wReplaceNum);
83static u32 FTL_Replace_LWBlock(u32 wBlockNum,
84 int *pGarbageCollect);
85static u32 FTL_Replace_MWBlock(void);
86static int FTL_Replace_Block(u64 blk_addr);
87static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX);
88
89static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr, u64 blk_addr);
90
91struct device_info_tag DeviceInfo;
92struct flash_cache_tag Cache;
93static struct spectra_l2_cache_info cache_l2;
94
95static u8 *cache_l2_page_buf;
96static u8 *cache_l2_blk_buf;
97
98u8 *g_pBlockTable;
99u8 *g_pWearCounter;
100u16 *g_pReadCounter;
101u32 *g_pBTBlocks;
102static u16 g_wBlockTableOffset;
103static u32 g_wBlockTableIndex;
104static u8 g_cBlockTableStatus;
105
106static u8 *g_pTempBuf;
107static u8 *flag_check_blk_table;
108static u8 *tmp_buf_search_bt_in_block;
109static u8 *spare_buf_search_bt_in_block;
110static u8 *spare_buf_bt_search_bt_in_block;
111static u8 *tmp_buf1_read_blk_table;
112static u8 *tmp_buf2_read_blk_table;
113static u8 *flags_static_wear_leveling;
114static u8 *tmp_buf_write_blk_table_data;
115static u8 *tmp_buf_read_disturbance;
116
117u8 *buf_read_page_main_spare;
118u8 *buf_write_page_main_spare;
119u8 *buf_read_page_spare;
120u8 *buf_get_bad_block;
121
122#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
123struct flash_cache_delta_list_tag int_cache[MAX_CHANS + MAX_DESCS];
124struct flash_cache_tag cache_start_copy;
125#endif
126
127int g_wNumFreeBlocks;
128u8 g_SBDCmdIndex;
129
130static u8 *g_pIPF;
131static u8 bt_flag = FIRST_BT_ID;
132static u8 bt_block_changed;
133
134static u16 cache_block_to_write;
135static u8 last_erased = FIRST_BT_ID;
136
137static u8 GC_Called;
138static u8 BT_GC_Called;
139
140#if CMD_DMA
141#define COPY_BACK_BUF_NUM 10
142
143static u8 ftl_cmd_cnt; /* Init value is 0 */
144u8 *g_pBTDelta;
145u8 *g_pBTDelta_Free;
146u8 *g_pBTStartingCopy;
147u8 *g_pWearCounterCopy;
148u16 *g_pReadCounterCopy;
149u8 *g_pBlockTableCopies;
150u8 *g_pNextBlockTable;
151static u8 *cp_back_buf_copies[COPY_BACK_BUF_NUM];
152static int cp_back_buf_idx;
153
154static u8 *g_temp_buf;
155
156#pragma pack(push, 1)
157#pragma pack(1)
158struct BTableChangesDelta {
159 u8 ftl_cmd_cnt;
160 u8 ValidFields;
161 u16 g_wBlockTableOffset;
162 u32 g_wBlockTableIndex;
163 u32 BT_Index;
164 u32 BT_Entry_Value;
165 u32 WC_Index;
166 u8 WC_Entry_Value;
167 u32 RC_Index;
168 u16 RC_Entry_Value;
169};
170
171#pragma pack(pop)
172
173struct BTableChangesDelta *p_BTableChangesDelta;
174#endif
175
176
177#define MARK_BLOCK_AS_BAD(blocknode) (blocknode |= BAD_BLOCK)
178#define MARK_BLK_AS_DISCARD(blk) (blk = (blk & ~SPARE_BLOCK) | DISCARD_BLOCK)
179
180#define FTL_Get_LBAPBA_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
181 sizeof(u32))
182#define FTL_Get_WearCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
183 sizeof(u8))
184#define FTL_Get_ReadCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
185 sizeof(u16))
186#if SUPPORT_LARGE_BLOCKNUM
187#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
188 sizeof(u8) * 3)
189#else
190#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
191 sizeof(u16))
192#endif
193#define FTL_Get_WearCounter_Table_Flash_Size_Bytes \
194 FTL_Get_WearCounter_Table_Mem_Size_Bytes
195#define FTL_Get_ReadCounter_Table_Flash_Size_Bytes \
196 FTL_Get_ReadCounter_Table_Mem_Size_Bytes
197
198static u32 FTL_Get_Block_Table_Flash_Size_Bytes(void)
199{
200 u32 byte_num;
201
202 if (DeviceInfo.MLCDevice) {
203 byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
204 DeviceInfo.wDataBlockNum * sizeof(u8) +
205 DeviceInfo.wDataBlockNum * sizeof(u16);
206 } else {
207 byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
208 DeviceInfo.wDataBlockNum * sizeof(u8);
209 }
210
211 byte_num += 4 * sizeof(u8);
212
213 return byte_num;
214}
215
216static u16 FTL_Get_Block_Table_Flash_Size_Pages(void)
217{
218 return (u16)FTL_Get_Page_Num(FTL_Get_Block_Table_Flash_Size_Bytes());
219}
220
/*
 * Pack one chunk of the RAM block tables (LBA->PBA map, wear counters
 * and, on MLC parts, read counters) into flashBuf for writing out.
 *
 * flashBuf: destination buffer for this chunk
 * sizeToTx: number of bytes to produce into flashBuf
 * sizeTxed: bytes of the logical image already emitted by earlier
 *           chunks (i.e. the starting offset into the image)
 *
 * Returns the number of bytes placed in flashBuf.
 */
static int FTL_Copy_Block_Table_To_Flash(u8 *flashBuf, u32 sizeToTx,
			u32 sizeTxed)
{
	u32 wBytesCopied, blk_tbl_size, wBytes;
	u32 *pbt = (u32 *)g_pBlockTable;

	/* Serialize the LBA->PBA entries high byte first: 3 bytes per
	 * entry with large block numbers (shift 16/8/0 picks the byte),
	 * otherwise 2 bytes per entry (shift 8/0). */
	blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
	for (wBytes = 0;
	(wBytes < sizeToTx) && ((wBytes + sizeTxed) < blk_tbl_size);
	wBytes++) {
#if SUPPORT_LARGE_BLOCKNUM
		flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 3]
		>> (((wBytes + sizeTxed) % 3) ?
		((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16)) & 0xFF;
#else
		flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 2]
		>> (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
#endif
	}

	/* Rebase the image offset past the LBA->PBA section, then copy
	 * as many raw wear-counter bytes as still fit in the chunk. */
	sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
	blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
	wBytesCopied = wBytes;
	wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
		(sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
	memcpy(flashBuf + wBytesCopied, g_pWearCounter + sizeTxed, wBytes);

	sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;

	if (DeviceInfo.MLCDevice) {
		/* Read counters (MLC only): two bytes per u16 value,
		 * high byte first (shift 8 on even offsets). */
		blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
		wBytesCopied += wBytes;
		for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
			((wBytes + sizeTxed) < blk_tbl_size); wBytes++)
			flashBuf[wBytes + wBytesCopied] =
				(g_pReadCounter[(wBytes + sizeTxed) / 2] >>
				(((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
	}

	return wBytesCopied + wBytes;
}
262
263static int FTL_Copy_Block_Table_From_Flash(u8 *flashBuf,
264 u32 sizeToTx, u32 sizeTxed)
265{
266 u32 wBytesCopied, blk_tbl_size, wBytes;
267 u32 *pbt = (u32 *)g_pBlockTable;
268
269 blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
270 for (wBytes = 0; (wBytes < sizeToTx) &&
271 ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
272#if SUPPORT_LARGE_BLOCKNUM
273 if (!((wBytes + sizeTxed) % 3))
274 pbt[(wBytes + sizeTxed) / 3] = 0;
275 pbt[(wBytes + sizeTxed) / 3] |=
276 (flashBuf[wBytes] << (((wBytes + sizeTxed) % 3) ?
277 ((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16));
278#else
279 if (!((wBytes + sizeTxed) % 2))
280 pbt[(wBytes + sizeTxed) / 2] = 0;
281 pbt[(wBytes + sizeTxed) / 2] |=
282 (flashBuf[wBytes] << (((wBytes + sizeTxed) % 2) ?
283 0 : 8));
284#endif
285 }
286
287 sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
288 blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
289 wBytesCopied = wBytes;
290 wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
291 (sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
292 memcpy(g_pWearCounter + sizeTxed, flashBuf + wBytesCopied, wBytes);
293 sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
294
295 if (DeviceInfo.MLCDevice) {
296 wBytesCopied += wBytes;
297 blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
298 for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
299 ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
300 if (((wBytes + sizeTxed) % 2))
301 g_pReadCounter[(wBytes + sizeTxed) / 2] = 0;
302 g_pReadCounter[(wBytes + sizeTxed) / 2] |=
303 (flashBuf[wBytes] <<
304 (((wBytes + sizeTxed) % 2) ? 0 : 8));
305 }
306 }
307
308 return wBytesCopied+wBytes;
309}
310
311static int FTL_Insert_Block_Table_Signature(u8 *buf, u8 tag)
312{
313 int i;
314
315 for (i = 0; i < BTSIG_BYTES; i++)
316 buf[BTSIG_OFFSET + i] =
317 ((tag + (i * BTSIG_DELTA) - FIRST_BT_ID) %
318 (1 + LAST_BT_ID-FIRST_BT_ID)) + FIRST_BT_ID;
319
320 return PASS;
321}
322
/*
 * Recover candidate block-table tag(s) from a signature area that may
 * be partially corrupted.  Any pair of signature bytes whose difference
 * is a nonzero multiple of BTSIG_DELTA votes for a candidate tag; each
 * distinct candidate is collected.
 *
 * buf:      page buffer containing the signature at BTSIG_OFFSET
 * tagarray: out-parameter, set to point at a static tag array
 *
 * Returns the number of candidate tags found (0 = unrecognizable).
 */
static int FTL_Extract_Block_Table_Tag(u8 *buf, u8 **tagarray)
{
	/* Static: callers get a pointer into this buffer, so the result
	 * is only valid until the next call (not reentrant). */
	static u8 tag[BTSIG_BYTES >> 1];
	int i, j, k, tagi, tagtemp, status;

	*tagarray = (u8 *)tag;
	tagi = 0;

	for (i = 0; i < (BTSIG_BYTES - 1); i++) {
		for (j = i + 1; (j < BTSIG_BYTES) &&
			(tagi < (BTSIG_BYTES >> 1)); j++) {
			/* Bytes i and j agree when they differ by a
			 * nonzero multiple of BTSIG_DELTA. */
			tagtemp = buf[BTSIG_OFFSET + j] -
				buf[BTSIG_OFFSET + i];
			if (tagtemp && !(tagtemp % BTSIG_DELTA)) {
				/* Back out byte i's delta contribution
				 * to recover the base tag, modulo the
				 * valid ID range. */
				tagtemp = (buf[BTSIG_OFFSET + i] +
					(1 + LAST_BT_ID - FIRST_BT_ID) -
					(i * BTSIG_DELTA)) %
					(1 + LAST_BT_ID - FIRST_BT_ID);
				/* Only record distinct candidates. */
				status = FAIL;
				for (k = 0; k < tagi; k++) {
					if (tagtemp == tag[k])
						status = PASS;
				}

				if (status == FAIL) {
					tag[tagi++] = tagtemp;
					/* NOTE(review): the second line
					 * tests j against the ALREADY
					 * updated i, so when the match
					 * was with the immediate
					 * neighbour (j == old i + 1)
					 * only i advances and j is left
					 * unchanged.  Looks deliberate
					 * but is fragile -- verify
					 * before restructuring. */
					i = (j == (i + 1)) ? i + 1 : i;
					j = (j == (i + 1)) ? i + 1 : i;
				}
			}
		}
	}

	return tagi;
}
358
359
360static int FTL_Execute_SPL_Recovery(void)
361{
362 u32 j, block, blks;
363 u32 *pbt = (u32 *)g_pBlockTable;
364 int ret;
365
366 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
367 __FILE__, __LINE__, __func__);
368
369 blks = DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock;
370 for (j = 0; j <= blks; j++) {
371 block = (pbt[j]);
372 if (((block & BAD_BLOCK) != BAD_BLOCK) &&
373 ((block & SPARE_BLOCK) == SPARE_BLOCK)) {
374 ret = GLOB_LLD_Erase_Block(block & ~BAD_BLOCK);
375 if (FAIL == ret) {
376 nand_dbg_print(NAND_DBG_WARN,
377 "NAND Program fail in %s, Line %d, "
378 "Function: %s, new Bad Block %d "
379 "generated!\n",
380 __FILE__, __LINE__, __func__,
381 (int)(block & ~BAD_BLOCK));
382 MARK_BLOCK_AS_BAD(pbt[j]);
383 }
384 }
385 }
386
387 return PASS;
388}
389
390/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
391* Function: GLOB_FTL_IdentifyDevice
392* Inputs: pointer to identify data structure
393* Outputs: PASS / FAIL
394* Description: the identify data structure is filled in with
395* information for the block driver.
396*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
397int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data)
398{
399 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
400 __FILE__, __LINE__, __func__);
401
402 dev_data->NumBlocks = DeviceInfo.wTotalBlocks;
403 dev_data->PagesPerBlock = DeviceInfo.wPagesPerBlock;
404 dev_data->PageDataSize = DeviceInfo.wPageDataSize;
405 dev_data->wECCBytesPerSector = DeviceInfo.wECCBytesPerSector;
406 dev_data->wDataBlockNum = DeviceInfo.wDataBlockNum;
407
408 return PASS;
409}
410
411/* ..... */
/*
 * Allocate every buffer the FTL layer needs: block table (with the
 * wear/read counters carved out of the SAME allocation), L1 cache
 * items, L2 cache merge buffers, scratch buffers for the various FTL
 * helpers, LLD page buffers and, in CDMA mode, the shadow block table
 * plus descriptor/command arrays.
 *
 * Returns PASS on success, -ENOMEM on any allocation failure (callers
 * compare against PASS).  On failure, everything allocated so far is
 * unwound by the goto chain at the bottom, whose labels release
 * resources in exact reverse order of allocation.
 */
static int allocate_memory(void)
{
	u32 block_table_size, page_size, block_size, mem_size;
	u32 total_bytes = 0;
	int i;
#if CMD_DMA
	int j;
#endif

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	page_size = DeviceInfo.wPageSize;
	block_size = DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;

	/* Block table = u32 map entry + u8 wear + u16 read counter per
	 * data block, rounded up to a whole page. */
	block_table_size = DeviceInfo.wDataBlockNum *
		(sizeof(u32) + sizeof(u8) + sizeof(u16));
	block_table_size += (DeviceInfo.wPageDataSize -
		(block_table_size % DeviceInfo.wPageDataSize)) %
		DeviceInfo.wPageDataSize;

	/* Malloc memory for block tables */
	g_pBlockTable = kmalloc(block_table_size, GFP_ATOMIC);
	if (!g_pBlockTable)
		goto block_table_fail;
	memset(g_pBlockTable, 0, block_table_size);
	total_bytes += block_table_size;

	/* Wear/read counter pointers alias into the block table
	 * allocation above -- they are never freed separately. */
	g_pWearCounter = (u8 *)(g_pBlockTable +
		DeviceInfo.wDataBlockNum * sizeof(u32));

	if (DeviceInfo.MLCDevice)
		g_pReadCounter = (u16 *)(g_pBlockTable +
			DeviceInfo.wDataBlockNum *
			(sizeof(u32) + sizeof(u8)));

	/* Malloc memory and init for cache items */
	for (i = 0; i < CACHE_ITEM_NUM; i++) {
		Cache.array[i].address = NAND_CACHE_INIT_ADDR;
		Cache.array[i].use_cnt = 0;
		Cache.array[i].changed = CLEAR;
		Cache.array[i].buf = kmalloc(Cache.cache_item_size,
			GFP_ATOMIC);
		if (!Cache.array[i].buf)
			goto cache_item_fail;
		memset(Cache.array[i].buf, 0, Cache.cache_item_size);
		total_bytes += Cache.cache_item_size;
	}

	/* Malloc memory for IPF */
	g_pIPF = kmalloc(page_size, GFP_ATOMIC);
	if (!g_pIPF)
		goto ipf_fail;
	memset(g_pIPF, 0, page_size);
	total_bytes += page_size;

	/* Malloc memory for data merging during Level2 Cache flush */
	cache_l2_page_buf = kmalloc(page_size, GFP_ATOMIC);
	if (!cache_l2_page_buf)
		goto cache_l2_page_buf_fail;
	memset(cache_l2_page_buf, 0xff, page_size);
	total_bytes += page_size;

	cache_l2_blk_buf = kmalloc(block_size, GFP_ATOMIC);
	if (!cache_l2_blk_buf)
		goto cache_l2_blk_buf_fail;
	memset(cache_l2_blk_buf, 0xff, block_size);
	total_bytes += block_size;

	/* Malloc memory for temp buffer */
	g_pTempBuf = kmalloc(Cache.cache_item_size, GFP_ATOMIC);
	if (!g_pTempBuf)
		goto Temp_buf_fail;
	memset(g_pTempBuf, 0, Cache.cache_item_size);
	total_bytes += Cache.cache_item_size;

	/* Malloc memory for block table blocks */
	mem_size = (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32);
	g_pBTBlocks = kmalloc(mem_size, GFP_ATOMIC);
	if (!g_pBTBlocks)
		goto bt_blocks_fail;
	memset(g_pBTBlocks, 0xff, mem_size);
	total_bytes += mem_size;

	/* Malloc memory for function FTL_Check_Block_Table */
	flag_check_blk_table = kmalloc(DeviceInfo.wDataBlockNum, GFP_ATOMIC);
	if (!flag_check_blk_table)
		goto flag_check_blk_table_fail;
	total_bytes += DeviceInfo.wDataBlockNum;

	/* Malloc memory for function FTL_Search_Block_Table_IN_Block */
	tmp_buf_search_bt_in_block = kmalloc(page_size, GFP_ATOMIC);
	if (!tmp_buf_search_bt_in_block)
		goto tmp_buf_search_bt_in_block_fail;
	memset(tmp_buf_search_bt_in_block, 0xff, page_size);
	total_bytes += page_size;

	/* Spare-area sized scratch buffers. */
	mem_size = DeviceInfo.wPageSize - DeviceInfo.wPageDataSize;
	spare_buf_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
	if (!spare_buf_search_bt_in_block)
		goto spare_buf_search_bt_in_block_fail;
	memset(spare_buf_search_bt_in_block, 0xff, mem_size);
	total_bytes += mem_size;

	spare_buf_bt_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
	if (!spare_buf_bt_search_bt_in_block)
		goto spare_buf_bt_search_bt_in_block_fail;
	memset(spare_buf_bt_search_bt_in_block, 0xff, mem_size);
	total_bytes += mem_size;

	/* Malloc memory for function FTL_Read_Block_Table */
	tmp_buf1_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
	if (!tmp_buf1_read_blk_table)
		goto tmp_buf1_read_blk_table_fail;
	memset(tmp_buf1_read_blk_table, 0xff, page_size);
	total_bytes += page_size;

	tmp_buf2_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
	if (!tmp_buf2_read_blk_table)
		goto tmp_buf2_read_blk_table_fail;
	memset(tmp_buf2_read_blk_table, 0xff, page_size);
	total_bytes += page_size;

	/* Malloc memory for function FTL_Static_Wear_Leveling */
	flags_static_wear_leveling = kmalloc(DeviceInfo.wDataBlockNum,
					GFP_ATOMIC);
	if (!flags_static_wear_leveling)
		goto flags_static_wear_leveling_fail;
	total_bytes += DeviceInfo.wDataBlockNum;

	/* Malloc memory for function FTL_Write_Block_Table_Data */
	if (FTL_Get_Block_Table_Flash_Size_Pages() > 3)
		mem_size = FTL_Get_Block_Table_Flash_Size_Bytes() -
			2 * DeviceInfo.wPageSize;
	else
		mem_size = DeviceInfo.wPageSize;
	tmp_buf_write_blk_table_data = kmalloc(mem_size, GFP_ATOMIC);
	if (!tmp_buf_write_blk_table_data)
		goto tmp_buf_write_blk_table_data_fail;
	memset(tmp_buf_write_blk_table_data, 0xff, mem_size);
	total_bytes += mem_size;

	/* Malloc memory for function FTL_Read_Disturbance */
	tmp_buf_read_disturbance = kmalloc(block_size, GFP_ATOMIC);
	if (!tmp_buf_read_disturbance)
		goto tmp_buf_read_disturbance_fail;
	memset(tmp_buf_read_disturbance, 0xff, block_size);
	total_bytes += block_size;

	/* Alloc mem for function NAND_Read_Page_Main_Spare of lld_nand.c */
	buf_read_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
	if (!buf_read_page_main_spare)
		goto buf_read_page_main_spare_fail;
	total_bytes += DeviceInfo.wPageSize;

	/* Alloc mem for function NAND_Write_Page_Main_Spare of lld_nand.c */
	buf_write_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
	if (!buf_write_page_main_spare)
		goto buf_write_page_main_spare_fail;
	total_bytes += DeviceInfo.wPageSize;

	/* Alloc mem for function NAND_Read_Page_Spare of lld_nand.c */
	buf_read_page_spare = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
	if (!buf_read_page_spare)
		goto buf_read_page_spare_fail;
	memset(buf_read_page_spare, 0xff, DeviceInfo.wPageSpareSize);
	total_bytes += DeviceInfo.wPageSpareSize;

	/* Alloc mem for function NAND_Get_Bad_Block of lld_nand.c */
	buf_get_bad_block = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
	if (!buf_get_bad_block)
		goto buf_get_bad_block_fail;
	memset(buf_get_bad_block, 0xff, DeviceInfo.wPageSpareSize);
	total_bytes += DeviceInfo.wPageSpareSize;

#if CMD_DMA
	g_temp_buf = kmalloc(block_size, GFP_ATOMIC);
	if (!g_temp_buf)
		goto temp_buf_fail;
	memset(g_temp_buf, 0xff, block_size);
	total_bytes += block_size;

	/* Malloc memory for copy of block table used in CDMA mode */
	g_pBTStartingCopy = kmalloc(block_table_size, GFP_ATOMIC);
	if (!g_pBTStartingCopy)
		goto bt_starting_copy;
	memset(g_pBTStartingCopy, 0, block_table_size);
	total_bytes += block_table_size;

	/* Same aliasing scheme as the live table above. */
	g_pWearCounterCopy = (u8 *)(g_pBTStartingCopy +
		DeviceInfo.wDataBlockNum * sizeof(u32));

	if (DeviceInfo.MLCDevice)
		g_pReadCounterCopy = (u16 *)(g_pBTStartingCopy +
			DeviceInfo.wDataBlockNum *
			(sizeof(u32) + sizeof(u8)));

	/* Malloc memory for block table copies */
	mem_size = 5 * DeviceInfo.wDataBlockNum * sizeof(u32) +
			5 * DeviceInfo.wDataBlockNum * sizeof(u8);
	if (DeviceInfo.MLCDevice)
		mem_size += 5 * DeviceInfo.wDataBlockNum * sizeof(u16);
	g_pBlockTableCopies = kmalloc(mem_size, GFP_ATOMIC);
	if (!g_pBlockTableCopies)
		goto blk_table_copies_fail;
	memset(g_pBlockTableCopies, 0, mem_size);
	total_bytes += mem_size;
	g_pNextBlockTable = g_pBlockTableCopies;

	/* Malloc memory for Block Table Delta */
	mem_size = MAX_DESCS * sizeof(struct BTableChangesDelta);
	g_pBTDelta = kmalloc(mem_size, GFP_ATOMIC);
	if (!g_pBTDelta)
		goto bt_delta_fail;
	memset(g_pBTDelta, 0, mem_size);
	total_bytes += mem_size;
	g_pBTDelta_Free = g_pBTDelta;

	/* Malloc memory for Copy Back Buffers */
	for (j = 0; j < COPY_BACK_BUF_NUM; j++) {
		cp_back_buf_copies[j] = kmalloc(block_size, GFP_ATOMIC);
		if (!cp_back_buf_copies[j])
			goto cp_back_buf_copies_fail;
		memset(cp_back_buf_copies[j], 0, block_size);
		total_bytes += block_size;
	}
	cp_back_buf_idx = 0;

	/* Malloc memory for pending commands list */
	mem_size = sizeof(struct pending_cmd) * MAX_DESCS;
	info.pcmds = kzalloc(mem_size, GFP_KERNEL);
	if (!info.pcmds)
		goto pending_cmds_buf_fail;
	total_bytes += mem_size;

	/* Malloc memory for CDMA descripter table */
	mem_size = sizeof(struct cdma_descriptor) * MAX_DESCS;
	info.cdma_desc_buf = kzalloc(mem_size, GFP_KERNEL);
	if (!info.cdma_desc_buf)
		goto cdma_desc_buf_fail;
	total_bytes += mem_size;

	/* Malloc memory for Memcpy descripter table */
	mem_size = sizeof(struct memcpy_descriptor) * MAX_DESCS;
	info.memcp_desc_buf = kzalloc(mem_size, GFP_KERNEL);
	if (!info.memcp_desc_buf)
		goto memcp_desc_buf_fail;
	total_bytes += mem_size;
#endif

	nand_dbg_print(NAND_DBG_WARN,
		"Total memory allocated in FTL layer: %d\n", total_bytes);

	return PASS;

	/* Error unwinding: each label frees everything allocated BEFORE
	 * the allocation that failed (reverse order of the happy path).
	 * pending_cmds_buf_fail falls through with j == COPY_BACK_BUF_NUM
	 * so the j-- loop frees all copy-back buffers; likewise
	 * ipf_fail falls into cache_item_fail with i == CACHE_ITEM_NUM. */
#if CMD_DMA
memcp_desc_buf_fail:
	kfree(info.cdma_desc_buf);
cdma_desc_buf_fail:
	kfree(info.pcmds);
pending_cmds_buf_fail:
cp_back_buf_copies_fail:
	j--;
	for (; j >= 0; j--)
		kfree(cp_back_buf_copies[j]);
	kfree(g_pBTDelta);
bt_delta_fail:
	kfree(g_pBlockTableCopies);
blk_table_copies_fail:
	kfree(g_pBTStartingCopy);
bt_starting_copy:
	kfree(g_temp_buf);
temp_buf_fail:
	kfree(buf_get_bad_block);
#endif

buf_get_bad_block_fail:
	kfree(buf_read_page_spare);
buf_read_page_spare_fail:
	kfree(buf_write_page_main_spare);
buf_write_page_main_spare_fail:
	kfree(buf_read_page_main_spare);
buf_read_page_main_spare_fail:
	kfree(tmp_buf_read_disturbance);
tmp_buf_read_disturbance_fail:
	kfree(tmp_buf_write_blk_table_data);
tmp_buf_write_blk_table_data_fail:
	kfree(flags_static_wear_leveling);
flags_static_wear_leveling_fail:
	kfree(tmp_buf2_read_blk_table);
tmp_buf2_read_blk_table_fail:
	kfree(tmp_buf1_read_blk_table);
tmp_buf1_read_blk_table_fail:
	kfree(spare_buf_bt_search_bt_in_block);
spare_buf_bt_search_bt_in_block_fail:
	kfree(spare_buf_search_bt_in_block);
spare_buf_search_bt_in_block_fail:
	kfree(tmp_buf_search_bt_in_block);
tmp_buf_search_bt_in_block_fail:
	kfree(flag_check_blk_table);
flag_check_blk_table_fail:
	kfree(g_pBTBlocks);
bt_blocks_fail:
	kfree(g_pTempBuf);
Temp_buf_fail:
	kfree(cache_l2_blk_buf);
cache_l2_blk_buf_fail:
	kfree(cache_l2_page_buf);
cache_l2_page_buf_fail:
	kfree(g_pIPF);
ipf_fail:
cache_item_fail:
	i--;
	for (; i >= 0; i--)
		kfree(Cache.array[i].buf);
	kfree(g_pBlockTable);
block_table_fail:
	printk(KERN_ERR "Failed to kmalloc memory in %s Line %d.\n",
		__FILE__, __LINE__);

	return -ENOMEM;
}
734
735/* .... */
/*
 * Release every buffer obtained by allocate_memory(), in reverse
 * allocation order.  g_pWearCounter and g_pReadCounter (and their CDMA
 * copies) point INTO the g_pBlockTable / g_pBTStartingCopy allocations,
 * so only the base pointers are freed.  Always returns 0.
 */
static int free_memory(void)
{
	int i;

#if CMD_DMA
	kfree(info.memcp_desc_buf);
	kfree(info.cdma_desc_buf);
	kfree(info.pcmds);
	for (i = COPY_BACK_BUF_NUM - 1; i >= 0; i--)
		kfree(cp_back_buf_copies[i]);
	kfree(g_pBTDelta);
	kfree(g_pBlockTableCopies);
	kfree(g_pBTStartingCopy);
	kfree(g_temp_buf);
	kfree(buf_get_bad_block);
#endif
	kfree(buf_read_page_spare);
	kfree(buf_write_page_main_spare);
	kfree(buf_read_page_main_spare);
	kfree(tmp_buf_read_disturbance);
	kfree(tmp_buf_write_blk_table_data);
	kfree(flags_static_wear_leveling);
	kfree(tmp_buf2_read_blk_table);
	kfree(tmp_buf1_read_blk_table);
	kfree(spare_buf_bt_search_bt_in_block);
	kfree(spare_buf_search_bt_in_block);
	kfree(tmp_buf_search_bt_in_block);
	kfree(flag_check_blk_table);
	kfree(g_pBTBlocks);
	kfree(g_pTempBuf);
	kfree(g_pIPF);
	for (i = CACHE_ITEM_NUM - 1; i >= 0; i--)
		kfree(Cache.array[i].buf);
	kfree(g_pBlockTable);

	return 0;
}
773
774static void dump_cache_l2_table(void)
775{
776 struct list_head *p;
777 struct spectra_l2_cache_list *pnd;
778 int n, i;
779
780 n = 0;
781 list_for_each(p, &cache_l2.table.list) {
782 pnd = list_entry(p, struct spectra_l2_cache_list, list);
783 nand_dbg_print(NAND_DBG_WARN, "dump_cache_l2_table node: %d, logical_blk_num: %d\n", n, pnd->logical_blk_num);
784/*
785 for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
786 if (pnd->pages_array[i] != MAX_U32_VALUE)
787 nand_dbg_print(NAND_DBG_WARN, " pages_array[%d]: 0x%x\n", i, pnd->pages_array[i]);
788 }
789*/
790 n++;
791 }
792}
793
794/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
795* Function: GLOB_FTL_Init
796* Inputs: none
797* Outputs: PASS=0 / FAIL=1
798* Description: allocates the memory for cache array,
799* important data structures
800* clears the cache array
801* reads the block table from flash into array
802*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_FTL_Init(void)
{
	int i;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* One L1 cache item covers a single page of data; the size must
	 * be set BEFORE allocate_memory() uses it to size the buffers. */
	Cache.pages_per_item = 1;
	Cache.cache_item_size = 1 * DeviceInfo.wPageDataSize;

	if (allocate_memory() != PASS)
		return FAIL;

#if CMD_DMA
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	/* Snapshot the cache state so it can be restored if a CDMA
	 * command chain fails part-way through. */
	memcpy((void *)&cache_start_copy, (void *)&Cache,
		sizeof(struct flash_cache_tag));
	memset((void *)&int_cache, -1,
		sizeof(struct flash_cache_delta_list_tag) *
		(MAX_CHANS + MAX_DESCS));
#endif
	ftl_cmd_cnt = 0;
#endif

	if (FTL_Read_Block_Table() != PASS)
		return FAIL;

	/* Init the Level2 Cache data structure */
	for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
		cache_l2.blk_array[i] = MAX_U32_VALUE;
	cache_l2.cur_blk_idx = 0;
	cache_l2.cur_page_num = 0;
	INIT_LIST_HEAD(&cache_l2.table.list);
	cache_l2.table.logical_blk_num = MAX_U32_VALUE;

	dump_cache_l2_table();

	/* 0 == PASS, per the banner comment above. */
	return 0;
}
842
843
844#if CMD_DMA
845#if 0
/*
 * (Currently compiled out via #if 0.)  Replay the block-table deltas
 * recorded while command 'idx' executed onto the CDMA shadow copies
 * (g_pBTStartingCopy / g_pWearCounterCopy / g_pReadCounterCopy),
 * consuming entries from p_BTableChangesDelta until one tagged past
 * PendingCMD[idx].Tag is reached.
 *
 * ValidFields is a bitmask selecting which delta fields apply:
 *   0x01 = table offset, 0x03 = offset + index, 0x0C = BT entry,
 *   0x30 = wear counter, 0xC0 = read counter (MLC only).
 */
static void save_blk_table_changes(u16 idx)
{
	u8 ftl_cmd;
	u32 *pbt = (u32 *)g_pBTStartingCopy;

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	u16 id;
	u8 cache_blks;

	/* Commit the interim cache state captured for this command. */
	id = idx - MAX_CHANS;
	if (int_cache[id].item != -1) {
		cache_blks = int_cache[id].item;
		cache_start_copy.array[cache_blks].address =
			int_cache[id].cache.address;
		cache_start_copy.array[cache_blks].changed =
			int_cache[id].cache.changed;
	}
#endif

	ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;

	while (ftl_cmd <= PendingCMD[idx].Tag) {
		if (p_BTableChangesDelta->ValidFields == 0x01) {
			g_wBlockTableOffset =
				p_BTableChangesDelta->g_wBlockTableOffset;
		} else if (p_BTableChangesDelta->ValidFields == 0x0C) {
			pbt[p_BTableChangesDelta->BT_Index] =
				p_BTableChangesDelta->BT_Entry_Value;
			debug_boundary_error(((
				p_BTableChangesDelta->BT_Index)),
				DeviceInfo.wDataBlockNum, 0);
		} else if (p_BTableChangesDelta->ValidFields == 0x03) {
			g_wBlockTableOffset =
				p_BTableChangesDelta->g_wBlockTableOffset;
			g_wBlockTableIndex =
				p_BTableChangesDelta->g_wBlockTableIndex;
		} else if (p_BTableChangesDelta->ValidFields == 0x30) {
			g_pWearCounterCopy[p_BTableChangesDelta->WC_Index] =
				p_BTableChangesDelta->WC_Entry_Value;
		} else if ((DeviceInfo.MLCDevice) &&
			(p_BTableChangesDelta->ValidFields == 0xC0)) {
			g_pReadCounterCopy[p_BTableChangesDelta->RC_Index] =
				p_BTableChangesDelta->RC_Entry_Value;
			nand_dbg_print(NAND_DBG_DEBUG,
				"In event status setting read counter "
				"GLOB_ftl_cmd_cnt %u Count %u Index %u\n",
				ftl_cmd,
				p_BTableChangesDelta->RC_Entry_Value,
				(unsigned int)p_BTableChangesDelta->RC_Index);
		} else {
			nand_dbg_print(NAND_DBG_DEBUG,
				"This should never occur \n");
		}
		/* Advance the global delta cursor to the next record. */
		p_BTableChangesDelta += 1;
		ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	}
}
903
/*
 * (Currently compiled out via #if 0.)  Drop the effects of command 'n'
 * after an earlier command in the chain failed: mark any block it was
 * writing as DISCARD in the shadow table, skip (without applying) its
 * recorded block-table deltas, and roll back the interim cache state
 * captured for it.
 */
static void discard_cmds(u16 n)
{
	u32 *pbt = (u32 *)g_pBTStartingCopy;
	u8 ftl_cmd;
	unsigned long k;
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	u8 cache_blks;
	u16 id;
#endif

	/* A write that never completed leaves its target block in an
	 * unknown state: mark it for discard. */
	if ((PendingCMD[n].CMD == WRITE_MAIN_CMD) ||
		(PendingCMD[n].CMD == WRITE_MAIN_SPARE_CMD)) {
		for (k = 0; k < DeviceInfo.wDataBlockNum; k++) {
			if (PendingCMD[n].Block == (pbt[k] & (~BAD_BLOCK)))
				MARK_BLK_AS_DISCARD(pbt[k]);
		}
	}

	/* Skip past this command's delta records without applying them. */
	ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	while (ftl_cmd <= PendingCMD[n].Tag) {
		p_BTableChangesDelta += 1;
		ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	}

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	id = n - MAX_CHANS;

	if (int_cache[id].item != -1) {
		cache_blks = int_cache[id].item;
		if (PendingCMD[n].CMD == MEMCOPY_CMD) {
			/* A memcpy INTO a cache buffer may have left it
			 * partially written: invalidate that item. */
			if ((cache_start_copy.array[cache_blks].buf <=
				PendingCMD[n].DataDestAddr) &&
				((cache_start_copy.array[cache_blks].buf +
				Cache.cache_item_size) >
				PendingCMD[n].DataDestAddr)) {
				cache_start_copy.array[cache_blks].address =
						NAND_CACHE_INIT_ADDR;
				cache_start_copy.array[cache_blks].use_cnt =
						0;
				cache_start_copy.array[cache_blks].changed =
						CLEAR;
			}
		} else {
			/* Otherwise restore the captured pre-command
			 * address/changed state. */
			cache_start_copy.array[cache_blks].address =
				int_cache[id].cache.address;
			cache_start_copy.array[cache_blks].changed =
				int_cache[id].cache.changed;
		}
	}
#endif
}
955
956static void process_cmd_pass(int *first_failed_cmd, u16 idx)
957{
958 if (0 == *first_failed_cmd)
959 save_blk_table_changes(idx);
960 else
961 discard_cmds(idx);
962}
963
/*
 * Handle a failed or aborted CDMA command: record the first failure,
 * drop the command's queued block-table deltas, restore the cache
 * snapshot for its slot, and retire the physical block on a real
 * erase/program failure.
 */
static void process_cmd_fail_abort(int *first_failed_cmd,
				u16 idx, int event)
{
	u32 *pbt = (u32 *)g_pBTStartingCopy;
	u8 ftl_cmd;
	unsigned long i;
	int erase_fail, program_fail;
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	u8 cache_blks;
	u16 id;
#endif

	/* Remember the first command of the chain that failed so the
	 * caller can report it back to the block driver. */
	if (0 == *first_failed_cmd)
		*first_failed_cmd = PendingCMD[idx].SBDCmdIndex;

	nand_dbg_print(NAND_DBG_DEBUG, "Uncorrectable error has occured "
		"while executing %u Command %u accesing Block %u\n",
		(unsigned int)p_BTableChangesDelta->ftl_cmd_cnt,
		PendingCMD[idx].CMD,
		(unsigned int)PendingCMD[idx].Block);

	/* Skip over the block-table delta records queued for this
	 * failed/aborted command; they must not be applied. */
	ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	while (ftl_cmd <= PendingCMD[idx].Tag) {
		p_BTableChangesDelta += 1;
		ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	}

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	/* int_cache[] is indexed by command slot relative to MAX_CHANS */
	id = idx - MAX_CHANS;

	if (int_cache[id].item != -1) {
		cache_blks = int_cache[id].item;
		if ((PendingCMD[idx].CMD == WRITE_MAIN_CMD)) {
			/* Failed flush: the data still lives only in the
			 * cache, so keep the item and mark it dirty. */
			cache_start_copy.array[cache_blks].address =
				int_cache[id].cache.address;
			cache_start_copy.array[cache_blks].changed = SET;
		} else if ((PendingCMD[idx].CMD == READ_MAIN_CMD)) {
			/* Failed fill: buffer contents are garbage, drop
			 * the cache item entirely. */
			cache_start_copy.array[cache_blks].address =
				NAND_CACHE_INIT_ADDR;
			cache_start_copy.array[cache_blks].use_cnt = 0;
			cache_start_copy.array[cache_blks].changed =
				CLEAR;
		} else if (PendingCMD[idx].CMD == ERASE_CMD) {
			/* ? */
		} else if (PendingCMD[idx].CMD == MEMCOPY_CMD) {
			/* ? */
		}
	}
#endif

	erase_fail = (event == EVENT_ERASE_FAILURE) &&
		(PendingCMD[idx].CMD == ERASE_CMD);

	program_fail = (event == EVENT_PROGRAM_FAILURE) &&
		((PendingCMD[idx].CMD == WRITE_MAIN_CMD) ||
		(PendingCMD[idx].CMD == WRITE_MAIN_SPARE_CMD));

	/* A genuine erase or program failure retires the physical
	 * block: mark every block-table entry pointing at it as bad. */
	if (erase_fail || program_fail) {
		for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
			if (PendingCMD[idx].Block ==
				(pbt[i] & (~BAD_BLOCK)))
				MARK_BLOCK_AS_BAD(pbt[i]);
		}
	}
}
1029
1030static void process_cmd(int *first_failed_cmd, u16 idx, int event)
1031{
1032 u8 ftl_cmd;
1033 int cmd_match = 0;
1034
1035 if (p_BTableChangesDelta->ftl_cmd_cnt == PendingCMD[idx].Tag)
1036 cmd_match = 1;
1037
1038 if (PendingCMD[idx].Status == CMD_PASS) {
1039 process_cmd_pass(first_failed_cmd, idx);
1040 } else if ((PendingCMD[idx].Status == CMD_FAIL) ||
1041 (PendingCMD[idx].Status == CMD_ABORT)) {
1042 process_cmd_fail_abort(first_failed_cmd, idx, event);
1043 } else if ((PendingCMD[idx].Status == CMD_NOT_DONE) &&
1044 PendingCMD[idx].Tag) {
1045 nand_dbg_print(NAND_DBG_DEBUG,
1046 " Command no. %hu is not executed\n",
1047 (unsigned int)PendingCMD[idx].Tag);
1048 ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
1049 while (ftl_cmd <= PendingCMD[idx].Tag) {
1050 p_BTableChangesDelta += 1;
1051 ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
1052 }
1053 }
1054}
1055#endif
1056
1057static void process_cmd(int *first_failed_cmd, u16 idx, int event)
1058{
1059 printk(KERN_ERR "temporary workaround function. "
1060 "Should not be called! \n");
1061}
1062
1063/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1064* Function: GLOB_FTL_Event_Status
1065* Inputs: none
1066* Outputs: Event Code
1067* Description: It is called by SBD after hardware interrupt signalling
1068* completion of commands chain
1069* It does following things
1070* get event status from LLD
1071* analyze command chain status
1072* determine last command executed
1073* analyze results
1074* rebuild the block table in case of uncorrectable error
1075* return event code
1076*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_FTL_Event_Status(int *first_failed_cmd)
{
	int event_code = PASS;
	u16 i_P;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	*first_failed_cmd = 0;

	event_code = GLOB_LLD_Event_Status();

	switch (event_code) {
	case EVENT_PASS:
		nand_dbg_print(NAND_DBG_DEBUG, "Handling EVENT_PASS\n");
		break;
	case EVENT_UNCORRECTABLE_DATA_ERROR:
		nand_dbg_print(NAND_DBG_DEBUG, "Handling Uncorrectable ECC!\n");
		break;
	case EVENT_PROGRAM_FAILURE:
	case EVENT_ERASE_FAILURE:
		nand_dbg_print(NAND_DBG_WARN, "Handling Ugly case. "
			"Event code: 0x%x\n", event_code);
		/* Walk every pending command slot, committing the
		 * block-table deltas of commands that passed and
		 * discarding those of commands that failed, then roll
		 * the live tables back from the known-good copies. */
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta;
		for (i_P = MAX_CHANS; i_P < (ftl_cmd_cnt + MAX_CHANS);
				i_P++)
			process_cmd(first_failed_cmd, i_P, event_code);
		memcpy(g_pBlockTable, g_pBTStartingCopy,
			DeviceInfo.wDataBlockNum * sizeof(u32));
		memcpy(g_pWearCounter, g_pWearCounterCopy,
			DeviceInfo.wDataBlockNum * sizeof(u8));
		if (DeviceInfo.MLCDevice)
			memcpy(g_pReadCounter, g_pReadCounterCopy,
				DeviceInfo.wDataBlockNum * sizeof(u16));

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
		/* Restore the cache state snapshot as well */
		memcpy((void *)&Cache, (void *)&cache_start_copy,
			sizeof(struct flash_cache_tag));
		memset((void *)&int_cache, -1,
			sizeof(struct flash_cache_delta_list_tag) *
			(MAX_DESCS + MAX_CHANS));
#endif
		break;
	default:
		nand_dbg_print(NAND_DBG_WARN,
			"Handling unexpected event code - 0x%x\n",
			event_code);
		event_code = ERR;
		break;
	}

	/* Re-seed the backup copies from the (possibly rolled back)
	 * live tables, then reset delta and command bookkeeping for
	 * the next command chain. */
	memcpy(g_pBTStartingCopy, g_pBlockTable,
		DeviceInfo.wDataBlockNum * sizeof(u32));
	memcpy(g_pWearCounterCopy, g_pWearCounter,
		DeviceInfo.wDataBlockNum * sizeof(u8));
	if (DeviceInfo.MLCDevice)
		memcpy(g_pReadCounterCopy, g_pReadCounter,
			DeviceInfo.wDataBlockNum * sizeof(u16));

	g_pBTDelta_Free = g_pBTDelta;
	ftl_cmd_cnt = 0;
	g_pNextBlockTable = g_pBlockTableCopies;
	cp_back_buf_idx = 0;

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	/* Take a fresh snapshot of the cache for the next chain */
	memcpy((void *)&cache_start_copy, (void *)&Cache,
		sizeof(struct flash_cache_tag));
	memset((void *)&int_cache, -1,
		sizeof(struct flash_cache_delta_list_tag) *
		(MAX_DESCS + MAX_CHANS));
#endif

	return event_code;
}
1152
1153/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1154* Function: glob_ftl_execute_cmds
1155* Inputs: none
1156* Outputs: none
1157* Description: pass thru to LLD
1158***************************************************************/
1159u16 glob_ftl_execute_cmds(void)
1160{
1161 nand_dbg_print(NAND_DBG_TRACE,
1162 "glob_ftl_execute_cmds: ftl_cmd_cnt %u\n",
1163 (unsigned int)ftl_cmd_cnt);
1164 g_SBDCmdIndex = 0;
1165 return glob_lld_execute_cmds();
1166}
1167
1168#endif
1169
1170#if !CMD_DMA
1171/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1172* Function: GLOB_FTL_Read Immediate
1173* Inputs: pointer to data
1174* address of data
1175* Outputs: PASS / FAIL
1176* Description: Reads one page of data into RAM directly from flash without
1177* using or disturbing cache.It is assumed this function is called
1178* with CMD-DMA disabled.
1179*****************************************************************/
int GLOB_FTL_Read_Immediate(u8 *read_data, u64 addr)
{
	int wResult = FAIL;
	u32 Block;
	u16 Page;
	u32 phy_blk;
	u32 *pbt = (u32 *)g_pBlockTable;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	Block = BLK_FROM_ADDR(addr);
	Page = PAGE_FROM_ADDR(addr, Block);

	/* NOTE(review): only blocks currently tagged SPARE are read by
	 * this path -- presumably callers use it to inspect spare
	 * blocks directly; confirm against the call sites. */
	if (!IS_SPARE_BLOCK(Block))
		return FAIL;

	phy_blk = pbt[Block];
	wResult = GLOB_LLD_Read_Page_Main(read_data, phy_blk, Page, 1);

	/* MLC NAND suffers read disturbance: bump the per-block read
	 * counter and rewrite the block once it crosses the limit. */
	if (DeviceInfo.MLCDevice) {
		g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]++;
		if (g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]
			>= MAX_READ_COUNTER)
			FTL_Read_Disturbance(phy_blk);
		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}
	}

	return wResult;
}
1213#endif
1214
1215#ifdef SUPPORT_BIG_ENDIAN
1216/*********************************************************************
1217* Function: FTL_Invert_Block_Table
1218* Inputs: none
1219* Outputs: none
1220* Description: Re-format the block table in ram based on BIG_ENDIAN and
1221* LARGE_BLOCKNUM if necessary
1222**********************************************************************/
static void FTL_Invert_Block_Table(void)
{
	u32 i;
	u32 *pbt = (u32 *)g_pBlockTable;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

#ifdef SUPPORT_LARGE_BLOCKNUM
	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
		pbt[i] = INVERTUINT32(pbt[i]);
		/* NOTE(review): elsewhere in this file g_pWearCounter
		 * is copied with sizeof(u8) per element; byte-swapping
		 * a single-byte value with INVERTUINT32 looks wrong and
		 * would zero the counter -- confirm the element type
		 * before trusting this on big-endian builds. */
		g_pWearCounter[i] = INVERTUINT32(g_pWearCounter[i]);
	}
#else
	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
		pbt[i] = INVERTUINT16(pbt[i]);
		/* NOTE(review): same concern as above with INVERTUINT16
		 * applied to a (likely) u8 wear counter. */
		g_pWearCounter[i] = INVERTUINT16(g_pWearCounter[i]);
	}
#endif
}
1243#endif
1244
1245/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1246* Function: GLOB_FTL_Flash_Init
1247* Inputs: none
1248* Outputs: PASS=0 / FAIL=0x01 (based on read ID)
1249* Description: The flash controller is initialized
1250* The flash device is reset
1251* Perform a flash READ ID command to confirm that a
1252* valid device is attached and active.
1253* The DeviceInfo structure gets filled in
1254*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1255int GLOB_FTL_Flash_Init(void)
1256{
1257 int status = FAIL;
1258
1259 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1260 __FILE__, __LINE__, __func__);
1261
1262 g_SBDCmdIndex = 0;
1263
1264 GLOB_LLD_Flash_Init();
1265
1266 status = GLOB_LLD_Read_Device_ID();
1267
1268 return status;
1269}
1270
1271/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1272* Inputs: none
1273* Outputs: PASS=0 / FAIL=0x01 (based on read ID)
1274* Description: The flash controller is released
1275*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1276int GLOB_FTL_Flash_Release(void)
1277{
1278 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1279 __FILE__, __LINE__, __func__);
1280
1281 return GLOB_LLD_Flash_Release();
1282}
1283
1284
1285/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1286* Function: GLOB_FTL_Cache_Release
1287* Inputs: none
1288* Outputs: none
1289* Description: release all allocated memory in GLOB_FTL_Init
1290* (allocated in GLOB_FTL_Init)
1291*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1292void GLOB_FTL_Cache_Release(void)
1293{
1294 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1295 __FILE__, __LINE__, __func__);
1296
1297 free_memory();
1298}
1299
1300/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1301* Function: FTL_Cache_If_Hit
1302* Inputs: Page Address
1303* Outputs: Block number/UNHIT BLOCK
1304* Description: Determines if the addressed page is in cache
1305*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1306static u16 FTL_Cache_If_Hit(u64 page_addr)
1307{
1308 u16 item;
1309 u64 addr;
1310 int i;
1311
1312 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1313 __FILE__, __LINE__, __func__);
1314
1315 item = UNHIT_CACHE_ITEM;
1316 for (i = 0; i < CACHE_ITEM_NUM; i++) {
1317 addr = Cache.array[i].address;
1318 if ((page_addr >= addr) &&
1319 (page_addr < (addr + Cache.cache_item_size))) {
1320 item = i;
1321 break;
1322 }
1323 }
1324
1325 return item;
1326}
1327
1328/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1329* Function: FTL_Calculate_LRU
1330* Inputs: None
1331* Outputs: None
1332* Description: Calculate the least recently block in a cache and record its
1333* index in LRU field.
1334*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1335static void FTL_Calculate_LRU(void)
1336{
1337 u16 i, bCurrentLRU, bTempCount;
1338
1339 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1340 __FILE__, __LINE__, __func__);
1341
1342 bCurrentLRU = 0;
1343 bTempCount = MAX_WORD_VALUE;
1344
1345 for (i = 0; i < CACHE_ITEM_NUM; i++) {
1346 if (Cache.array[i].use_cnt < bTempCount) {
1347 bCurrentLRU = i;
1348 bTempCount = Cache.array[i].use_cnt;
1349 }
1350 }
1351
1352 Cache.LRU = bCurrentLRU;
1353}
1354
1355/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1356* Function: FTL_Cache_Read_Page
1357* Inputs: pointer to read buffer, logical address and cache item number
1358* Outputs: None
1359* Description: Read the page from the cached block addressed by blocknumber
1360*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1361static void FTL_Cache_Read_Page(u8 *data_buf, u64 logic_addr, u16 cache_item)
1362{
1363 u8 *start_addr;
1364
1365 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1366 __FILE__, __LINE__, __func__);
1367
1368 start_addr = Cache.array[cache_item].buf;
1369 start_addr += (u32)(((logic_addr - Cache.array[cache_item].address) >>
1370 DeviceInfo.nBitsInPageDataSize) * DeviceInfo.wPageDataSize);
1371
1372#if CMD_DMA
1373 GLOB_LLD_MemCopy_CMD(data_buf, start_addr,
1374 DeviceInfo.wPageDataSize, 0);
1375 ftl_cmd_cnt++;
1376#else
1377 memcpy(data_buf, start_addr, DeviceInfo.wPageDataSize);
1378#endif
1379
1380 if (Cache.array[cache_item].use_cnt < MAX_WORD_VALUE)
1381 Cache.array[cache_item].use_cnt++;
1382}
1383
1384/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1385* Function: FTL_Cache_Read_All
1386* Inputs: pointer to read buffer,block address
1387* Outputs: PASS=0 / FAIL =1
1388* Description: It reads pages in cache
1389*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static int FTL_Cache_Read_All(u8 *pData, u64 phy_addr)
{
	int wResult = PASS;
	u32 Block;
	u32 lba;
	u16 Page;
	u16 PageCount;
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 i;

	Block = BLK_FROM_ADDR(phy_addr);
	Page = PAGE_FROM_ADDR(phy_addr, Block);
	PageCount = Cache.pages_per_item;

	nand_dbg_print(NAND_DBG_DEBUG,
			"%s, Line %d, Function: %s, Block: 0x%x\n",
			__FILE__, __LINE__, __func__, Block);

	/* Reverse-map the physical block to its logical index.  A block
	 * that is spare/bad/discarded has no valid data: return a
	 * buffer of 0xFF instead of reading flash. */
	lba = 0xffffffff;
	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
		if ((pbt[i] & (~BAD_BLOCK)) == Block) {
			lba = i;
			if (IS_SPARE_BLOCK(i) || IS_BAD_BLOCK(i) ||
				IS_DISCARDED_BLOCK(i)) {
				/* Add by yunpeng -2008.12.3 */
#if CMD_DMA
				GLOB_LLD_MemCopy_CMD(pData, g_temp_buf,
				PageCount * DeviceInfo.wPageDataSize, 0);
				ftl_cmd_cnt++;
#else
				memset(pData, 0xFF,
					PageCount * DeviceInfo.wPageDataSize);
#endif
				return wResult;
			} else {
				continue; /* break ?? */
			}
		}
	}

	if (0xffffffff == lba)
		printk(KERN_ERR "FTL_Cache_Read_All: Block is not found in BT\n");

#if CMD_DMA
	wResult = GLOB_LLD_Read_Page_Main_cdma(pData, Block, Page,
			PageCount, LLD_CMD_FLAG_MODE_CDMA);
	if (DeviceInfo.MLCDevice) {
		/* MLC read-disturbance handling: bump the counter and
		 * log a delta record so the change can be rolled back
		 * if this command chain fails. */
		g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
		nand_dbg_print(NAND_DBG_DEBUG,
			       "Read Counter modified in ftl_cmd_cnt %u"
				" Block %u Counter%u\n",
				ftl_cmd_cnt, (unsigned int)Block,
				g_pReadCounter[Block -
				DeviceInfo.wSpectraStartBlock]);

		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
		p_BTableChangesDelta->RC_Index =
			Block - DeviceInfo.wSpectraStartBlock;
		p_BTableChangesDelta->RC_Entry_Value =
			g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock];
		/* ValidFields 0xC0 == read-counter delta record */
		p_BTableChangesDelta->ValidFields = 0xC0;

		ftl_cmd_cnt++;

		if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
			MAX_READ_COUNTER)
			FTL_Read_Disturbance(Block);
		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}
	} else {
		ftl_cmd_cnt++;
	}
#else
	wResult = GLOB_LLD_Read_Page_Main(pData, Block, Page, PageCount);
	if (wResult == FAIL)
		return wResult;

	/* Same read-disturbance accounting as above, without the CDMA
	 * delta bookkeeping. */
	if (DeviceInfo.MLCDevice) {
		g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
		if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
						MAX_READ_COUNTER)
			FTL_Read_Disturbance(Block);
		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}
	}
#endif
	return wResult;
}
1485
1486/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1487* Function: FTL_Cache_Write_All
1488* Inputs: pointer to cache in sys memory
1489* address of free block in flash
1490* Outputs: PASS=0 / FAIL=1
1491* Description: writes all the pages of the block in cache to flash
1492*
1493* NOTE:need to make sure this works ok when cache is limited
1494* to a partial block. This is where copy-back would be
1495* activated. This would require knowing which pages in the
1496* cached block are clean/dirty.Right now we only know if
1497* the whole block is clean/dirty.
1498*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static int FTL_Cache_Write_All(u8 *pData, u64 blk_addr)
{
	u16 wResult = PASS;
	u32 Block;
	u16 Page;
	u16 PageCount;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	nand_dbg_print(NAND_DBG_DEBUG, "This block %d going to be written "
		"on %d\n", cache_block_to_write,
		(u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize));

	Block = BLK_FROM_ADDR(blk_addr);
	Page = PAGE_FROM_ADDR(blk_addr, Block);
	PageCount = Cache.pages_per_item;

	/* Program one cache item's worth of pages; a program failure
	 * is only reported here -- bad-block replacement is left to
	 * the caller. */
#if CMD_DMA
	if (FAIL == GLOB_LLD_Write_Page_Main_cdma(pData,
					Block, Page, PageCount)) {
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, "
			"Function: %s, new Bad Block %d generated! "
			"Need Bad Block replacing.\n",
			__FILE__, __LINE__, __func__, Block);
		wResult = FAIL;
	}
	ftl_cmd_cnt++;
#else
	if (FAIL == GLOB_LLD_Write_Page_Main(pData, Block, Page, PageCount)) {
		nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s,"
			" Line %d, Function %s, new Bad Block %d generated!"
			"Need Bad Block replacing.\n",
			__FILE__, __LINE__, __func__, Block);
		wResult = FAIL;
	}
#endif
	return wResult;
}
1539
1540/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1541* Function: FTL_Cache_Update_Block
1542* Inputs: pointer to buffer,page address,block address
1543* Outputs: PASS=0 / FAIL=1
1544* Description: It updates the cache
1545*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static int FTL_Cache_Update_Block(u8 *pData,
			u64 old_page_addr, u64 blk_addr)
{
	int i, j;
	u8 *buf = pData;
	int wResult = PASS;
	int wFoundInCache;
	u64 page_addr;
	u64 addr;
	u64 old_blk_addr;
	u16 page_offset;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Base address of the block containing old_page_addr, and the
	 * page offset of the cache item being replaced inside it. */
	old_blk_addr = (u64)(old_page_addr >>
		DeviceInfo.nBitsInBlockDataSize) * DeviceInfo.wBlockDataSize;
	page_offset = (u16)(GLOB_u64_Remainder(old_page_addr, 2) >>
		DeviceInfo.nBitsInPageDataSize);

	/* Copy the whole block to its new location, one cache item at
	 * a time, substituting pData for the item at page_offset.  For
	 * every other item, prefer a cached (possibly newer) copy over
	 * what is on flash. */
	for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
		page_addr = old_blk_addr + i * DeviceInfo.wPageDataSize;
		if (i != page_offset) {
			wFoundInCache = FAIL;
			for (j = 0; j < CACHE_ITEM_NUM; j++) {
				addr = Cache.array[j].address;
				addr = FTL_Get_Physical_Block_Addr(addr) +
					GLOB_u64_Remainder(addr, 2);
				if ((addr >= page_addr) && addr <
					(page_addr + Cache.cache_item_size)) {
					/* Cached copy found: write it out
					 * and mark the item dirty. */
					wFoundInCache = PASS;
					buf = Cache.array[j].buf;
					Cache.array[j].changed = SET;
#if CMD_DMA
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
					/* Snapshot for rollback on chain failure */
					int_cache[ftl_cmd_cnt].item = j;
					int_cache[ftl_cmd_cnt].cache.address =
						Cache.array[j].address;
					int_cache[ftl_cmd_cnt].cache.changed =
						Cache.array[j].changed;
#endif
#endif
					break;
				}
			}
			if (FAIL == wFoundInCache) {
				/* Not cached: read the old data from flash */
				if (ERR == FTL_Cache_Read_All(g_pTempBuf,
					page_addr)) {
					wResult = FAIL;
					break;
				}
				buf = g_pTempBuf;
			}
		} else {
			/* This is the item being updated: use caller data */
			buf = pData;
		}

		if (FAIL == FTL_Cache_Write_All(buf,
			blk_addr + (page_addr - old_blk_addr))) {
			wResult = FAIL;
			break;
		}
	}

	return wResult;
}
1612
1613/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1614* Function: FTL_Copy_Block
1615* Inputs: source block address
1616* Destination block address
1617* Outputs: PASS=0 / FAIL=1
1618* Description: used only for static wear leveling to move the block
1619* containing static data to new blocks(more worn)
1620*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1621int FTL_Copy_Block(u64 old_blk_addr, u64 blk_addr)
1622{
1623 int i, r1, r2, wResult = PASS;
1624
1625 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1626 __FILE__, __LINE__, __func__);
1627
1628 for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
1629 r1 = FTL_Cache_Read_All(g_pTempBuf, old_blk_addr +
1630 i * DeviceInfo.wPageDataSize);
1631 r2 = FTL_Cache_Write_All(g_pTempBuf, blk_addr +
1632 i * DeviceInfo.wPageDataSize);
1633 if ((ERR == r1) || (FAIL == r2)) {
1634 wResult = FAIL;
1635 break;
1636 }
1637 }
1638
1639 return wResult;
1640}
1641
1642/* Search the block table to find out the least wear block and then return it */
1643static u32 find_least_worn_blk_for_l2_cache(void)
1644{
1645 int i;
1646 u32 *pbt = (u32 *)g_pBlockTable;
1647 u8 least_wear_cnt = MAX_BYTE_VALUE;
1648 u32 least_wear_blk_idx = MAX_U32_VALUE;
1649 u32 phy_idx;
1650
1651 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
1652 if (IS_SPARE_BLOCK(i)) {
1653 phy_idx = (u32)((~BAD_BLOCK) & pbt[i]);
1654 if (phy_idx > DeviceInfo.wSpectraEndBlock)
1655 printk(KERN_ERR "find_least_worn_blk_for_l2_cache: "
1656 "Too big phy block num (%d)\n", phy_idx);
1657 if (g_pWearCounter[phy_idx -DeviceInfo.wSpectraStartBlock] < least_wear_cnt) {
1658 least_wear_cnt = g_pWearCounter[phy_idx - DeviceInfo.wSpectraStartBlock];
1659 least_wear_blk_idx = i;
1660 }
1661 }
1662 }
1663
1664 nand_dbg_print(NAND_DBG_WARN,
1665 "find_least_worn_blk_for_l2_cache: "
1666 "find block %d with least worn counter (%d)\n",
1667 least_wear_blk_idx, least_wear_cnt);
1668
1669 return least_wear_blk_idx;
1670}
1671
1672
1673
1674/* Get blocks for Level2 Cache */
1675static int get_l2_cache_blks(void)
1676{
1677 int n;
1678 u32 blk;
1679 u32 *pbt = (u32 *)g_pBlockTable;
1680
1681 for (n = 0; n < BLK_NUM_FOR_L2_CACHE; n++) {
1682 blk = find_least_worn_blk_for_l2_cache();
1683 if (blk > DeviceInfo.wDataBlockNum) {
1684 nand_dbg_print(NAND_DBG_WARN,
1685 "find_least_worn_blk_for_l2_cache: "
1686 "No enough free NAND blocks (n: %d) for L2 Cache!\n", n);
1687 return FAIL;
1688 }
1689 /* Tag the free block as discard in block table */
1690 pbt[blk] = (pbt[blk] & (~BAD_BLOCK)) | DISCARD_BLOCK;
1691 /* Add the free block to the L2 Cache block array */
1692 cache_l2.blk_array[n] = pbt[blk] & (~BAD_BLOCK);
1693 }
1694
1695 return PASS;
1696}
1697
1698static int erase_l2_cache_blocks(void)
1699{
1700 int i, ret = PASS;
1701 u32 pblk, lblk;
1702 u64 addr;
1703 u32 *pbt = (u32 *)g_pBlockTable;
1704
1705 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
1706 __FILE__, __LINE__, __func__);
1707
1708 for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++) {
1709 pblk = cache_l2.blk_array[i];
1710
1711 /* If the L2 cache block is invalid, then just skip it */
1712 if (MAX_U32_VALUE == pblk)
1713 continue;
1714
1715 BUG_ON(pblk > DeviceInfo.wSpectraEndBlock);
1716
1717 addr = (u64)pblk << DeviceInfo.nBitsInBlockDataSize;
1718 if (PASS == GLOB_FTL_Block_Erase(addr)) {
1719 /* Get logical block number of the erased block */
1720 lblk = FTL_Get_Block_Index(pblk);
1721 BUG_ON(BAD_BLOCK == lblk);
1722 /* Tag it as free in the block table */
1723 pbt[lblk] &= (u32)(~DISCARD_BLOCK);
1724 pbt[lblk] |= (u32)(SPARE_BLOCK);
1725 } else {
1726 MARK_BLOCK_AS_BAD(pbt[lblk]);
1727 ret = ERR;
1728 }
1729 }
1730
1731 return ret;
1732}
1733
1734/*
1735 * Merge the valid data page in the L2 cache blocks into NAND.
1736*/
static int flush_l2_cache(void)
{
	struct list_head *p;
	struct spectra_l2_cache_list *pnd, *tmp_pnd;
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 phy_blk, l2_blk;
	u64 addr;
	u16 l2_page;
	int i, ret = PASS;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (list_empty(&cache_l2.table.list)) /* No data to flush */
		return ret;

	//dump_cache_l2_table();

	if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
		g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
		FTL_Write_IN_Progress_Block_Table_Page();
	}

	/* For each logical block with pages in the L2 cache: build the
	 * full block image in cache_l2_blk_buf (base data from NAND,
	 * or 0xFF if the block holds no valid data), overlay the
	 * cached pages, then program the image to a fresh block. */
	list_for_each(p, &cache_l2.table.list) {
		pnd = list_entry(p, struct spectra_l2_cache_list, list);
		if (IS_SPARE_BLOCK(pnd->logical_blk_num) ||
			IS_BAD_BLOCK(pnd->logical_blk_num) ||
			IS_DISCARDED_BLOCK(pnd->logical_blk_num)) {
			nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
			memset(cache_l2_blk_buf, 0xff, DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize);
		} else {
			nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
			phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
			ret = GLOB_LLD_Read_Page_Main(cache_l2_blk_buf,
				phy_blk, 0, DeviceInfo.wPagesPerBlock);
			if (ret == FAIL) {
				printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
			}
		}

		/* Overlay each cached page; pages_array[i] packs the L2
		 * block index in the high 16 bits and the page in the low. */
		for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
			if (pnd->pages_array[i] != MAX_U32_VALUE) {
				l2_blk = cache_l2.blk_array[(pnd->pages_array[i] >> 16) & 0xffff];
				l2_page = pnd->pages_array[i] & 0xffff;
				ret = GLOB_LLD_Read_Page_Main(cache_l2_page_buf, l2_blk, l2_page, 1);
				if (ret == FAIL) {
					printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
				}
				memcpy(cache_l2_blk_buf + i * DeviceInfo.wPageDataSize, cache_l2_page_buf, DeviceInfo.wPageDataSize);
			}
		}

		/* Find a free block and tag the original block as discarded */
		addr = (u64)pnd->logical_blk_num << DeviceInfo.nBitsInBlockDataSize;
		ret = FTL_Replace_Block(addr);
		if (ret == FAIL) {
			printk(KERN_ERR "FTL_Replace_Block fail in %s, Line %d\n", __FILE__, __LINE__);
		}

		/* Write back the updated data into NAND */
		phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
		if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
			nand_dbg_print(NAND_DBG_WARN,
				"Program NAND block %d fail in %s, Line %d\n",
				phy_blk, __FILE__, __LINE__);
			/* This may not be really a bad block. So just tag it as discarded. */
			/* Then it has a chance to be erased when garbage collection. */
			/* If it is really bad, then the erase will fail and it will be marked */
			/* as bad then. Otherwise it will be marked as free and can be used again */
			MARK_BLK_AS_DISCARD(pbt[pnd->logical_blk_num]);
			/* Find another free block and write it again */
			FTL_Replace_Block(addr);
			phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
			if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
				printk(KERN_ERR "Failed to write back block %d when flush L2 cache."
					"Some data will be lost!\n", phy_blk);
				MARK_BLOCK_AS_BAD(pbt[pnd->logical_blk_num]);
			}
		} else {
			/* tag the new free block as used block */
			pbt[pnd->logical_blk_num] &= (~SPARE_BLOCK);
		}
	}

	/* Destroy the L2 Cache table and free the memory of all nodes */
	list_for_each_entry_safe(pnd, tmp_pnd, &cache_l2.table.list, list) {
		list_del(&pnd->list);
		kfree(pnd);
	}

	/* Erase discard L2 cache blocks */
	if (erase_l2_cache_blocks() != PASS)
		nand_dbg_print(NAND_DBG_WARN,
			" Erase L2 cache blocks error in %s, Line %d\n",
			__FILE__, __LINE__);

	/* Init the Level2 Cache data structure */
	for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
		cache_l2.blk_array[i] = MAX_U32_VALUE;
	cache_l2.cur_blk_idx = 0;
	cache_l2.cur_page_num = 0;
	INIT_LIST_HEAD(&cache_l2.table.list);
	cache_l2.table.logical_blk_num = MAX_U32_VALUE;

	return ret;
}
1843
1844/*
1845 * Write back a changed victim cache item to the Level2 Cache
1846 * and update the L2 Cache table to map the change.
1847 * If the L2 Cache is full, then start to do the L2 Cache flush.
1848*/
1849static int write_back_to_l2_cache(u8 *buf, u64 logical_addr)
1850{
1851 u32 logical_blk_num;
1852 u16 logical_page_num;
1853 struct list_head *p;
1854 struct spectra_l2_cache_list *pnd, *pnd_new;
1855 u32 node_size;
1856 int i, found;
1857
1858 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
1859 __FILE__, __LINE__, __func__);
1860
1861 /*
1862 * If Level2 Cache table is empty, then it means either:
1863 * 1. This is the first time that the function called after FTL_init
1864 * or
1865 * 2. The Level2 Cache has just been flushed
1866 *
1867 * So, 'steal' some free blocks from NAND for L2 Cache using
1868 * by just mask them as discard in the block table
1869 */
1870 if (list_empty(&cache_l2.table.list)) {
1871 BUG_ON(cache_l2.cur_blk_idx != 0);
1872 BUG_ON(cache_l2.cur_page_num!= 0);
1873 BUG_ON(cache_l2.table.logical_blk_num != MAX_U32_VALUE);
1874 if (FAIL == get_l2_cache_blks()) {
1875 GLOB_FTL_Garbage_Collection();
1876 if (FAIL == get_l2_cache_blks()) {
1877 printk(KERN_ALERT "Fail to get L2 cache blks!\n");
1878 return FAIL;
1879 }
1880 }
1881 }
1882
1883 logical_blk_num = BLK_FROM_ADDR(logical_addr);
1884 logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
1885 BUG_ON(logical_blk_num == MAX_U32_VALUE);
1886
1887 /* Write the cache item data into the current position of L2 Cache */
1888#if CMD_DMA
1889 /*
1890 * TODO
1891 */
1892#else
1893 if (FAIL == GLOB_LLD_Write_Page_Main(buf,
1894 cache_l2.blk_array[cache_l2.cur_blk_idx],
1895 cache_l2.cur_page_num, 1)) {
1896 nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
1897 "%s, Line %d, new Bad Block %d generated!\n",
1898 __FILE__, __LINE__,
1899 cache_l2.blk_array[cache_l2.cur_blk_idx]);
1900
1901 /* TODO: tag the current block as bad and try again */
1902
1903 return FAIL;
1904 }
1905#endif
1906
1907 /*
1908 * Update the L2 Cache table.
1909 *
1910 * First seaching in the table to see whether the logical block
1911 * has been mapped. If not, then kmalloc a new node for the
1912 * logical block, fill data, and then insert it to the list.
1913 * Otherwise, just update the mapped node directly.
1914 */
1915 found = 0;
1916 list_for_each(p, &cache_l2.table.list) {
1917 pnd = list_entry(p, struct spectra_l2_cache_list, list);
1918 if (pnd->logical_blk_num == logical_blk_num) {
1919 pnd->pages_array[logical_page_num] =
1920 (cache_l2.cur_blk_idx << 16) |
1921 cache_l2.cur_page_num;
1922 found = 1;
1923 break;
1924 }
1925 }
1926 if (!found) { /* Create new node for the logical block here */
1927
1928 /* The logical pages to physical pages map array is
1929 * located at the end of struct spectra_l2_cache_list.
1930 */
1931 node_size = sizeof(struct spectra_l2_cache_list) +
1932 sizeof(u32) * DeviceInfo.wPagesPerBlock;
1933 pnd_new = kmalloc(node_size, GFP_ATOMIC);
1934 if (!pnd_new) {
1935 printk(KERN_ERR "Failed to kmalloc in %s Line %d\n",
1936 __FILE__, __LINE__);
1937 /*
1938 * TODO: Need to flush all the L2 cache into NAND ASAP
1939 * since no memory available here
1940 */
1941 }
1942 pnd_new->logical_blk_num = logical_blk_num;
1943 for (i = 0; i < DeviceInfo.wPagesPerBlock; i++)
1944 pnd_new->pages_array[i] = MAX_U32_VALUE;
1945 pnd_new->pages_array[logical_page_num] =
1946 (cache_l2.cur_blk_idx << 16) | cache_l2.cur_page_num;
1947 list_add(&pnd_new->list, &cache_l2.table.list);
1948 }
1949
1950 /* Increasing the current position pointer of the L2 Cache */
1951 cache_l2.cur_page_num++;
1952 if (cache_l2.cur_page_num >= DeviceInfo.wPagesPerBlock) {
1953 cache_l2.cur_blk_idx++;
1954 if (cache_l2.cur_blk_idx >= BLK_NUM_FOR_L2_CACHE) {
1955 /* The L2 Cache is full. Need to flush it now */
1956 nand_dbg_print(NAND_DBG_WARN,
1957 "L2 Cache is full, will start to flush it\n");
1958 flush_l2_cache();
1959 } else {
1960 cache_l2.cur_page_num = 0;
1961 }
1962 }
1963
1964 return PASS;
1965}
1966
1967/*
1968 * Seach in the Level2 Cache table to find the cache item.
1969 * If find, read the data from the NAND page of L2 Cache,
1970 * Otherwise, return FAIL.
1971 */
1972static int search_l2_cache(u8 *buf, u64 logical_addr)
1973{
1974 u32 logical_blk_num;
1975 u16 logical_page_num;
1976 struct list_head *p;
1977 struct spectra_l2_cache_list *pnd;
1978 u32 tmp = MAX_U32_VALUE;
1979 u32 phy_blk;
1980 u16 phy_page;
1981 int ret = FAIL;
1982
1983 logical_blk_num = BLK_FROM_ADDR(logical_addr);
1984 logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
1985
1986 list_for_each(p, &cache_l2.table.list) {
1987 pnd = list_entry(p, struct spectra_l2_cache_list, list);
1988 if (pnd->logical_blk_num == logical_blk_num) {
1989 tmp = pnd->pages_array[logical_page_num];
1990 break;
1991 }
1992 }
1993
1994 if (tmp != MAX_U32_VALUE) { /* Found valid map */
1995 phy_blk = cache_l2.blk_array[(tmp >> 16) & 0xFFFF];
1996 phy_page = tmp & 0xFFFF;
1997#if CMD_DMA
1998 /* TODO */
1999#else
2000 ret = GLOB_LLD_Read_Page_Main(buf, phy_blk, phy_page, 1);
2001#endif
2002 }
2003
2004 return ret;
2005}
2006
2007/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2008* Function: FTL_Cache_Write_Back
2009* Inputs: pointer to data cached in sys memory
2010* address of free block in flash
2011* Outputs: PASS=0 / FAIL=1
2012* Description: writes all the pages of Cache Block to flash
2013*
2014*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2015static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr)
2016{
2017 int i, j, iErase;
2018 u64 old_page_addr, addr, phy_addr;
2019 u32 *pbt = (u32 *)g_pBlockTable;
2020 u32 lba;
2021
2022 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2023 __FILE__, __LINE__, __func__);
2024
2025 old_page_addr = FTL_Get_Physical_Block_Addr(blk_addr) +
2026 GLOB_u64_Remainder(blk_addr, 2);
2027
2028 iErase = (FAIL == FTL_Replace_Block(blk_addr)) ? PASS : FAIL;
2029
2030 pbt[BLK_FROM_ADDR(blk_addr)] &= (~SPARE_BLOCK);
2031
2032#if CMD_DMA
2033 p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
2034 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
2035
2036 p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
2037 p_BTableChangesDelta->BT_Index = (u32)(blk_addr >>
2038 DeviceInfo.nBitsInBlockDataSize);
2039 p_BTableChangesDelta->BT_Entry_Value =
2040 pbt[(u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize)];
2041 p_BTableChangesDelta->ValidFields = 0x0C;
2042#endif
2043
2044 if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
2045 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
2046 FTL_Write_IN_Progress_Block_Table_Page();
2047 }
2048
2049 for (i = 0; i < RETRY_TIMES; i++) {
2050 if (PASS == iErase) {
2051 phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
2052 if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
2053 lba = BLK_FROM_ADDR(blk_addr);
2054 MARK_BLOCK_AS_BAD(pbt[lba]);
2055 i = RETRY_TIMES;
2056 break;
2057 }
2058 }
2059
2060 for (j = 0; j < CACHE_ITEM_NUM; j++) {
2061 addr = Cache.array[j].address;
2062 if ((addr <= blk_addr) &&
2063 ((addr + Cache.cache_item_size) > blk_addr))
2064 cache_block_to_write = j;
2065 }
2066
2067 phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
2068 if (PASS == FTL_Cache_Update_Block(pData,
2069 old_page_addr, phy_addr)) {
2070 cache_block_to_write = UNHIT_CACHE_ITEM;
2071 break;
2072 } else {
2073 iErase = PASS;
2074 }
2075 }
2076
2077 if (i >= RETRY_TIMES) {
2078 if (ERR == FTL_Flash_Error_Handle(pData,
2079 old_page_addr, blk_addr))
2080 return ERR;
2081 else
2082 return FAIL;
2083 }
2084
2085 return PASS;
2086}
2087
2088/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2089* Function: FTL_Cache_Write_Page
2090* Inputs: Pointer to buffer, page address, cache block number
2091* Outputs: PASS=0 / FAIL=1
2092* Description: It writes the data in Cache Block
2093*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2094static void FTL_Cache_Write_Page(u8 *pData, u64 page_addr,
2095 u8 cache_blk, u16 flag)
2096{
2097 u8 *pDest;
2098 u64 addr;
2099
2100 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2101 __FILE__, __LINE__, __func__);
2102
2103 addr = Cache.array[cache_blk].address;
2104 pDest = Cache.array[cache_blk].buf;
2105
2106 pDest += (unsigned long)(page_addr - addr);
2107 Cache.array[cache_blk].changed = SET;
2108#if CMD_DMA
2109#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
2110 int_cache[ftl_cmd_cnt].item = cache_blk;
2111 int_cache[ftl_cmd_cnt].cache.address =
2112 Cache.array[cache_blk].address;
2113 int_cache[ftl_cmd_cnt].cache.changed =
2114 Cache.array[cache_blk].changed;
2115#endif
2116 GLOB_LLD_MemCopy_CMD(pDest, pData, DeviceInfo.wPageDataSize, flag);
2117 ftl_cmd_cnt++;
2118#else
2119 memcpy(pDest, pData, DeviceInfo.wPageDataSize);
2120#endif
2121 if (Cache.array[cache_blk].use_cnt < MAX_WORD_VALUE)
2122 Cache.array[cache_blk].use_cnt++;
2123}
2124
2125/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2126* Function: FTL_Cache_Write
2127* Inputs: none
2128* Outputs: PASS=0 / FAIL=1
2129* Description: It writes least frequently used Cache block to flash if it
2130* has been changed
2131*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2132static int FTL_Cache_Write(void)
2133{
2134 int i, bResult = PASS;
2135 u16 bNO, least_count = 0xFFFF;
2136
2137 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2138 __FILE__, __LINE__, __func__);
2139
2140 FTL_Calculate_LRU();
2141
2142 bNO = Cache.LRU;
2143 nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: "
2144 "Least used cache block is %d\n", bNO);
2145
2146 if (Cache.array[bNO].changed != SET)
2147 return bResult;
2148
2149 nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: Cache"
2150 " Block %d containing logical block %d is dirty\n",
2151 bNO,
2152 (u32)(Cache.array[bNO].address >>
2153 DeviceInfo.nBitsInBlockDataSize));
2154#if CMD_DMA
2155#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
2156 int_cache[ftl_cmd_cnt].item = bNO;
2157 int_cache[ftl_cmd_cnt].cache.address =
2158 Cache.array[bNO].address;
2159 int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
2160#endif
2161#endif
2162 bResult = write_back_to_l2_cache(Cache.array[bNO].buf,
2163 Cache.array[bNO].address);
2164 if (bResult != ERR)
2165 Cache.array[bNO].changed = CLEAR;
2166
2167 least_count = Cache.array[bNO].use_cnt;
2168
2169 for (i = 0; i < CACHE_ITEM_NUM; i++) {
2170 if (i == bNO)
2171 continue;
2172 if (Cache.array[i].use_cnt > 0)
2173 Cache.array[i].use_cnt -= least_count;
2174 }
2175
2176 return bResult;
2177}
2178
2179/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2180* Function: FTL_Cache_Read
2181* Inputs: Page address
2182* Outputs: PASS=0 / FAIL=1
2183* Description: It reads the block from device in Cache Block
2184* Set the LRU count to 1
2185* Mark the Cache Block as clean
2186*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2187static int FTL_Cache_Read(u64 logical_addr)
2188{
2189 u64 item_addr, phy_addr;
2190 u16 num;
2191 int ret;
2192
2193 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2194 __FILE__, __LINE__, __func__);
2195
2196 num = Cache.LRU; /* The LRU cache item will be overwritten */
2197
2198 item_addr = (u64)GLOB_u64_Div(logical_addr, Cache.cache_item_size) *
2199 Cache.cache_item_size;
2200 Cache.array[num].address = item_addr;
2201 Cache.array[num].use_cnt = 1;
2202 Cache.array[num].changed = CLEAR;
2203
2204#if CMD_DMA
2205#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
2206 int_cache[ftl_cmd_cnt].item = num;
2207 int_cache[ftl_cmd_cnt].cache.address =
2208 Cache.array[num].address;
2209 int_cache[ftl_cmd_cnt].cache.changed =
2210 Cache.array[num].changed;
2211#endif
2212#endif
2213 /*
2214 * Search in L2 Cache. If hit, fill data into L1 Cache item buffer,
2215 * Otherwise, read it from NAND
2216 */
2217 ret = search_l2_cache(Cache.array[num].buf, logical_addr);
2218 if (PASS == ret) /* Hit in L2 Cache */
2219 return ret;
2220
2221 /* Compute the physical start address of NAND device according to */
2222 /* the logical start address of the cache item (LRU cache item) */
2223 phy_addr = FTL_Get_Physical_Block_Addr(item_addr) +
2224 GLOB_u64_Remainder(item_addr, 2);
2225
2226 return FTL_Cache_Read_All(Cache.array[num].buf, phy_addr);
2227}
2228
2229/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2230* Function: FTL_Check_Block_Table
2231* Inputs: ?
2232* Outputs: PASS=0 / FAIL=1
2233* Description: It checks the correctness of each block table entry
2234*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2235static int FTL_Check_Block_Table(int wOldTable)
2236{
2237 u32 i;
2238 int wResult = PASS;
2239 u32 blk_idx;
2240 u32 *pbt = (u32 *)g_pBlockTable;
2241 u8 *pFlag = flag_check_blk_table;
2242
2243 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2244 __FILE__, __LINE__, __func__);
2245
2246 if (NULL != pFlag) {
2247 memset(pFlag, FAIL, DeviceInfo.wDataBlockNum);
2248 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
2249 blk_idx = (u32)(pbt[i] & (~BAD_BLOCK));
2250
2251 /*
2252 * 20081006/KBV - Changed to pFlag[i] reference
2253 * to avoid buffer overflow
2254 */
2255
2256 /*
2257 * 2008-10-20 Yunpeng Note: This change avoid
2258 * buffer overflow, but changed function of
2259 * the code, so it should be re-write later
2260 */
2261 if ((blk_idx > DeviceInfo.wSpectraEndBlock) ||
2262 PASS == pFlag[i]) {
2263 wResult = FAIL;
2264 break;
2265 } else {
2266 pFlag[i] = PASS;
2267 }
2268 }
2269 }
2270
2271 return wResult;
2272}
2273
2274
2275/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2276* Function: FTL_Write_Block_Table
2277* Inputs: flasg
2278* Outputs: 0=Block Table was updated. No write done. 1=Block write needs to
2279* happen. -1 Error
2280* Description: It writes the block table
2281* Block table always mapped to LBA 0 which inturn mapped
2282* to any physical block
2283*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2284static int FTL_Write_Block_Table(int wForce)
2285{
2286 u32 *pbt = (u32 *)g_pBlockTable;
2287 int wSuccess = PASS;
2288 u32 wTempBlockTableIndex;
2289 u16 bt_pages, new_bt_offset;
2290 u8 blockchangeoccured = 0;
2291
2292 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2293 __FILE__, __LINE__, __func__);
2294
2295 bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
2296
2297 if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus)
2298 return 0;
2299
2300 if (PASS == wForce) {
2301 g_wBlockTableOffset =
2302 (u16)(DeviceInfo.wPagesPerBlock - bt_pages);
2303#if CMD_DMA
2304 p_BTableChangesDelta =
2305 (struct BTableChangesDelta *)g_pBTDelta_Free;
2306 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
2307
2308 p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
2309 p_BTableChangesDelta->g_wBlockTableOffset =
2310 g_wBlockTableOffset;
2311 p_BTableChangesDelta->ValidFields = 0x01;
2312#endif
2313 }
2314
2315 nand_dbg_print(NAND_DBG_DEBUG,
2316 "Inside FTL_Write_Block_Table: block %d Page:%d\n",
2317 g_wBlockTableIndex, g_wBlockTableOffset);
2318
2319 do {
2320 new_bt_offset = g_wBlockTableOffset + bt_pages + 1;
2321 if ((0 == (new_bt_offset % DeviceInfo.wPagesPerBlock)) ||
2322 (new_bt_offset > DeviceInfo.wPagesPerBlock) ||
2323 (FAIL == wSuccess)) {
2324 wTempBlockTableIndex = FTL_Replace_Block_Table();
2325 if (BAD_BLOCK == wTempBlockTableIndex)
2326 return ERR;
2327 if (!blockchangeoccured) {
2328 bt_block_changed = 1;
2329 blockchangeoccured = 1;
2330 }
2331
2332 g_wBlockTableIndex = wTempBlockTableIndex;
2333 g_wBlockTableOffset = 0;
2334 pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
2335#if CMD_DMA
2336 p_BTableChangesDelta =
2337 (struct BTableChangesDelta *)g_pBTDelta_Free;
2338 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
2339
2340 p_BTableChangesDelta->ftl_cmd_cnt =
2341 ftl_cmd_cnt;
2342 p_BTableChangesDelta->g_wBlockTableOffset =
2343 g_wBlockTableOffset;
2344 p_BTableChangesDelta->g_wBlockTableIndex =
2345 g_wBlockTableIndex;
2346 p_BTableChangesDelta->ValidFields = 0x03;
2347
2348 p_BTableChangesDelta =
2349 (struct BTableChangesDelta *)g_pBTDelta_Free;
2350 g_pBTDelta_Free +=
2351 sizeof(struct BTableChangesDelta);
2352
2353 p_BTableChangesDelta->ftl_cmd_cnt =
2354 ftl_cmd_cnt;
2355 p_BTableChangesDelta->BT_Index =
2356 BLOCK_TABLE_INDEX;
2357 p_BTableChangesDelta->BT_Entry_Value =
2358 pbt[BLOCK_TABLE_INDEX];
2359 p_BTableChangesDelta->ValidFields = 0x0C;
2360#endif
2361 }
2362
2363 wSuccess = FTL_Write_Block_Table_Data();
2364 if (FAIL == wSuccess)
2365 MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
2366 } while (FAIL == wSuccess);
2367
2368 g_cBlockTableStatus = CURRENT_BLOCK_TABLE;
2369
2370 return 1;
2371}
2372
2373/******************************************************************
2374* Function: GLOB_FTL_Flash_Format
2375* Inputs: none
2376* Outputs: PASS
2377* Description: The block table stores bad block info, including MDF+
2378* blocks gone bad over the ages. Therefore, if we have a
2379* block table in place, then use it to scan for bad blocks
2380* If not, then scan for MDF.
2381* Now, a block table will only be found if spectra was already
2382* being used. For a fresh flash, we'll go thru scanning for
2383* MDF. If spectra was being used, then there is a chance that
2384* the MDF has been corrupted. Spectra avoids writing to the
2385* first 2 bytes of the spare area to all pages in a block. This
2386* covers all known flash devices. However, since flash
2387* manufacturers have no standard of where the MDF is stored,
2388* this cannot guarantee that the MDF is protected for future
2389* devices too. The initial scanning for the block table assures
2390* this. It is ok even if the block table is outdated, as all
2391* we're looking for are bad block markers.
2392* Use this when mounting a file system or starting a
2393* new flash.
2394*
2395*********************************************************************/
2396static int FTL_Format_Flash(u8 valid_block_table)
2397{
2398 u32 i, j;
2399 u32 *pbt = (u32 *)g_pBlockTable;
2400 u32 tempNode;
2401 int ret;
2402
2403#if CMD_DMA
2404 u32 *pbtStartingCopy = (u32 *)g_pBTStartingCopy;
2405 if (ftl_cmd_cnt)
2406 return FAIL;
2407#endif
2408
2409 if (FAIL == FTL_Check_Block_Table(FAIL))
2410 valid_block_table = 0;
2411
2412 if (valid_block_table) {
2413 u8 switched = 1;
2414 u32 block, k;
2415
2416 k = DeviceInfo.wSpectraStartBlock;
2417 while (switched && (k < DeviceInfo.wSpectraEndBlock)) {
2418 switched = 0;
2419 k++;
2420 for (j = DeviceInfo.wSpectraStartBlock, i = 0;
2421 j <= DeviceInfo.wSpectraEndBlock;
2422 j++, i++) {
2423 block = (pbt[i] & ~BAD_BLOCK) -
2424 DeviceInfo.wSpectraStartBlock;
2425 if (block != i) {
2426 switched = 1;
2427 tempNode = pbt[i];
2428 pbt[i] = pbt[block];
2429 pbt[block] = tempNode;
2430 }
2431 }
2432 }
2433 if ((k == DeviceInfo.wSpectraEndBlock) && switched)
2434 valid_block_table = 0;
2435 }
2436
2437 if (!valid_block_table) {
2438 memset(g_pBlockTable, 0,
2439 DeviceInfo.wDataBlockNum * sizeof(u32));
2440 memset(g_pWearCounter, 0,
2441 DeviceInfo.wDataBlockNum * sizeof(u8));
2442 if (DeviceInfo.MLCDevice)
2443 memset(g_pReadCounter, 0,
2444 DeviceInfo.wDataBlockNum * sizeof(u16));
2445#if CMD_DMA
2446 memset(g_pBTStartingCopy, 0,
2447 DeviceInfo.wDataBlockNum * sizeof(u32));
2448 memset(g_pWearCounterCopy, 0,
2449 DeviceInfo.wDataBlockNum * sizeof(u8));
2450 if (DeviceInfo.MLCDevice)
2451 memset(g_pReadCounterCopy, 0,
2452 DeviceInfo.wDataBlockNum * sizeof(u16));
2453#endif
2454 for (j = DeviceInfo.wSpectraStartBlock, i = 0;
2455 j <= DeviceInfo.wSpectraEndBlock;
2456 j++, i++) {
2457 if (GLOB_LLD_Get_Bad_Block((u32)j))
2458 pbt[i] = (u32)(BAD_BLOCK | j);
2459 }
2460 }
2461
2462 nand_dbg_print(NAND_DBG_WARN, "Erasing all blocks in the NAND\n");
2463
2464 for (j = DeviceInfo.wSpectraStartBlock, i = 0;
2465 j <= DeviceInfo.wSpectraEndBlock;
2466 j++, i++) {
2467 if ((pbt[i] & BAD_BLOCK) != BAD_BLOCK) {
2468 ret = GLOB_LLD_Erase_Block(j);
2469 if (FAIL == ret) {
2470 pbt[i] = (u32)(j);
2471 MARK_BLOCK_AS_BAD(pbt[i]);
2472 nand_dbg_print(NAND_DBG_WARN,
2473 "NAND Program fail in %s, Line %d, "
2474 "Function: %s, new Bad Block %d generated!\n",
2475 __FILE__, __LINE__, __func__, (int)j);
2476 } else {
2477 pbt[i] = (u32)(SPARE_BLOCK | j);
2478 }
2479 }
2480#if CMD_DMA
2481 pbtStartingCopy[i] = pbt[i];
2482#endif
2483 }
2484
2485 g_wBlockTableOffset = 0;
2486 for (i = 0; (i <= (DeviceInfo.wSpectraEndBlock -
2487 DeviceInfo.wSpectraStartBlock))
2488 && ((pbt[i] & BAD_BLOCK) == BAD_BLOCK); i++)
2489 ;
2490 if (i > (DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock)) {
2491 printk(KERN_ERR "All blocks bad!\n");
2492 return FAIL;
2493 } else {
2494 g_wBlockTableIndex = pbt[i] & ~BAD_BLOCK;
2495 if (i != BLOCK_TABLE_INDEX) {
2496 tempNode = pbt[i];
2497 pbt[i] = pbt[BLOCK_TABLE_INDEX];
2498 pbt[BLOCK_TABLE_INDEX] = tempNode;
2499 }
2500 }
2501 pbt[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);
2502
2503#if CMD_DMA
2504 pbtStartingCopy[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);
2505#endif
2506
2507 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
2508 memset(g_pBTBlocks, 0xFF,
2509 (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32));
2510 g_pBTBlocks[FIRST_BT_ID-FIRST_BT_ID] = g_wBlockTableIndex;
2511 FTL_Write_Block_Table(FAIL);
2512
2513 for (i = 0; i < CACHE_ITEM_NUM; i++) {
2514 Cache.array[i].address = NAND_CACHE_INIT_ADDR;
2515 Cache.array[i].use_cnt = 0;
2516 Cache.array[i].changed = CLEAR;
2517 }
2518
2519#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
2520 memcpy((void *)&cache_start_copy, (void *)&Cache,
2521 sizeof(struct flash_cache_tag));
2522#endif
2523 return PASS;
2524}
2525
/*
 * Force-erase every block in the unprotected physical partition of the
 * NAND, ignoring the block table entirely (existing bad-block marks are
 * discarded). Deliberately never returns: after erasing it spins forever
 * so that the user reboots into a freshly formatted device.
 */
static int force_format_nand(void)
{
	u32 i;

	/* Force erase the whole unprotected physical partition of NAND */
	printk(KERN_ALERT "Start to force erase whole NAND device ...\n");
	printk(KERN_ALERT "From phyical block %d to %d\n",
		DeviceInfo.wSpectraStartBlock, DeviceInfo.wSpectraEndBlock);
	for (i = DeviceInfo.wSpectraStartBlock; i <= DeviceInfo.wSpectraEndBlock; i++) {
		if (GLOB_LLD_Erase_Block(i))
			printk(KERN_ERR "Failed to force erase NAND block %d\n", i);
	}
	printk(KERN_ALERT "Force Erase ends. Please reboot the system ...\n");
	/* Intentional hang: driver state is now stale, a reboot is required */
	while(1);

	return PASS;
}
2543
/*
 * Public entry point for formatting the flash.
 *
 * The normal format path (FTL_Format_Flash(1)) is deliberately bypassed
 * in favor of force_format_nand(), which erases every block in the
 * Spectra partition and then hangs so the system is rebooted with a
 * clean device. Returns force_format_nand()'s result, although in
 * practice that call never returns.
 */
int GLOB_FTL_Flash_Format(void)
{
	return force_format_nand();
}
2550
2551/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2552* Function: FTL_Search_Block_Table_IN_Block
2553* Inputs: Block Number
2554* Pointer to page
2555* Outputs: PASS / FAIL
2556* Page contatining the block table
2557* Description: It searches the block table in the block
2558* passed as an argument.
2559*
2560*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2561static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
2562 u8 BT_Tag, u16 *Page)
2563{
2564 u16 i, j, k;
2565 u16 Result = PASS;
2566 u16 Last_IPF = 0;
2567 u8 BT_Found = 0;
2568 u8 *tagarray;
2569 u8 *tempbuf = tmp_buf_search_bt_in_block;
2570 u8 *pSpareBuf = spare_buf_search_bt_in_block;
2571 u8 *pSpareBufBTLastPage = spare_buf_bt_search_bt_in_block;
2572 u8 bt_flag_last_page = 0xFF;
2573 u8 search_in_previous_pages = 0;
2574 u16 bt_pages;
2575
2576 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
2577 __FILE__, __LINE__, __func__);
2578
2579 nand_dbg_print(NAND_DBG_DEBUG,
2580 "Searching block table in %u block\n",
2581 (unsigned int)BT_Block);
2582
2583 bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
2584
2585 for (i = bt_pages; i < DeviceInfo.wPagesPerBlock;
2586 i += (bt_pages + 1)) {
2587 nand_dbg_print(NAND_DBG_DEBUG,
2588 "Searching last IPF: %d\n", i);
2589 Result = GLOB_LLD_Read_Page_Main_Polling(tempbuf,
2590 BT_Block, i, 1);
2591
2592 if (0 == memcmp(tempbuf, g_pIPF, DeviceInfo.wPageDataSize)) {
2593 if ((i + bt_pages + 1) < DeviceInfo.wPagesPerBlock) {
2594 continue;
2595 } else {
2596 search_in_previous_pages = 1;
2597 Last_IPF = i;
2598 }
2599 }
2600
2601 if (!search_in_previous_pages) {
2602 if (i != bt_pages) {
2603 i -= (bt_pages + 1);
2604 Last_IPF = i;
2605 }
2606 }
2607
2608 if (0 == Last_IPF)
2609 break;
2610
2611 if (!search_in_previous_pages) {
2612 i = i + 1;
2613 nand_dbg_print(NAND_DBG_DEBUG,
2614 "Reading the spare area of Block %u Page %u",
2615 (unsigned int)BT_Block, i);
2616 Result = GLOB_LLD_Read_Page_Spare(pSpareBuf,
2617 BT_Block, i, 1);
2618 nand_dbg_print(NAND_DBG_DEBUG,
2619 "Reading the spare area of Block %u Page %u",
2620 (unsigned int)BT_Block, i + bt_pages - 1);
2621 Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
2622 BT_Block, i + bt_pages - 1, 1);
2623
2624 k = 0;
2625 j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
2626 if (j) {
2627 for (; k < j; k++) {
2628 if (tagarray[k] == BT_Tag)
2629 break;
2630 }
2631 }
2632
2633 if (k < j)
2634 bt_flag = tagarray[k];
2635 else
2636 Result = FAIL;
2637
2638 if (Result == PASS) {
2639 k = 0;
2640 j = FTL_Extract_Block_Table_Tag(
2641 pSpareBufBTLastPage, &tagarray);
2642 if (j) {
2643 for (; k < j; k++) {
2644 if (tagarray[k] == BT_Tag)
2645 break;
2646 }
2647 }
2648
2649 if (k < j)
2650 bt_flag_last_page = tagarray[k];
2651 else
2652 Result = FAIL;
2653
2654 if (Result == PASS) {
2655 if (bt_flag == bt_flag_last_page) {
2656 nand_dbg_print(NAND_DBG_DEBUG,
2657 "Block table is found"
2658 " in page after IPF "
2659 "at block %d "
2660 "page %d\n",
2661 (int)BT_Block, i);
2662 BT_Found = 1;
2663 *Page = i;
2664 g_cBlockTableStatus =
2665 CURRENT_BLOCK_TABLE;
2666 break;
2667 } else {
2668 Result = FAIL;
2669 }
2670 }
2671 }
2672 }
2673
2674 if (search_in_previous_pages)
2675 i = i - bt_pages;
2676 else
2677 i = i - (bt_pages + 1);
2678
2679 Result = PASS;
2680
2681 nand_dbg_print(NAND_DBG_DEBUG,
2682 "Reading the spare area of Block %d Page %d",
2683 (int)BT_Block, i);
2684
2685 Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
2686 nand_dbg_print(NAND_DBG_DEBUG,
2687 "Reading the spare area of Block %u Page %u",
2688 (unsigned int)BT_Block, i + bt_pages - 1);
2689
2690 Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
2691 BT_Block, i + bt_pages - 1, 1);
2692
2693 k = 0;
2694 j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
2695 if (j) {
2696 for (; k < j; k++) {
2697 if (tagarray[k] == BT_Tag)
2698 break;
2699 }
2700 }
2701
2702 if (k < j)
2703 bt_flag = tagarray[k];
2704 else
2705 Result = FAIL;
2706
2707 if (Result == PASS) {
2708 k = 0;
2709 j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
2710 &tagarray);
2711 if (j) {
2712 for (; k < j; k++) {
2713 if (tagarray[k] == BT_Tag)
2714 break;
2715 }
2716 }
2717
2718 if (k < j) {
2719 bt_flag_last_page = tagarray[k];
2720 } else {
2721 Result = FAIL;
2722 break;
2723 }
2724
2725 if (Result == PASS) {
2726 if (bt_flag == bt_flag_last_page) {
2727 nand_dbg_print(NAND_DBG_DEBUG,
2728 "Block table is found "
2729 "in page prior to IPF "
2730 "at block %u page %d\n",
2731 (unsigned int)BT_Block, i);
2732 BT_Found = 1;
2733 *Page = i;
2734 g_cBlockTableStatus =
2735 IN_PROGRESS_BLOCK_TABLE;
2736 break;
2737 } else {
2738 Result = FAIL;
2739 break;
2740 }
2741 }
2742 }
2743 }
2744
2745 if (Result == FAIL) {
2746 if ((Last_IPF > bt_pages) && (i < Last_IPF) && (!BT_Found)) {
2747 BT_Found = 1;
2748 *Page = i - (bt_pages + 1);
2749 }
2750 if ((Last_IPF == bt_pages) && (i < Last_IPF) && (!BT_Found))
2751 goto func_return;
2752 }
2753
2754 if (Last_IPF == 0) {
2755 i = 0;
2756 Result = PASS;
2757 nand_dbg_print(NAND_DBG_DEBUG, "Reading the spare area of "
2758 "Block %u Page %u", (unsigned int)BT_Block, i);
2759
2760 Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
2761 nand_dbg_print(NAND_DBG_DEBUG,
2762 "Reading the spare area of Block %u Page %u",
2763 (unsigned int)BT_Block, i + bt_pages - 1);
2764 Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
2765 BT_Block, i + bt_pages - 1, 1);
2766
2767 k = 0;
2768 j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
2769 if (j) {
2770 for (; k < j; k++) {
2771 if (tagarray[k] == BT_Tag)
2772 break;
2773 }
2774 }
2775
2776 if (k < j)
2777 bt_flag = tagarray[k];
2778 else
2779 Result = FAIL;
2780
2781 if (Result == PASS) {
2782 k = 0;
2783 j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
2784 &tagarray);
2785 if (j) {
2786 for (; k < j; k++) {
2787 if (tagarray[k] == BT_Tag)
2788 break;
2789 }
2790 }
2791
2792 if (k < j)
2793 bt_flag_last_page = tagarray[k];
2794 else
2795 Result = FAIL;
2796
2797 if (Result == PASS) {
2798 if (bt_flag == bt_flag_last_page) {
2799 nand_dbg_print(NAND_DBG_DEBUG,
2800 "Block table is found "
2801 "in page after IPF at "
2802 "block %u page %u\n",
2803 (unsigned int)BT_Block,
2804 (unsigned int)i);
2805 BT_Found = 1;
2806 *Page = i;
2807 g_cBlockTableStatus =
2808 CURRENT_BLOCK_TABLE;
2809 goto func_return;
2810 } else {
2811 Result = FAIL;
2812 }
2813 }
2814 }
2815
2816 if (Result == FAIL)
2817 goto func_return;
2818 }
2819func_return:
2820 return Result;
2821}
2822
2823u8 *get_blk_table_start_addr(void)
2824{
2825 return g_pBlockTable;
2826}
2827
2828unsigned long get_blk_table_len(void)
2829{
2830 return DeviceInfo.wDataBlockNum * sizeof(u32);
2831}
2832
2833u8 *get_wear_leveling_table_start_addr(void)
2834{
2835 return g_pWearCounter;
2836}
2837
2838unsigned long get_wear_leveling_table_len(void)
2839{
2840 return DeviceInfo.wDataBlockNum * sizeof(u8);
2841}
2842
2843/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2844* Function: FTL_Read_Block_Table
2845* Inputs: none
2846* Outputs: PASS / FAIL
2847* Description: read the flash spare area and find a block containing the
2848* most recent block table(having largest block_table_counter).
2849* Find the last written Block table in this block.
2850* Check the correctness of Block Table
2851* If CDMA is enabled, this function is called in
2852* polling mode.
2853* We don't need to store changes in Block table in this
2854* function as it is called only at initialization
2855*
2856* Note: Currently this function is called at initialization
2857* before any read/erase/write command issued to flash so,
2858* there is no need to wait for CDMA list to complete as of now
2859*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2860static int FTL_Read_Block_Table(void)
2861{
2862 u16 i = 0;
2863 int k, j;
2864 u8 *tempBuf, *tagarray;
2865 int wResult = FAIL;
2866 int status = FAIL;
2867 u8 block_table_found = 0;
2868 int search_result;
2869 u32 Block;
2870 u16 Page = 0;
2871 u16 PageCount;
2872 u16 bt_pages;
2873 int wBytesCopied = 0, tempvar;
2874
2875 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2876 __FILE__, __LINE__, __func__);
2877
2878 tempBuf = tmp_buf1_read_blk_table;
2879 bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
2880
2881 for (j = DeviceInfo.wSpectraStartBlock;
2882 j <= (int)DeviceInfo.wSpectraEndBlock;
2883 j++) {
2884 status = GLOB_LLD_Read_Page_Spare(tempBuf, j, 0, 1);
2885 k = 0;
2886 i = FTL_Extract_Block_Table_Tag(tempBuf, &tagarray);
2887 if (i) {
2888 status = GLOB_LLD_Read_Page_Main_Polling(tempBuf,
2889 j, 0, 1);
2890 for (; k < i; k++) {
2891 if (tagarray[k] == tempBuf[3])
2892 break;
2893 }
2894 }
2895
2896 if (k < i)
2897 k = tagarray[k];
2898 else
2899 continue;
2900
2901 nand_dbg_print(NAND_DBG_DEBUG,
2902 "Block table is contained in Block %d %d\n",
2903 (unsigned int)j, (unsigned int)k);
2904
2905 if (g_pBTBlocks[k-FIRST_BT_ID] == BTBLOCK_INVAL) {
2906 g_pBTBlocks[k-FIRST_BT_ID] = j;
2907 block_table_found = 1;
2908 } else {
2909 printk(KERN_ERR "FTL_Read_Block_Table -"
2910 "This should never happens. "
2911 "Two block table have same counter %u!\n", k);
2912 }
2913 }
2914
2915 if (block_table_found) {
2916 if (g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL &&
2917 g_pBTBlocks[LAST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) {
2918 j = LAST_BT_ID;
2919 while ((j > FIRST_BT_ID) &&
2920 (g_pBTBlocks[j - FIRST_BT_ID] != BTBLOCK_INVAL))
2921 j--;
2922 if (j == FIRST_BT_ID) {
2923 j = LAST_BT_ID;
2924 last_erased = LAST_BT_ID;
2925 } else {
2926 last_erased = (u8)j + 1;
2927 while ((j > FIRST_BT_ID) && (BTBLOCK_INVAL ==
2928 g_pBTBlocks[j - FIRST_BT_ID]))
2929 j--;
2930 }
2931 } else {
2932 j = FIRST_BT_ID;
2933 while (g_pBTBlocks[j - FIRST_BT_ID] == BTBLOCK_INVAL)
2934 j++;
2935 last_erased = (u8)j;
2936 while ((j < LAST_BT_ID) && (BTBLOCK_INVAL !=
2937 g_pBTBlocks[j - FIRST_BT_ID]))
2938 j++;
2939 if (g_pBTBlocks[j-FIRST_BT_ID] == BTBLOCK_INVAL)
2940 j--;
2941 }
2942
2943 if (last_erased > j)
2944 j += (1 + LAST_BT_ID - FIRST_BT_ID);
2945
2946 for (; (j >= last_erased) && (FAIL == wResult); j--) {
2947 i = (j - FIRST_BT_ID) %
2948 (1 + LAST_BT_ID - FIRST_BT_ID);
2949 search_result =
2950 FTL_Search_Block_Table_IN_Block(g_pBTBlocks[i],
2951 i + FIRST_BT_ID, &Page);
2952 if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
2953 block_table_found = 0;
2954
2955 while ((search_result == PASS) && (FAIL == wResult)) {
2956 nand_dbg_print(NAND_DBG_DEBUG,
2957 "FTL_Read_Block_Table:"
2958 "Block: %u Page: %u "
2959 "contains block table\n",
2960 (unsigned int)g_pBTBlocks[i],
2961 (unsigned int)Page);
2962
2963 tempBuf = tmp_buf2_read_blk_table;
2964
2965 for (k = 0; k < bt_pages; k++) {
2966 Block = g_pBTBlocks[i];
2967 PageCount = 1;
2968
2969 status =
2970 GLOB_LLD_Read_Page_Main_Polling(
2971 tempBuf, Block, Page, PageCount);
2972
2973 tempvar = k ? 0 : 4;
2974
2975 wBytesCopied +=
2976 FTL_Copy_Block_Table_From_Flash(
2977 tempBuf + tempvar,
2978 DeviceInfo.wPageDataSize - tempvar,
2979 wBytesCopied);
2980
2981 Page++;
2982 }
2983
2984 wResult = FTL_Check_Block_Table(FAIL);
2985 if (FAIL == wResult) {
2986 block_table_found = 0;
2987 if (Page > bt_pages)
2988 Page -= ((bt_pages<<1) + 1);
2989 else
2990 search_result = FAIL;
2991 }
2992 }
2993 }
2994 }
2995
2996 if (PASS == wResult) {
2997 if (!block_table_found)
2998 FTL_Execute_SPL_Recovery();
2999
3000 if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
3001 g_wBlockTableOffset = (u16)Page + 1;
3002 else
3003 g_wBlockTableOffset = (u16)Page - bt_pages;
3004
3005 g_wBlockTableIndex = (u32)g_pBTBlocks[i];
3006
3007#if CMD_DMA
3008 if (DeviceInfo.MLCDevice)
3009 memcpy(g_pBTStartingCopy, g_pBlockTable,
3010 DeviceInfo.wDataBlockNum * sizeof(u32)
3011 + DeviceInfo.wDataBlockNum * sizeof(u8)
3012 + DeviceInfo.wDataBlockNum * sizeof(u16));
3013 else
3014 memcpy(g_pBTStartingCopy, g_pBlockTable,
3015 DeviceInfo.wDataBlockNum * sizeof(u32)
3016 + DeviceInfo.wDataBlockNum * sizeof(u8));
3017#endif
3018 }
3019
3020 if (FAIL == wResult)
3021 printk(KERN_ERR "Yunpeng - "
3022 "Can not find valid spectra block table!\n");
3023
3024#if AUTO_FORMAT_FLASH
3025 if (FAIL == wResult) {
3026 nand_dbg_print(NAND_DBG_DEBUG, "doing auto-format\n");
3027 wResult = FTL_Format_Flash(0);
3028 }
3029#endif
3030
3031 return wResult;
3032}
3033
3034
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     FTL_Flash_Error_Handle
* Inputs:       Pointer to data
*               Page address (old physical page the data came from)
*               Block address (byte address of the failing logical block)
* Outputs:      PASS=0 / FAIL=1 / ERR
* Description:  Handles an error that occurred during a Spectra operation:
*               remaps the failing logical block onto a spare block
*               (marking the old one BAD) and rewrites the cached data,
*               retrying with an erase in between on write failure.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr,
				u64 blk_addr)
{
	u32 i;
	int j;
	u32 tmp_node, blk_node = BLK_FROM_ADDR(blk_addr);
	u64 phy_addr;
	int wErase = FAIL;	/* becomes PASS once an erase is needed before retry */
	int wResult = FAIL;
	u32 *pbt = (u32 *)g_pBlockTable;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Reclaim discarded blocks first so a spare is likely available */
	if (ERR == GLOB_FTL_Garbage_Collection())
		return ERR;

	do {
		/* Scan block-table entries (highest first) for a spare block
		 * to swap in for the failing one.
		 * NOTE(review): the loop bound is derived from physical block
		 * numbers while 'i' indexes block-table entries -- assumes
		 * the two ranges coincide; confirm against DeviceInfo. */
		for (i = DeviceInfo.wSpectraEndBlock -
			DeviceInfo.wSpectraStartBlock;
					i > 0; i--) {
			if (IS_SPARE_BLOCK(i)) {
				/* Swap entries: failing block is flagged BAD
				 * and parked at index i; the spare (SPARE
				 * flag cleared) takes over the logical slot */
				tmp_node = (u32)(BAD_BLOCK |
					pbt[blk_node]);
				pbt[blk_node] = (u32)(pbt[i] &
					(~SPARE_BLOCK));
				pbt[i] = tmp_node;
#if CMD_DMA
				/* Record both table changes in the delta log
				 * for CDMA replay */
				p_BTableChangesDelta =
					(struct BTableChangesDelta *)
					g_pBTDelta_Free;
				g_pBTDelta_Free +=
					sizeof(struct BTableChangesDelta);

				p_BTableChangesDelta->ftl_cmd_cnt =
					ftl_cmd_cnt;
				p_BTableChangesDelta->BT_Index =
					blk_node;
				p_BTableChangesDelta->BT_Entry_Value =
					pbt[blk_node];
				p_BTableChangesDelta->ValidFields = 0x0C;

				p_BTableChangesDelta =
					(struct BTableChangesDelta *)
					g_pBTDelta_Free;
				g_pBTDelta_Free +=
					sizeof(struct BTableChangesDelta);

				p_BTableChangesDelta->ftl_cmd_cnt =
					ftl_cmd_cnt;
				p_BTableChangesDelta->BT_Index = i;
				p_BTableChangesDelta->BT_Entry_Value = pbt[i];
				p_BTableChangesDelta->ValidFields = 0x0C;
#endif
				wResult = PASS;
				break;
			}
		}

		if (FAIL == wResult) {
			/* No spare found: try to reclaim one via GC,
			 * give up if GC frees nothing either */
			if (FAIL == GLOB_FTL_Garbage_Collection())
				break;
			else
				continue;
		}

		/* Flag the on-flash block table as being updated */
		if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}

		phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);

		/* Rewrite the cached data into the replacement block,
		 * erasing it first on every retry after a write failure */
		for (j = 0; j < RETRY_TIMES; j++) {
			if (PASS == wErase) {
				if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
					MARK_BLOCK_AS_BAD(pbt[blk_node]);
					break;
				}
			}
			if (PASS == FTL_Cache_Update_Block(pData,
					old_page_addr,
					phy_addr)) {
				wResult = PASS;
				break;
			} else {
				wResult = FAIL;
				wErase = PASS;
			}
		}
	} while (FAIL == wResult);

	FTL_Write_Block_Table(FAIL);

	return wResult;
}
3139
3140/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3141* Function: FTL_Get_Page_Num
3142* Inputs: Size in bytes
3143* Outputs: Size in pages
3144* Description: It calculates the pages required for the length passed
3145*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3146static u32 FTL_Get_Page_Num(u64 length)
3147{
3148 return (u32)((length >> DeviceInfo.nBitsInPageDataSize) +
3149 (GLOB_u64_Remainder(length , 1) > 0 ? 1 : 0));
3150}
3151
3152/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3153* Function: FTL_Get_Physical_Block_Addr
3154* Inputs: Block Address (byte format)
3155* Outputs: Physical address of the block.
3156* Description: It translates LBA to PBA by returning address stored
3157* at the LBA location in the block table
3158*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3159static u64 FTL_Get_Physical_Block_Addr(u64 logical_addr)
3160{
3161 u32 *pbt;
3162 u64 physical_addr;
3163
3164 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3165 __FILE__, __LINE__, __func__);
3166
3167 pbt = (u32 *)g_pBlockTable;
3168 physical_addr = (u64) DeviceInfo.wBlockDataSize *
3169 (pbt[BLK_FROM_ADDR(logical_addr)] & (~BAD_BLOCK));
3170
3171 return physical_addr;
3172}
3173
3174/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3175* Function: FTL_Get_Block_Index
3176* Inputs: Physical Block no.
3177* Outputs: Logical block no. /BAD_BLOCK
3178* Description: It returns the logical block no. for the PBA passed
3179*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3180static u32 FTL_Get_Block_Index(u32 wBlockNum)
3181{
3182 u32 *pbt = (u32 *)g_pBlockTable;
3183 u32 i;
3184
3185 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3186 __FILE__, __LINE__, __func__);
3187
3188 for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
3189 if (wBlockNum == (pbt[i] & (~BAD_BLOCK)))
3190 return i;
3191
3192 return BAD_BLOCK;
3193}
3194
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     GLOB_FTL_Wear_Leveling
* Inputs:       none
* Outputs:      PASS=0 (always)
* Description:  This is static wear leveling (done by explicit call)
*               do complete static wear leveling
*               do complete garbage collection (cleans up the discarded
*               blocks the wear-leveling exchange produced)
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_FTL_Wear_Leveling(void)
{
	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Return values of both phases are intentionally ignored here */
	FTL_Static_Wear_Leveling();
	GLOB_FTL_Garbage_Collection();

	return PASS;
}
3213
/* Scan the block table for the least-worn data (in-use) block and the
 * most-worn spare block, skipping bad blocks and blocks already handled
 * in this wear-leveling pass (chg[i] == PASS).
 * Output parameters are only overwritten when a better candidate is
 * found, so the caller's initial values act as defaults. */
static void find_least_most_worn(u8 *chg,
	u32 *least_idx, u8 *least_cnt,
	u32 *most_idx, u8 *most_cnt)
{
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 idx;	/* physical block number of table entry i */
	u8 cnt;		/* wear count of that physical block */
	int i;

	for (i = BLOCK_TABLE_INDEX + 1; i < DeviceInfo.wDataBlockNum; i++) {
		if (IS_BAD_BLOCK(i) || PASS == chg[i])
			continue;

		idx = (u32) ((~BAD_BLOCK) & pbt[i]);
		cnt = g_pWearCounter[idx - DeviceInfo.wSpectraStartBlock];

		/* Spare blocks compete for "most worn"... */
		if (IS_SPARE_BLOCK(i)) {
			if (cnt > *most_cnt) {
				*most_cnt = cnt;
				*most_idx = idx;
			}
		}

		/* ...data blocks compete for "least worn" */
		if (IS_DATA_BLOCK(i)) {
			if (cnt < *least_cnt) {
				*least_cnt = cnt;
				*least_idx = idx;
			}
		}

		/* NOTE(review): this 'continue' is the last statement of the
		 * loop body and therefore has no effect; also chg[] is tested
		 * here with physical block numbers while the loop above
		 * indexes it by table entry -- verify intent. */
		if (PASS == chg[*most_idx] || PASS == chg[*least_idx]) {
			debug_boundary_error(*most_idx,
				DeviceInfo.wDataBlockNum, 0);
			debug_boundary_error(*least_idx,
				DeviceInfo.wDataBlockNum, 0);
			continue;
		}
	}
}
3253
/* Exchange the least-worn data block (*least_idx) with a most-worn spare
 * block and migrate its contents.  *rep_blk_num counts replacements so a
 * pass can be capped at WEAR_LEVELING_BLOCK_NUM; *result is set to FAIL
 * on unrecoverable copy failure.
 * Returns PASS to continue wear leveling, FAIL to stop. */
static int move_blks_for_wear_leveling(u8 *chg,
	u32 *least_idx, u32 *rep_blk_num, int *result)
{
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 rep_blk;
	int j, ret_cp_blk, ret_erase;
	int ret = PASS;

	/* Mark this block as handled for the rest of the pass */
	chg[*least_idx] = PASS;
	debug_boundary_error(*least_idx, DeviceInfo.wDataBlockNum, 0);

	rep_blk = FTL_Replace_MWBlock();
	if (rep_blk != BAD_BLOCK) {
		nand_dbg_print(NAND_DBG_DEBUG,
			"More than two spare blocks exist so do it\n");
		nand_dbg_print(NAND_DBG_DEBUG, "Block Replaced is %d\n",
			rep_blk);

		/* NOTE(review): rep_blk is the value FTL_Replace_MWBlock
		 * computed from a masked table entry (a physical block
		 * number), but chg[] is indexed by table entry above --
		 * confirm the two index spaces coincide */
		chg[rep_blk] = PASS;

		if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}

		/* Copy the data over, erasing the target and retrying on
		 * copy failure */
		for (j = 0; j < RETRY_TIMES; j++) {
			ret_cp_blk = FTL_Copy_Block((u64)(*least_idx) *
				DeviceInfo.wBlockDataSize,
				(u64)rep_blk * DeviceInfo.wBlockDataSize);
			if (FAIL == ret_cp_blk) {
				ret_erase = GLOB_FTL_Block_Erase((u64)rep_blk
					* DeviceInfo.wBlockDataSize);
				if (FAIL == ret_erase)
					MARK_BLOCK_AS_BAD(pbt[rep_blk]);
			} else {
				nand_dbg_print(NAND_DBG_DEBUG,
					"FTL_Copy_Block == OK\n");
				break;
			}
		}

		if (j < RETRY_TIMES) {
			/* Copy succeeded: swap the two table entries; the
			 * displaced old block becomes a discarded block */
			u32 tmp;
			u32 old_idx = FTL_Get_Block_Index(*least_idx);
			u32 rep_idx = FTL_Get_Block_Index(rep_blk);
			tmp = (u32)(DISCARD_BLOCK | pbt[old_idx]);
			pbt[old_idx] = (u32)((~SPARE_BLOCK) &
				pbt[rep_idx]);
			pbt[rep_idx] = tmp;
#if CMD_DMA
			/* Log both entry updates for CDMA replay */
			p_BTableChangesDelta = (struct BTableChangesDelta *)
				g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
			p_BTableChangesDelta->ftl_cmd_cnt =
				ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index = old_idx;
			p_BTableChangesDelta->BT_Entry_Value = pbt[old_idx];
			p_BTableChangesDelta->ValidFields = 0x0C;

			p_BTableChangesDelta = (struct BTableChangesDelta *)
				g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

			p_BTableChangesDelta->ftl_cmd_cnt =
				ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index = rep_idx;
			p_BTableChangesDelta->BT_Entry_Value = pbt[rep_idx];
			p_BTableChangesDelta->ValidFields = 0x0C;
#endif
		} else {
			/* All retries failed: retire the replacement block */
			pbt[FTL_Get_Block_Index(rep_blk)] |= BAD_BLOCK;
#if CMD_DMA
			p_BTableChangesDelta = (struct BTableChangesDelta *)
				g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

			p_BTableChangesDelta->ftl_cmd_cnt =
				ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index =
				FTL_Get_Block_Index(rep_blk);
			p_BTableChangesDelta->BT_Entry_Value =
				pbt[FTL_Get_Block_Index(rep_blk)];
			p_BTableChangesDelta->ValidFields = 0x0C;
#endif
			*result = FAIL;
			ret = FAIL;
		}

		/* Cap the number of replacements per wear-leveling pass */
		if (((*rep_blk_num)++) > WEAR_LEVELING_BLOCK_NUM)
			ret = FAIL;
	} else {
		printk(KERN_ERR "Less than 3 spare blocks exist so quit\n");
		ret = FAIL;
	}

	return ret;
}
3351
3352/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3353* Function: FTL_Static_Wear_Leveling
3354* Inputs: none
3355* Outputs: PASS=0 / FAIL=1
3356* Description: This is static wear leveling (done by explicit call)
3357* search for most&least used
3358* if difference < GATE:
3359* update the block table with exhange
3360* mark block table in flash as IN_PROGRESS
3361* copy flash block
3362* the caller should handle GC clean up after calling this function
3363*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3364int FTL_Static_Wear_Leveling(void)
3365{
3366 u8 most_worn_cnt;
3367 u8 least_worn_cnt;
3368 u32 most_worn_idx;
3369 u32 least_worn_idx;
3370 int result = PASS;
3371 int go_on = PASS;
3372 u32 replaced_blks = 0;
3373 u8 *chang_flag = flags_static_wear_leveling;
3374
3375 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
3376 __FILE__, __LINE__, __func__);
3377
3378 if (!chang_flag)
3379 return FAIL;
3380
3381 memset(chang_flag, FAIL, DeviceInfo.wDataBlockNum);
3382 while (go_on == PASS) {
3383 nand_dbg_print(NAND_DBG_DEBUG,
3384 "starting static wear leveling\n");
3385 most_worn_cnt = 0;
3386 least_worn_cnt = 0xFF;
3387 least_worn_idx = BLOCK_TABLE_INDEX;
3388 most_worn_idx = BLOCK_TABLE_INDEX;
3389
3390 find_least_most_worn(chang_flag, &least_worn_idx,
3391 &least_worn_cnt, &most_worn_idx, &most_worn_cnt);
3392
3393 nand_dbg_print(NAND_DBG_DEBUG,
3394 "Used and least worn is block %u, whos count is %u\n",
3395 (unsigned int)least_worn_idx,
3396 (unsigned int)least_worn_cnt);
3397
3398 nand_dbg_print(NAND_DBG_DEBUG,
3399 "Free and most worn is block %u, whos count is %u\n",
3400 (unsigned int)most_worn_idx,
3401 (unsigned int)most_worn_cnt);
3402
3403 if ((most_worn_cnt > least_worn_cnt) &&
3404 (most_worn_cnt - least_worn_cnt > WEAR_LEVELING_GATE))
3405 go_on = move_blks_for_wear_leveling(chang_flag,
3406 &least_worn_idx, &replaced_blks, &result);
3407 else
3408 go_on = FAIL;
3409 }
3410
3411 return result;
3412}
3413
3414#if CMD_DMA
/* CDMA variant of the GC eraser: erase up to 'discard_cnt' discarded
 * blocks, logging each block-table change as a delta for CDMA replay.
 * Stops early when CDMA command headroom runs out (ftl_cmd_cnt + 28 must
 * stay below 256).  Blocks currently holding a block-table copy are
 * skipped here (handled by BT garbage collection).
 * Returns PASS if at least one block was erased, FAIL otherwise. */
static int do_garbage_collection(u32 discard_cnt)
{
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 pba;
	u8 bt_block_erased = 0;
	int i, cnt, ret = FAIL;
	u64 addr;

	i = 0;
	while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0) &&
			((ftl_cmd_cnt + 28) < 256)) {
		if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
				(pbt[i] & DISCARD_BLOCK)) {
			/* Mark the on-flash block table as in-progress
			 * before modifying entries */
			if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
				g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
				FTL_Write_IN_Progress_Block_Table_Page();
			}

			addr = FTL_Get_Physical_Block_Addr((u64)i *
						DeviceInfo.wBlockDataSize);
			pba = BLK_FROM_ADDR(addr);

			/* Leave blocks holding a block-table copy to the
			 * BT garbage collector */
			for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
				if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
					nand_dbg_print(NAND_DBG_DEBUG,
						"GC will erase BT block %u\n",
						(unsigned int)pba);
					discard_cnt--;
					i++;
					bt_block_erased = 1;
					break;
				}
			}

			if (bt_block_erased) {
				bt_block_erased = 0;
				continue;
			}

			addr = FTL_Get_Physical_Block_Addr((u64)i *
						DeviceInfo.wBlockDataSize);

			if (PASS == GLOB_FTL_Block_Erase(addr)) {
				/* Erased block returns to the spare pool;
				 * log the entry change for CDMA replay */
				pbt[i] &= (u32)(~DISCARD_BLOCK);
				pbt[i] |= (u32)(SPARE_BLOCK);
				p_BTableChangesDelta =
					(struct BTableChangesDelta *)
					g_pBTDelta_Free;
				g_pBTDelta_Free +=
					sizeof(struct BTableChangesDelta);
				p_BTableChangesDelta->ftl_cmd_cnt =
					ftl_cmd_cnt - 1;
				p_BTableChangesDelta->BT_Index = i;
				p_BTableChangesDelta->BT_Entry_Value = pbt[i];
				p_BTableChangesDelta->ValidFields = 0x0C;
				discard_cnt--;
				ret = PASS;
			} else {
				MARK_BLOCK_AS_BAD(pbt[i]);
			}
		}

		i++;
	}

	return ret;
}
3482
3483#else
3484static int do_garbage_collection(u32 discard_cnt)
3485{
3486 u32 *pbt = (u32 *)g_pBlockTable;
3487 u32 pba;
3488 u8 bt_block_erased = 0;
3489 int i, cnt, ret = FAIL;
3490 u64 addr;
3491
3492 i = 0;
3493 while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0)) {
3494 if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
3495 (pbt[i] & DISCARD_BLOCK)) {
3496 if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
3497 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
3498 FTL_Write_IN_Progress_Block_Table_Page();
3499 }
3500
3501 addr = FTL_Get_Physical_Block_Addr((u64)i *
3502 DeviceInfo.wBlockDataSize);
3503 pba = BLK_FROM_ADDR(addr);
3504
3505 for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
3506 if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
3507 nand_dbg_print(NAND_DBG_DEBUG,
3508 "GC will erase BT block %d\n",
3509 pba);
3510 discard_cnt--;
3511 i++;
3512 bt_block_erased = 1;
3513 break;
3514 }
3515 }
3516
3517 if (bt_block_erased) {
3518 bt_block_erased = 0;
3519 continue;
3520 }
3521
3522 /* If the discard block is L2 cache block, then just skip it */
3523 for (cnt = 0; cnt < BLK_NUM_FOR_L2_CACHE; cnt++) {
3524 if (cache_l2.blk_array[cnt] == pba) {
3525 nand_dbg_print(NAND_DBG_DEBUG,
3526 "GC will erase L2 cache blk %d\n",
3527 pba);
3528 break;
3529 }
3530 }
3531 if (cnt < BLK_NUM_FOR_L2_CACHE) { /* Skip it */
3532 discard_cnt--;
3533 i++;
3534 continue;
3535 }
3536
3537 addr = FTL_Get_Physical_Block_Addr((u64)i *
3538 DeviceInfo.wBlockDataSize);
3539
3540 if (PASS == GLOB_FTL_Block_Erase(addr)) {
3541 pbt[i] &= (u32)(~DISCARD_BLOCK);
3542 pbt[i] |= (u32)(SPARE_BLOCK);
3543 discard_cnt--;
3544 ret = PASS;
3545 } else {
3546 MARK_BLOCK_AS_BAD(pbt[i]);
3547 }
3548 }
3549
3550 i++;
3551 }
3552
3553 return ret;
3554}
3555#endif
3556
3557/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3558* Function: GLOB_FTL_Garbage_Collection
3559* Inputs: none
3560* Outputs: PASS / FAIL (returns the number of un-erased blocks
3561* Description: search the block table for all discarded blocks to erase
3562* for each discarded block:
3563* set the flash block to IN_PROGRESS
3564* erase the block
3565* update the block table
3566* write the block table to flash
3567*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3568int GLOB_FTL_Garbage_Collection(void)
3569{
3570 u32 i;
3571 u32 wDiscard = 0;
3572 int wResult = FAIL;
3573 u32 *pbt = (u32 *)g_pBlockTable;
3574
3575 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
3576 __FILE__, __LINE__, __func__);
3577
3578 if (GC_Called) {
3579 printk(KERN_ALERT "GLOB_FTL_Garbage_Collection() "
3580 "has been re-entered! Exit.\n");
3581 return PASS;
3582 }
3583
3584 GC_Called = 1;
3585
3586 GLOB_FTL_BT_Garbage_Collection();
3587
3588 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
3589 if (IS_DISCARDED_BLOCK(i))
3590 wDiscard++;
3591 }
3592
3593 if (wDiscard <= 0) {
3594 GC_Called = 0;
3595 return wResult;
3596 }
3597
3598 nand_dbg_print(NAND_DBG_DEBUG,
3599 "Found %d discarded blocks\n", wDiscard);
3600
3601 FTL_Write_Block_Table(FAIL);
3602
3603 wResult = do_garbage_collection(wDiscard);
3604
3605 FTL_Write_Block_Table(FAIL);
3606
3607 GC_Called = 0;
3608
3609 return wResult;
3610}
3611
3612
3613#if CMD_DMA
/* CDMA variant: erase discarded blocks that used to hold block-table
 * copies, starting at 'last_erased' and walking the circular BT block
 * list.  Stops when the next-but-one slot is invalid or CDMA command
 * headroom (ftl_cmd_cnt + 28 < 256) runs out.  Not reentrant
 * (BT_GC_Called guard).  Returns PASS if anything was erased. */
static int do_bt_garbage_collection(void)
{
	u32 pba, lba;
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
	u64 addr;
	int i, ret = FAIL;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (BT_GC_Called)
		return PASS;

	BT_GC_Called = 1;

	/* NOTE(review): "+ FIRST_BT_ID - FIRST_BT_ID" below cancels out;
	 * it appears kept only for symmetry with other g_pBTBlocks[]
	 * index expressions */
	for (i = last_erased; (i <= LAST_BT_ID) &&
		(g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
		FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) &&
		((ftl_cmd_cnt + 28)) < 256; i++) {
		pba = pBTBlocksNode[i - FIRST_BT_ID];
		lba = FTL_Get_Block_Index(pba);
		nand_dbg_print(NAND_DBG_DEBUG,
			"do_bt_garbage_collection: pba %d, lba %d\n",
			pba, lba);
		nand_dbg_print(NAND_DBG_DEBUG,
			"Block Table Entry: %d", pbt[lba]);

		/* Only erase entries that are discarded and not bad */
		if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
			(pbt[lba] & DISCARD_BLOCK)) {
			nand_dbg_print(NAND_DBG_DEBUG,
				"do_bt_garbage_collection_cdma: "
				"Erasing Block tables present in block %d\n",
				pba);
			addr = FTL_Get_Physical_Block_Addr((u64)lba *
						DeviceInfo.wBlockDataSize);
			if (PASS == GLOB_FTL_Block_Erase(addr)) {
				/* Erased block goes back to the spare pool */
				pbt[lba] &= (u32)(~DISCARD_BLOCK);
				pbt[lba] |= (u32)(SPARE_BLOCK);

				/* Log the table change for CDMA replay */
				p_BTableChangesDelta =
					(struct BTableChangesDelta *)
					g_pBTDelta_Free;
				g_pBTDelta_Free +=
					sizeof(struct BTableChangesDelta);

				p_BTableChangesDelta->ftl_cmd_cnt =
					ftl_cmd_cnt - 1;
				p_BTableChangesDelta->BT_Index = lba;
				p_BTableChangesDelta->BT_Entry_Value =
					pbt[lba];

				p_BTableChangesDelta->ValidFields = 0x0C;

				ret = PASS;
				/* Invalidate the oldest slot, then advance
				 * last_erased circularly */
				pBTBlocksNode[last_erased - FIRST_BT_ID] =
					BTBLOCK_INVAL;
				nand_dbg_print(NAND_DBG_DEBUG,
					"resetting bt entry at index %d "
					"value %d\n", i,
					pBTBlocksNode[i - FIRST_BT_ID]);
				if (last_erased == LAST_BT_ID)
					last_erased = FIRST_BT_ID;
				else
					last_erased++;
			} else {
				MARK_BLOCK_AS_BAD(pbt[lba]);
			}
		}
	}

	BT_GC_Called = 0;

	return ret;
}
3689
3690#else
3691static int do_bt_garbage_collection(void)
3692{
3693 u32 pba, lba;
3694 u32 *pbt = (u32 *)g_pBlockTable;
3695 u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
3696 u64 addr;
3697 int i, ret = FAIL;
3698
3699 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3700 __FILE__, __LINE__, __func__);
3701
3702 if (BT_GC_Called)
3703 return PASS;
3704
3705 BT_GC_Called = 1;
3706
3707 for (i = last_erased; (i <= LAST_BT_ID) &&
3708 (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
3709 FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL); i++) {
3710 pba = pBTBlocksNode[i - FIRST_BT_ID];
3711 lba = FTL_Get_Block_Index(pba);
3712 nand_dbg_print(NAND_DBG_DEBUG,
3713 "do_bt_garbage_collection_cdma: pba %d, lba %d\n",
3714 pba, lba);
3715 nand_dbg_print(NAND_DBG_DEBUG,
3716 "Block Table Entry: %d", pbt[lba]);
3717
3718 if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
3719 (pbt[lba] & DISCARD_BLOCK)) {
3720 nand_dbg_print(NAND_DBG_DEBUG,
3721 "do_bt_garbage_collection: "
3722 "Erasing Block tables present in block %d\n",
3723 pba);
3724 addr = FTL_Get_Physical_Block_Addr((u64)lba *
3725 DeviceInfo.wBlockDataSize);
3726 if (PASS == GLOB_FTL_Block_Erase(addr)) {
3727 pbt[lba] &= (u32)(~DISCARD_BLOCK);
3728 pbt[lba] |= (u32)(SPARE_BLOCK);
3729 ret = PASS;
3730 pBTBlocksNode[last_erased - FIRST_BT_ID] =
3731 BTBLOCK_INVAL;
3732 nand_dbg_print(NAND_DBG_DEBUG,
3733 "resetting bt entry at index %d "
3734 "value %d\n", i,
3735 pBTBlocksNode[i - FIRST_BT_ID]);
3736 if (last_erased == LAST_BT_ID)
3737 last_erased = FIRST_BT_ID;
3738 else
3739 last_erased++;
3740 } else {
3741 MARK_BLOCK_AS_BAD(pbt[lba]);
3742 }
3743 }
3744 }
3745
3746 BT_GC_Called = 0;
3747
3748 return ret;
3749}
3750
3751#endif
3752
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     GLOB_FTL_BT_Garbage_Collection
* Inputs:       none
* Outputs:      PASS / FAIL
* Description:  Erases discarded blocks containing Block table.
*               Thin wrapper around do_bt_garbage_collection(),
*               which has CDMA and non-CDMA variants selected at
*               compile time via CMD_DMA.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_FTL_BT_Garbage_Collection(void)
{
	return do_bt_garbage_collection();
}
3764
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     FTL_Replace_OneBlock
* Inputs:       Block number 1 (blk: entry being replaced)
*               Block number 2 (rep_blk: spare entry taking its place)
* Outputs:      Replaced Block Number (new entry value at blk),
*               or BAD_BLOCK when rep_blk == BAD_BLOCK (no-op)
* Description:  Interchange block table entries at wBlockNum and wReplaceNum
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static u32 FTL_Replace_OneBlock(u32 blk, u32 rep_blk)
{
	u32 tmp_blk;
	u32 replace_node = BAD_BLOCK;
	u32 *pbt = (u32 *)g_pBlockTable;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (rep_blk != BAD_BLOCK) {
		/* A bad entry keeps its flags; otherwise the displaced
		 * entry is marked DISCARD with its SPARE flag cleared */
		if (IS_BAD_BLOCK(blk))
			tmp_blk = pbt[blk];
		else
			tmp_blk = DISCARD_BLOCK | (~SPARE_BLOCK & pbt[blk]);

		/* The replacement loses its SPARE flag as it goes live */
		replace_node = (u32) ((~SPARE_BLOCK) & pbt[rep_blk]);
		pbt[blk] = replace_node;
		pbt[rep_blk] = tmp_blk;

#if CMD_DMA
		/* Log both entry updates for CDMA replay */
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
		p_BTableChangesDelta->BT_Index = blk;
		p_BTableChangesDelta->BT_Entry_Value = pbt[blk];

		p_BTableChangesDelta->ValidFields = 0x0C;

		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
		p_BTableChangesDelta->BT_Index = rep_blk;
		p_BTableChangesDelta->BT_Entry_Value = pbt[rep_blk];
		p_BTableChangesDelta->ValidFields = 0x0C;
#endif
	}

	return replace_node;
}
3816
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     FTL_Write_Block_Table_Data
* Inputs:       Block table size in pages
* Outputs:      PASS=0 / FAIL=1 (note: current code always returns PASS;
*               write failures only log and jump to func_return)
* Description:  Write block table data in flash
*               If first page and last page
*                   Write data+BT flag
*               else
*                   Write data
*               BT flag is a counter. Its value is incremented for block table
*               write in a new Block
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static int FTL_Write_Block_Table_Data(void)
{
	u64 dwBlockTableAddr, pTempAddr;
	u32 Block;
	u16 Page, PageCount;
	u8 *tempBuf = tmp_buf_write_blk_table_data;
	int wBytesCopied;
	u16 bt_pages;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
			       __FILE__, __LINE__, __func__);

	/* Absolute flash address of the current block-table slot */
	dwBlockTableAddr =
		(u64)((u64)g_wBlockTableIndex * DeviceInfo.wBlockDataSize +
		(u64)g_wBlockTableOffset * DeviceInfo.wPageDataSize);
	pTempAddr = dwBlockTableAddr;

	bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();

	nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: "
			       "page= %d BlockTableIndex= %d "
			       "BlockTableOffset=%d\n", bt_pages,
			       g_wBlockTableIndex, g_wBlockTableOffset);

	Block = BLK_FROM_ADDR(pTempAddr);
	Page = PAGE_FROM_ADDR(pTempAddr, Block);
	PageCount = 1;

	if (bt_block_changed) {
		/* Advance the BT flag counter (wraps at LAST_BT_ID) and
		 * remember which block now holds the table copy */
		if (bt_flag == LAST_BT_ID) {
			bt_flag = FIRST_BT_ID;
			g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
		} else if (bt_flag < LAST_BT_ID) {
			bt_flag++;
			g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
		}

		/* Near the end of the ID space: reclaim old BT blocks */
		if ((bt_flag > (LAST_BT_ID-4)) &&
			g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] !=
			BTBLOCK_INVAL) {
			bt_block_changed = 0;
			GLOB_FTL_BT_Garbage_Collection();
		}

		/* NOTE(review): this unconditional clear makes the clear
		 * inside the branch above redundant */
		bt_block_changed = 0;
		nand_dbg_print(NAND_DBG_DEBUG,
			"Block Table Counter is %u Block %u\n",
			bt_flag, (unsigned int)Block);
	}

	/* First page: 4-byte header (BT flag in byte 3) plus table data,
	 * padded with 0xff, with the BT signature in the spare area */
	memset(tempBuf, 0, 3);
	tempBuf[3] = bt_flag;
	wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf + 4,
			DeviceInfo.wPageDataSize - 4, 0);
	memset(&tempBuf[wBytesCopied + 4], 0xff,
		DeviceInfo.wPageSize - (wBytesCopied + 4));
	FTL_Insert_Block_Table_Signature(&tempBuf[DeviceInfo.wPageDataSize],
		bt_flag);

#if CMD_DMA
	/* CDMA path: stage the page in g_pNextBlockTable, the queued DMA
	 * descriptor reads it later */
	memcpy(g_pNextBlockTable, tempBuf,
		DeviceInfo.wPageSize * sizeof(u8));
	nand_dbg_print(NAND_DBG_DEBUG, "Writing First Page of Block Table "
		"Block %u Page %u\n", (unsigned int)Block, Page);
	if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(g_pNextBlockTable,
		Block, Page, 1,
		LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
		nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
			"%s, Line %d, Function: %s, "
			"new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__, Block);
		goto func_return;
	}

	ftl_cmd_cnt++;
	g_pNextBlockTable += ((DeviceInfo.wPageSize * sizeof(u8)));
#else
	if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf, Block, Page, 1)) {
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, Function: %s, "
			"new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__, Block);
		goto func_return;
	}
#endif

	if (bt_pages > 1) {
		PageCount = bt_pages - 1;
		/* Middle pages: raw table data only, no header/signature */
		if (PageCount > 1) {
			wBytesCopied += FTL_Copy_Block_Table_To_Flash(tempBuf,
				DeviceInfo.wPageDataSize * (PageCount - 1),
				wBytesCopied);

#if CMD_DMA
			memcpy(g_pNextBlockTable, tempBuf,
				(PageCount - 1) * DeviceInfo.wPageDataSize);
			if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
				g_pNextBlockTable, Block, Page + 1,
				PageCount - 1)) {
				nand_dbg_print(NAND_DBG_WARN,
					"NAND Program fail in %s, Line %d, "
					"Function: %s, "
					"new Bad Block %d generated!\n",
					__FILE__, __LINE__, __func__,
					(int)Block);
				goto func_return;
			}

			ftl_cmd_cnt++;
			g_pNextBlockTable += (PageCount - 1) *
				DeviceInfo.wPageDataSize * sizeof(u8);
#else
			if (FAIL == GLOB_LLD_Write_Page_Main(tempBuf,
				Block, Page + 1, PageCount - 1)) {
				nand_dbg_print(NAND_DBG_WARN,
					"NAND Program fail in %s, Line %d, "
					"Function: %s, "
					"new Bad Block %d generated!\n",
					__FILE__, __LINE__, __func__,
					(int)Block);
				goto func_return;
			}
#endif
		}

		/* Last page: remaining data, 0xff padding, and the
		 * signature again */
		wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf,
				DeviceInfo.wPageDataSize, wBytesCopied);
		memset(&tempBuf[wBytesCopied], 0xff,
			DeviceInfo.wPageSize-wBytesCopied);
		FTL_Insert_Block_Table_Signature(
			&tempBuf[DeviceInfo.wPageDataSize], bt_flag);
#if CMD_DMA
		memcpy(g_pNextBlockTable, tempBuf,
			DeviceInfo.wPageSize * sizeof(u8));
		nand_dbg_print(NAND_DBG_DEBUG,
			"Writing the last Page of Block Table "
			"Block %u Page %u\n",
			(unsigned int)Block, Page + bt_pages - 1);
		if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(
			g_pNextBlockTable, Block, Page + bt_pages - 1, 1,
			LLD_CMD_FLAG_MODE_CDMA |
			LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
			nand_dbg_print(NAND_DBG_WARN,
				"NAND Program fail in %s, Line %d, "
				"Function: %s, new Bad Block %d generated!\n",
				__FILE__, __LINE__, __func__, Block);
			goto func_return;
		}
		ftl_cmd_cnt++;
#else
		if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf,
			Block, Page+bt_pages - 1, 1)) {
			nand_dbg_print(NAND_DBG_WARN,
				"NAND Program fail in %s, Line %d, "
				"Function: %s, "
				"new Bad Block %d generated!\n",
				__FILE__, __LINE__, __func__, Block);
			goto func_return;
		}
#endif
	}

	nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: done\n");

func_return:
	return PASS;
}
3996
3997/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3998* Function: FTL_Replace_Block_Table
3999* Inputs: None
4000* Outputs: PASS=0 / FAIL=1
4001* Description: Get a new block to write block table
4002*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4003static u32 FTL_Replace_Block_Table(void)
4004{
4005 u32 blk;
4006 int gc;
4007
4008 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
4009 __FILE__, __LINE__, __func__);
4010
4011 blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
4012
4013 if ((BAD_BLOCK == blk) && (PASS == gc)) {
4014 GLOB_FTL_Garbage_Collection();
4015 blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
4016 }
4017 if (BAD_BLOCK == blk)
4018 printk(KERN_ERR "%s, %s: There is no spare block. "
4019 "It should never happen\n",
4020 __FILE__, __func__);
4021
4022 nand_dbg_print(NAND_DBG_DEBUG, "New Block table Block is %d\n", blk);
4023
4024 return blk;
4025}
4026
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     FTL_Replace_LWBlock
* Inputs:       Block number
*               Pointer to Garbage Collect flag
* Outputs:      New block-table entry value for wBlockNum, or BAD_BLOCK
* Description:  Determine the least weared block by traversing
*               block table
*               Set Garbage collection to be called if number of spare
*               block is less than Free Block Gate count
*               Change Block table entry to map least worn block for current
*               operation
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static u32 FTL_Replace_LWBlock(u32 wBlockNum, int *pGarbageCollect)
{
	u32 i;
	u32 *pbt = (u32 *)g_pBlockTable;
	u8 wLeastWornCounter = 0xFF;
	u32 wLeastWornIndex = BAD_BLOCK;
	u32 wSpareBlockNum = 0;
	u32 wDiscardBlockNum = 0;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* If the entry itself is a spare block, claim it in place:
	 * clear its SPARE flag and return the entry, no GC needed */
	if (IS_SPARE_BLOCK(wBlockNum)) {
		*pGarbageCollect = FAIL;
		pbt[wBlockNum] = (u32)(pbt[wBlockNum] & (~SPARE_BLOCK));
#if CMD_DMA
		/* Log the entry update for CDMA replay */
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
		p_BTableChangesDelta->ftl_cmd_cnt =
			ftl_cmd_cnt;
		p_BTableChangesDelta->BT_Index = (u32)(wBlockNum);
		p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
		p_BTableChangesDelta->ValidFields = 0x0C;
#endif
		return pbt[wBlockNum];
	}

	/* One pass over the table: count discarded and spare blocks and
	 * track the least-worn spare */
	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
		if (IS_DISCARDED_BLOCK(i))
			wDiscardBlockNum++;

		if (IS_SPARE_BLOCK(i)) {
			u32 wPhysicalIndex = (u32)((~BAD_BLOCK) & pbt[i]);
			if (wPhysicalIndex > DeviceInfo.wSpectraEndBlock)
				printk(KERN_ERR "FTL_Replace_LWBlock: "
					"This should never occur!\n");
			if (g_pWearCounter[wPhysicalIndex -
				DeviceInfo.wSpectraStartBlock] <
				wLeastWornCounter) {
				wLeastWornCounter =
					g_pWearCounter[wPhysicalIndex -
					DeviceInfo.wSpectraStartBlock];
				wLeastWornIndex = i;
			}
			wSpareBlockNum++;
		}
	}

	nand_dbg_print(NAND_DBG_WARN,
		"FTL_Replace_LWBlock: Least Worn Counter %d\n",
		(int)wLeastWornCounter);

	/* Request garbage collection when free blocks run low or
	 * discarded blocks have accumulated */
	if ((wDiscardBlockNum >= NUM_FREE_BLOCKS_GATE) ||
		(wSpareBlockNum <= NUM_FREE_BLOCKS_GATE))
		*pGarbageCollect = PASS;
	else
		*pGarbageCollect = FAIL;

	nand_dbg_print(NAND_DBG_DEBUG,
		"FTL_Replace_LWBlock: Discarded Blocks %u Spare"
		" Blocks %u\n",
		(unsigned int)wDiscardBlockNum,
		(unsigned int)wSpareBlockNum);

	return FTL_Replace_OneBlock(wBlockNum, wLeastWornIndex);
}
4106
4107/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4108* Function: FTL_Replace_MWBlock
4109* Inputs: None
4110* Outputs: most worn spare block no./BAD_BLOCK
4111* Description: It finds most worn spare block.
4112*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4113static u32 FTL_Replace_MWBlock(void)
4114{
4115 u32 i;
4116 u32 *pbt = (u32 *)g_pBlockTable;
4117 u8 wMostWornCounter = 0;
4118 u32 wMostWornIndex = BAD_BLOCK;
4119 u32 wSpareBlockNum = 0;
4120
4121 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
4122 __FILE__, __LINE__, __func__);
4123
4124 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
4125 if (IS_SPARE_BLOCK(i)) {
4126 u32 wPhysicalIndex = (u32)((~SPARE_BLOCK) & pbt[i]);
4127 if (g_pWearCounter[wPhysicalIndex -
4128 DeviceInfo.wSpectraStartBlock] >
4129 wMostWornCounter) {
4130 wMostWornCounter =
4131 g_pWearCounter[wPhysicalIndex -
4132 DeviceInfo.wSpectraStartBlock];
4133 wMostWornIndex = wPhysicalIndex;
4134 }
4135 wSpareBlockNum++;
4136 }
4137 }
4138
4139 if (wSpareBlockNum <= 2)
4140 return BAD_BLOCK;
4141
4142 return wMostWornIndex;
4143}
4144
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     FTL_Replace_Block
* Inputs:       Block Address
* Outputs:      PASS=0 / FAIL=1
* Description:  If block specified by blk_addr parameter is not free,
*               replace it with the least worn block.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static int FTL_Replace_Block(u64 blk_addr)
{
	u32 current_blk = BLK_FROM_ADDR(blk_addr);
	/* NOTE: IS_SPARE_BLOCK() appears to expand to 'pbt' -- keep name. */
	u32 *pbt = (u32 *)g_pBlockTable;
	int wResult = PASS;
	int GarbageCollect = FAIL;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Already a spare block: claim it by clearing the SPARE flag. */
	if (IS_SPARE_BLOCK(current_blk)) {
		pbt[current_blk] = (~SPARE_BLOCK) & pbt[current_blk];
#if CMD_DMA
		/* Journal the block-table change for CDMA replay/rollback. */
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
		p_BTableChangesDelta->ftl_cmd_cnt =
			ftl_cmd_cnt;
		p_BTableChangesDelta->BT_Index = current_blk;
		p_BTableChangesDelta->BT_Entry_Value = pbt[current_blk];
		p_BTableChangesDelta->ValidFields = 0x0C ;
#endif
		return wResult;
	}

	/* Otherwise remap onto the least worn spare block; the helper also
	 * decides whether garbage collection is required. */
	FTL_Replace_LWBlock(current_blk, &GarbageCollect);

	if (PASS == GarbageCollect)
		wResult = GLOB_FTL_Garbage_Collection();

	return wResult;
}
4184
4185/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4186* Function: GLOB_FTL_Is_BadBlock
4187* Inputs: block number to test
4188* Outputs: PASS (block is BAD) / FAIL (block is not bad)
4189* Description: test if this block number is flagged as bad
4190*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4191int GLOB_FTL_Is_BadBlock(u32 wBlockNum)
4192{
4193 u32 *pbt = (u32 *)g_pBlockTable;
4194
4195 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
4196 __FILE__, __LINE__, __func__);
4197
4198 if (wBlockNum >= DeviceInfo.wSpectraStartBlock
4199 && BAD_BLOCK == (pbt[wBlockNum] & BAD_BLOCK))
4200 return PASS;
4201 else
4202 return FAIL;
4203}
4204
4205/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4206* Function: GLOB_FTL_Flush_Cache
4207* Inputs: none
4208* Outputs: PASS=0 / FAIL=1
4209* Description: flush all the cache blocks to flash
4210* if a cache block is not dirty, don't do anything with it
4211* else, write the block and update the block table
4212* Note: This function should be called at shutdown/power down.
4213* to write important data into device
4214*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4215int GLOB_FTL_Flush_Cache(void)
4216{
4217 int i, ret;
4218
4219 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
4220 __FILE__, __LINE__, __func__);
4221
4222 for (i = 0; i < CACHE_ITEM_NUM; i++) {
4223 if (SET == Cache.array[i].changed) {
4224#if CMD_DMA
4225#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
4226 int_cache[ftl_cmd_cnt].item = i;
4227 int_cache[ftl_cmd_cnt].cache.address =
4228 Cache.array[i].address;
4229 int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
4230#endif
4231#endif
4232 ret = write_back_to_l2_cache(Cache.array[i].buf, Cache.array[i].address);
4233 if (PASS == ret) {
4234 Cache.array[i].changed = CLEAR;
4235 } else {
4236 printk(KERN_ALERT "Failed when write back to L2 cache!\n");
4237 /* TODO - How to handle this? */
4238 }
4239 }
4240 }
4241
4242 flush_l2_cache();
4243
4244 return FTL_Write_Block_Table(FAIL);
4245}
4246
4247/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4248* Function: GLOB_FTL_Page_Read
4249* Inputs: pointer to data
4250* logical address of data (u64 is LBA * Bytes/Page)
4251* Outputs: PASS=0 / FAIL=1
4252* Description: reads a page of data into RAM from the cache
4253* if the data is not already in cache, read from flash to cache
4254*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4255int GLOB_FTL_Page_Read(u8 *data, u64 logical_addr)
4256{
4257 u16 cache_item;
4258 int res = PASS;
4259
4260 nand_dbg_print(NAND_DBG_DEBUG, "GLOB_FTL_Page_Read - "
4261 "page_addr: %llu\n", logical_addr);
4262
4263 cache_item = FTL_Cache_If_Hit(logical_addr);
4264
4265 if (UNHIT_CACHE_ITEM == cache_item) {
4266 nand_dbg_print(NAND_DBG_DEBUG,
4267 "GLOB_FTL_Page_Read: Cache not hit\n");
4268 res = FTL_Cache_Write();
4269 if (ERR == FTL_Cache_Read(logical_addr))
4270 res = ERR;
4271 cache_item = Cache.LRU;
4272 }
4273
4274 FTL_Cache_Read_Page(data, logical_addr, cache_item);
4275
4276 return res;
4277}
4278
4279/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4280* Function: GLOB_FTL_Page_Write
4281* Inputs: pointer to data
4282* address of data (ADDRESSTYPE is LBA * Bytes/Page)
4283* Outputs: PASS=0 / FAIL=1
4284* Description: writes a page of data from RAM to the cache
4285* if the data is not already in cache, write back the
4286* least recently used block and read the addressed block
4287* from flash to cache
4288*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4289int GLOB_FTL_Page_Write(u8 *pData, u64 dwPageAddr)
4290{
4291 u16 cache_blk;
4292 u32 *pbt = (u32 *)g_pBlockTable;
4293 int wResult = PASS;
4294
4295 nand_dbg_print(NAND_DBG_TRACE, "GLOB_FTL_Page_Write - "
4296 "dwPageAddr: %llu\n", dwPageAddr);
4297
4298 cache_blk = FTL_Cache_If_Hit(dwPageAddr);
4299
4300 if (UNHIT_CACHE_ITEM == cache_blk) {
4301 wResult = FTL_Cache_Write();
4302 if (IS_BAD_BLOCK(BLK_FROM_ADDR(dwPageAddr))) {
4303 wResult = FTL_Replace_Block(dwPageAddr);
4304 pbt[BLK_FROM_ADDR(dwPageAddr)] |= SPARE_BLOCK;
4305 if (wResult == FAIL)
4306 return FAIL;
4307 }
4308 if (ERR == FTL_Cache_Read(dwPageAddr))
4309 wResult = ERR;
4310 cache_blk = Cache.LRU;
4311 FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
4312 } else {
4313#if CMD_DMA
4314 FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk,
4315 LLD_CMD_FLAG_ORDER_BEFORE_REST);
4316#else
4317 FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
4318#endif
4319 }
4320
4321 return wResult;
4322}
4323
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     GLOB_FTL_Block_Erase
* Inputs:       address of block to erase (now in byte format, should change to
*               block format)
* Outputs:      PASS=0 / FAIL=1
* Description:  erases the specified block
*               increments the erase count
*               If erase count reaches its upper limit,call function to
*               do the adjustment as per the relative erase count values
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_FTL_Block_Erase(u64 blk_addr)
{
	int status;
	u32 BlkIdx;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Convert the byte address to a physical block index. */
	BlkIdx = (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize);

	if (BlkIdx < DeviceInfo.wSpectraStartBlock) {
		printk(KERN_ERR "GLOB_FTL_Block_Erase: "
			"This should never occur\n");
		return FAIL;
	}

#if CMD_DMA
	/* In CDMA mode the erase is queued; a failure here is only logged
	 * and the function carries on with the bookkeeping below. */
	status = GLOB_LLD_Erase_Block_cdma(BlkIdx, LLD_CMD_FLAG_MODE_CDMA);
	if (status == FAIL)
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, "
			"Function: %s, new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__, BlkIdx);
#else
	status = GLOB_LLD_Erase_Block(BlkIdx);
	if (status == FAIL) {
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, "
			"Function: %s, new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__, BlkIdx);
		return status;
	}
#endif

	if (DeviceInfo.MLCDevice) {
		/* Erase clears read disturbance -- reset the read counter
		 * and make sure the in-progress flag page gets written. */
		g_pReadCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] = 0;
		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}
	}

	/* Account one more erase cycle on this block. */
	g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock]++;

#if CMD_DMA
	/* Journal the wear-counter (and, for MLC, read-counter) updates so
	 * the CDMA completion/rollback path can replay them. */
	p_BTableChangesDelta =
		(struct BTableChangesDelta *)g_pBTDelta_Free;
	g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
	p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
	p_BTableChangesDelta->WC_Index =
		BlkIdx - DeviceInfo.wSpectraStartBlock;
	p_BTableChangesDelta->WC_Entry_Value =
		g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock];
	p_BTableChangesDelta->ValidFields = 0x30;

	if (DeviceInfo.MLCDevice) {
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
		p_BTableChangesDelta->ftl_cmd_cnt =
			ftl_cmd_cnt;
		p_BTableChangesDelta->RC_Index =
			BlkIdx - DeviceInfo.wSpectraStartBlock;
		p_BTableChangesDelta->RC_Entry_Value =
			g_pReadCounter[BlkIdx -
				DeviceInfo.wSpectraStartBlock];
		p_BTableChangesDelta->ValidFields = 0xC0;
	}

	ftl_cmd_cnt++;
#endif

	/* Near the 8-bit wear-counter ceiling: rebase all counters. */
	if (g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] == 0xFE)
		FTL_Adjust_Relative_Erase_Count(BlkIdx);

	return status;
}
4411
4412
4413/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4414* Function: FTL_Adjust_Relative_Erase_Count
4415* Inputs: index to block that was just incremented and is at the max
4416* Outputs: PASS=0 / FAIL=1
4417* Description: If any erase counts at MAX, adjusts erase count of every
4418* block by substracting least worn
4419* counter from counter value of every entry in wear table
4420*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4421static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX)
4422{
4423 u8 wLeastWornCounter = MAX_BYTE_VALUE;
4424 u8 wWearCounter;
4425 u32 i, wWearIndex;
4426 u32 *pbt = (u32 *)g_pBlockTable;
4427 int wResult = PASS;
4428
4429 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
4430 __FILE__, __LINE__, __func__);
4431
4432 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
4433 if (IS_BAD_BLOCK(i))
4434 continue;
4435 wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
4436
4437 if ((wWearIndex - DeviceInfo.wSpectraStartBlock) < 0)
4438 printk(KERN_ERR "FTL_Adjust_Relative_Erase_Count:"
4439 "This should never occur\n");
4440 wWearCounter = g_pWearCounter[wWearIndex -
4441 DeviceInfo.wSpectraStartBlock];
4442 if (wWearCounter < wLeastWornCounter)
4443 wLeastWornCounter = wWearCounter;
4444 }
4445
4446 if (wLeastWornCounter == 0) {
4447 nand_dbg_print(NAND_DBG_WARN,
4448 "Adjusting Wear Levelling Counters: Special Case\n");
4449 g_pWearCounter[Index_of_MAX -
4450 DeviceInfo.wSpectraStartBlock]--;
4451#if CMD_DMA
4452 p_BTableChangesDelta =
4453 (struct BTableChangesDelta *)g_pBTDelta_Free;
4454 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
4455 p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
4456 p_BTableChangesDelta->WC_Index =
4457 Index_of_MAX - DeviceInfo.wSpectraStartBlock;
4458 p_BTableChangesDelta->WC_Entry_Value =
4459 g_pWearCounter[Index_of_MAX -
4460 DeviceInfo.wSpectraStartBlock];
4461 p_BTableChangesDelta->ValidFields = 0x30;
4462#endif
4463 FTL_Static_Wear_Leveling();
4464 } else {
4465 for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
4466 if (!IS_BAD_BLOCK(i)) {
4467 wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
4468 g_pWearCounter[wWearIndex -
4469 DeviceInfo.wSpectraStartBlock] =
4470 (u8)(g_pWearCounter
4471 [wWearIndex -
4472 DeviceInfo.wSpectraStartBlock] -
4473 wLeastWornCounter);
4474#if CMD_DMA
4475 p_BTableChangesDelta =
4476 (struct BTableChangesDelta *)g_pBTDelta_Free;
4477 g_pBTDelta_Free +=
4478 sizeof(struct BTableChangesDelta);
4479
4480 p_BTableChangesDelta->ftl_cmd_cnt =
4481 ftl_cmd_cnt;
4482 p_BTableChangesDelta->WC_Index = wWearIndex -
4483 DeviceInfo.wSpectraStartBlock;
4484 p_BTableChangesDelta->WC_Entry_Value =
4485 g_pWearCounter[wWearIndex -
4486 DeviceInfo.wSpectraStartBlock];
4487 p_BTableChangesDelta->ValidFields = 0x30;
4488#endif
4489 }
4490 }
4491
4492 return wResult;
4493}
4494
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     FTL_Write_IN_Progress_Block_Table_Page
* Inputs:       None
* Outputs:      PASS=0 / FAIL=1 / ERR (no replacement block-table block left)
* Description:  It writes in-progress flag page to the page next to
*               block table
***********************************************************************/
static int FTL_Write_IN_Progress_Block_Table_Page(void)
{
	int wResult = PASS;
	u16 bt_pages;		/* pages occupied by the block-table image */
	u16 dwIPFPageAddr;	/* page (in the BT block) for the IPF flag */
#if CMD_DMA
#else
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 wTempBlockTableIndex;
#endif

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();

	/* The IPF page lives immediately after the block-table image. */
	dwIPFPageAddr = g_wBlockTableOffset + bt_pages;

	nand_dbg_print(NAND_DBG_DEBUG, "Writing IPF at "
		"Block %d Page %d\n",
		g_wBlockTableIndex, dwIPFPageAddr);

#if CMD_DMA
	/* Queue the IPF write ahead of the rest of the CDMA chain; a
	 * failure here is only logged -- recovery is left to the chain
	 * failure handling. */
	wResult = GLOB_LLD_Write_Page_Main_Spare_cdma(g_pIPF,
		g_wBlockTableIndex, dwIPFPageAddr, 1,
		LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST);
	if (wResult == FAIL) {
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, "
			"Function: %s, new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__,
			g_wBlockTableIndex);
	}
	g_wBlockTableOffset = dwIPFPageAddr + 1;
	/* Journal the new block-table offset for CDMA replay/rollback. */
	p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
	g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
	p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
	p_BTableChangesDelta->g_wBlockTableOffset = g_wBlockTableOffset;
	p_BTableChangesDelta->ValidFields = 0x01;
	ftl_cmd_cnt++;
#else
	wResult = GLOB_LLD_Write_Page_Main_Spare(g_pIPF,
		g_wBlockTableIndex, dwIPFPageAddr, 1);
	if (wResult == FAIL) {
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, "
			"Function: %s, new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__,
			(int)g_wBlockTableIndex);
		/* The write failed: retire the current block-table block
		 * and migrate the table to a fresh block. */
		MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
		wTempBlockTableIndex = FTL_Replace_Block_Table();
		bt_block_changed = 1;
		if (BAD_BLOCK == wTempBlockTableIndex)
			return ERR;
		g_wBlockTableIndex = wTempBlockTableIndex;
		g_wBlockTableOffset = 0;
		/* Block table tag is '00'. Means it's used one */
		pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
		return FAIL;
	}
	g_wBlockTableOffset = dwIPFPageAddr + 1;
#endif
	return wResult;
}
4566
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     FTL_Read_Disturbance
* Inputs:       block address
* Outputs:      PASS=0 / FAIL=1
* Description:  used to handle read disturbance. Data in block that
*               reaches its read limit is moved to new block
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int FTL_Read_Disturbance(u32 blk_addr)
{
	int wResult = FAIL;
	/* NOTE: the IS_*_BLOCK macros appear to reference 'pbt'. */
	u32 *pbt = (u32 *) g_pBlockTable;
	u32 dwOldBlockAddr = blk_addr;	/* physical source block */
	u32 wBlockNum;			/* logical (block-table) index */
	u32 i;
	/* NOTE(review): these accumulators are NOT reset between iterations
	 * of the do/while loop below, so after a garbage-collection retry
	 * the spare count and least-read selection carry stale values --
	 * confirm whether they should be re-initialized per iteration. */
	u32 wLeastReadCounter = 0xFFFF;
	u32 wLeastReadIndex = BAD_BLOCK;
	u32 wSpareBlockNum = 0;
	u32 wTempNode;
	u32 wReplacedNode;
	u8 *g_pTempBuf;		/* scratch buffer for the whole-block copy */

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

#if CMD_DMA
	/* Grab a copy-back buffer for this queued command.
	 * NOTE(review): the bound is checked with '>' AFTER the buffer at
	 * the old index was already fetched; if COPY_BACK_BUF_NUM is the
	 * array length, index == COPY_BACK_BUF_NUM slips through one call
	 * too late -- confirm this should be '>=' before the access. */
	g_pTempBuf = cp_back_buf_copies[cp_back_buf_idx];
	cp_back_buf_idx++;
	if (cp_back_buf_idx > COPY_BACK_BUF_NUM) {
		printk(KERN_ERR "cp_back_buf_copies overflow! Exit."
			"Maybe too many pending commands in your CDMA chain.\n");
		return FAIL;
	}
#else
	g_pTempBuf = tmp_buf_read_disturbance;
#endif

	wBlockNum = FTL_Get_Block_Index(blk_addr);

	do {
		/* This is a bug.Here 'i' should be logical block number
		 * and start from 1 (0 is reserved for block table).
		 * Have fixed it. - Yunpeng 2008. 12. 19
		 */
		/* Find the spare block with the fewest reads to receive
		 * the disturbed block's data. */
		for (i = 1; i < DeviceInfo.wDataBlockNum; i++) {
			if (IS_SPARE_BLOCK(i)) {
				u32 wPhysicalIndex =
					(u32)((~SPARE_BLOCK) & pbt[i]);
				if (g_pReadCounter[wPhysicalIndex -
					DeviceInfo.wSpectraStartBlock] <
					wLeastReadCounter) {
					wLeastReadCounter =
						g_pReadCounter[wPhysicalIndex -
						DeviceInfo.wSpectraStartBlock];
					wLeastReadIndex = i;
				}
				wSpareBlockNum++;
			}
		}

		if (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE) {
			/* Too few spares: try to reclaim some and retry,
			 * or give up if garbage collection failed. */
			wResult = GLOB_FTL_Garbage_Collection();
			if (PASS == wResult)
				continue;
			else
				break;
		} else {
			/* Swap the table entries: the old block becomes a
			 * discard candidate, the least-read spare takes over. */
			wTempNode = (u32)(DISCARD_BLOCK | pbt[wBlockNum]);
			wReplacedNode = (u32)((~SPARE_BLOCK) &
				pbt[wLeastReadIndex]);
#if CMD_DMA
			pbt[wBlockNum] = wReplacedNode;
			pbt[wLeastReadIndex] = wTempNode;
			/* Journal both block-table changes for CDMA
			 * replay/rollback. */
			p_BTableChangesDelta =
				(struct BTableChangesDelta *)g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

			p_BTableChangesDelta->ftl_cmd_cnt =
				ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index = wBlockNum;
			p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
			p_BTableChangesDelta->ValidFields = 0x0C;

			p_BTableChangesDelta =
				(struct BTableChangesDelta *)g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

			p_BTableChangesDelta->ftl_cmd_cnt =
				ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index = wLeastReadIndex;
			p_BTableChangesDelta->BT_Entry_Value =
				pbt[wLeastReadIndex];
			p_BTableChangesDelta->ValidFields = 0x0C;

			/* Queue the whole-block copy: read old block ... */
			wResult = GLOB_LLD_Read_Page_Main_cdma(g_pTempBuf,
				dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock,
				LLD_CMD_FLAG_MODE_CDMA);
			if (wResult == FAIL)
				return wResult;

			ftl_cmd_cnt++;

			if (wResult != FAIL) {
				/* ... then program it into the new block. */
				if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
					g_pTempBuf, pbt[wBlockNum], 0,
					DeviceInfo.wPagesPerBlock)) {
					nand_dbg_print(NAND_DBG_WARN,
						"NAND Program fail in "
						"%s, Line %d, Function: %s, "
						"new Bad Block %d "
						"generated!\n",
						__FILE__, __LINE__, __func__,
						(int)pbt[wBlockNum]);
					wResult = FAIL;
					MARK_BLOCK_AS_BAD(pbt[wBlockNum]);
				}
				ftl_cmd_cnt++;
			}
#else
			wResult = GLOB_LLD_Read_Page_Main(g_pTempBuf,
				dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock);
			if (wResult == FAIL)
				return wResult;

			if (wResult != FAIL) {
				/* This is a bug. At this time, pbt[wBlockNum]
				is still the physical address of
				discard block, and should not be write.
				Have fixed it as below.
				-- Yunpeng 2008.12.19
				*/
				wResult = GLOB_LLD_Write_Page_Main(g_pTempBuf,
					wReplacedNode, 0,
					DeviceInfo.wPagesPerBlock);
				if (wResult == FAIL) {
					nand_dbg_print(NAND_DBG_WARN,
						"NAND Program fail in "
						"%s, Line %d, Function: %s, "
						"new Bad Block %d "
						"generated!\n",
						__FILE__, __LINE__, __func__,
						(int)wReplacedNode);
					MARK_BLOCK_AS_BAD(wReplacedNode);
				} else {
					/* Copy succeeded: commit the table
					 * swap only now. */
					pbt[wBlockNum] = wReplacedNode;
					pbt[wLeastReadIndex] = wTempNode;
				}
			}

			if ((wResult == PASS) && (g_cBlockTableStatus !=
				IN_PROGRESS_BLOCK_TABLE)) {
				g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
				FTL_Write_IN_Progress_Block_Table_Page();
			}
#endif
		}
	} while (wResult != PASS)
	;

#if CMD_DMA
	/* ... */
#endif

	return wResult;
}
4731
diff --git a/drivers/staging/spectra/flash.h b/drivers/staging/spectra/flash.h
new file mode 100644
index 00000000000..5ed05805cf6
--- /dev/null
+++ b/drivers/staging/spectra/flash.h
@@ -0,0 +1,198 @@
1/*
2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
20#ifndef _FLASH_INTERFACE_
21#define _FLASH_INTERFACE_
22
23#include "ffsport.h"
24#include "spectraswconfig.h"
25
26#define MAX_BYTE_VALUE 0xFF
27#define MAX_WORD_VALUE 0xFFFF
28#define MAX_U32_VALUE 0xFFFFFFFF
29
30#define MAX_BLOCKNODE_VALUE 0xFFFFFF
31#define DISCARD_BLOCK 0x800000
32#define SPARE_BLOCK 0x400000
33#define BAD_BLOCK 0xC00000
34
35#define UNHIT_CACHE_ITEM 0xFFFF
36
37#define NAND_CACHE_INIT_ADDR 0xffffffffffffffffULL
38
39#define IN_PROGRESS_BLOCK_TABLE 0x00
40#define CURRENT_BLOCK_TABLE 0x01
41
42#define BTSIG_OFFSET (0)
43#define BTSIG_BYTES (5)
44#define BTSIG_DELTA (3)
45
46#define MAX_READ_COUNTER 0x2710
47
48#define FIRST_BT_ID (1)
49#define LAST_BT_ID (254)
50#define BTBLOCK_INVAL (u32)(0xFFFFFFFF)
51
/*
 * Geometry/feature descriptor of the attached NAND array as used by the
 * FTL.  Fields without comments are not exercised by the code visible in
 * flash.c; their meanings follow from their names -- confirm against the
 * low-level driver before relying on them.
 */
struct device_info_tag {
	u16 wDeviceMaker;
	u16 wDeviceID;
	u32 wDeviceType;
	u32 wSpectraStartBlock;	/* first physical block managed by Spectra;
				 * base index of g_pWearCounter/g_pReadCounter */
	u32 wSpectraEndBlock;	/* last managed physical block */
	u32 wTotalBlocks;
	u16 wPagesPerBlock;	/* page count used for whole-block copies */
	u16 wPageSize;
	u16 wPageDataSize;
	u16 wPageSpareSize;
	u16 wNumPageSpareFlag;
	u16 wECCBytesPerSector;
	u32 wBlockSize;
	u32 wBlockDataSize;
	u32 wDataBlockNum;	/* number of block-table entries the FTL scans */
	u8 bPlaneNum;
	u16 wDeviceMainAreaSize;
	u16 wDeviceSpareAreaSize;
	u16 wDevicesConnected;
	u16 wDeviceWidth;
	u16 wHWRevision;
	u16 wHWFeatures;

	u16 wONFIDevFeatures;
	u16 wONFIOptCommands;
	u16 wONFITimingMode;
	u16 wONFIPgmCacheTimingMode;

	u16 MLCDevice;		/* non-zero for MLC parts: enables the
				 * read-disturb counting paths in flash.c */
	u16 wSpareSkipBytes;

	u8 nBitsInPageNumber;
	u8 nBitsInPageDataSize;
	u8 nBitsInBlockDataSize; /* byte address >> this = block index
				  * (see GLOB_FTL_Block_Erase) */
};
88
89extern struct device_info_tag DeviceInfo;
90
/* Cache item format */
struct flash_cache_item_tag {
	u64 address;	/* logical address of the cached data (matched by
			 * FTL_Cache_If_Hit) */
	u16 use_cnt;	/* usage counter -- presumably drives LRU selection;
			 * confirm in FTL_Cache_* code */
	u16 changed;	/* SET when dirty, CLEAR after write-back (see
			 * GLOB_FTL_Flush_Cache) */
	u8 *buf;	/* backing buffer, cache_item_size bytes */
};

struct flash_cache_tag {
	u32 cache_item_size; /* Size in bytes of each cache item */
	u16 pages_per_item; /* How many NAND pages in each cache item */
	u16 LRU; /* No. of the least recently used cache item */
	struct flash_cache_item_tag array[CACHE_ITEM_NUM];
};
105
/*
 * Data structure for each list node of the management table
 * used for the Level 2 Cache. Each node maps one logical NAND block.
 */
struct spectra_l2_cache_list {
	struct list_head list;
	u32 logical_blk_num; /* Logical block number */
	u32 pages_array[]; /* Page map array of this logical block.
			    * Array index is the logical page number
			    * (each node covers one logical block),
			    * and for every item of this array:
			    * high 16 bit is index of the L2 cache block num,
			    * low 16 bit is the phy page num
			    * of the above L2 cache block.
			    * This array will be kmalloc during run time.
			    */
};

struct spectra_l2_cache_info {
	u32 blk_array[BLK_NUM_FOR_L2_CACHE]; /* physical blocks backing the
					      * L2 cache */
	u16 cur_blk_idx; /* idx to the phy block number of current using */
	u16 cur_page_num; /* pages number of current using */
	struct spectra_l2_cache_list table; /* First node of the table */
};
129
/* When enabled, per-command snapshots of L1 cache metadata are kept so the
 * cache can be restored if a queued CDMA command chain fails part-way
 * (see the int_cache[] bookkeeping in GLOB_FTL_Flush_Cache). */
#define RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE 1

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
/* Snapshot of one cache item's metadata taken before a queued command. */
struct flash_cache_mod_item_tag {
	u64 address;
	u8 changed;
};

struct flash_cache_delta_list_tag {
	u8 item; /* used cache item */
	struct flash_cache_mod_item_tag cache;
};
#endif
143
144extern struct flash_cache_tag Cache;
145
146extern u8 *buf_read_page_main_spare;
147extern u8 *buf_write_page_main_spare;
148extern u8 *buf_read_page_spare;
149extern u8 *buf_get_bad_block;
150extern u8 *cdma_desc_buf;
151extern u8 *memcp_desc_buf;
152
/* structure used for the IdentifyDevice function; note the "indentfy"
 * typo in the tag name is part of the established interface and is kept
 * for compatibility */
struct spectra_indentfy_dev_tag {
	u32 NumBlocks;
	u16 PagesPerBlock;
	u16 PageDataSize;
	u16 wECCBytesPerSector;
	u32 wDataBlockNum;
};
161
162int GLOB_FTL_Flash_Init(void);
163int GLOB_FTL_Flash_Release(void);
164/*void GLOB_FTL_Erase_Flash(void);*/
165int GLOB_FTL_Block_Erase(u64 block_addr);
166int GLOB_FTL_Is_BadBlock(u32 block_num);
167int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data);
168int GLOB_FTL_Event_Status(int *);
169u16 glob_ftl_execute_cmds(void);
170
171/*int FTL_Read_Disturbance(ADDRESSTYPE dwBlockAddr);*/
172int FTL_Read_Disturbance(u32 dwBlockAddr);
173
174/*Flash r/w based on cache*/
175int GLOB_FTL_Page_Read(u8 *read_data, u64 page_addr);
176int GLOB_FTL_Page_Write(u8 *write_data, u64 page_addr);
177int GLOB_FTL_Wear_Leveling(void);
178int GLOB_FTL_Flash_Format(void);
179int GLOB_FTL_Init(void);
180int GLOB_FTL_Flush_Cache(void);
181int GLOB_FTL_Garbage_Collection(void);
182int GLOB_FTL_BT_Garbage_Collection(void);
183void GLOB_FTL_Cache_Release(void);
184u8 *get_blk_table_start_addr(void);
185u8 *get_wear_leveling_table_start_addr(void);
186unsigned long get_blk_table_len(void);
187unsigned long get_wear_leveling_table_len(void);
188
189#if DEBUG_BNDRY
190void debug_boundary_lineno_error(int chnl, int limit, int no, int lineno,
191 char *filename);
192#define debug_boundary_error(chnl, limit, no) debug_boundary_lineno_error(chnl,\
193 limit, no, __LINE__, __FILE__)
194#else
195#define debug_boundary_error(chnl, limit, no) ;
196#endif
197
198#endif /*_FLASH_INTERFACE_*/
diff --git a/drivers/staging/spectra/lld.c b/drivers/staging/spectra/lld.c
new file mode 100644
index 00000000000..5c3b9762dc3
--- /dev/null
+++ b/drivers/staging/spectra/lld.c
@@ -0,0 +1,339 @@
1/*
2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
20#include "spectraswconfig.h"
21#include "ffsport.h"
22#include "ffsdefs.h"
23#include "lld.h"
24#include "lld_nand.h"
25
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
#if FLASH_EMU /* vector all the LLD calls to the LLD_EMU code */
#include "lld_emu.h"
#include "lld_cdma.h"

/* common functions: */
/* Thin pass-through wrappers: each GLOB_LLD_* entry point forwards
 * directly to the corresponding emu_* routine in lld_emu.c. */
u16 GLOB_LLD_Flash_Reset(void)
{
	return emu_Flash_Reset();
}

u16 GLOB_LLD_Read_Device_ID(void)
{
	return emu_Read_Device_ID();
}

int GLOB_LLD_Flash_Release(void)
{
	return emu_Flash_Release();
}

u16 GLOB_LLD_Flash_Init(void)
{
	return emu_Flash_Init();
}

u16 GLOB_LLD_Erase_Block(u32 block_add)
{
	return emu_Erase_Block(block_add);
}

u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block, u16 Page,
				u16 PageCount)
{
	return emu_Write_Page_Main(write_data, block, Page, PageCount);
}

u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block, u16 Page,
				u16 PageCount)
{
	return emu_Read_Page_Main(read_data, block, Page, PageCount);
}

/* The emulator has no separate polling read path, so the polling variant
 * maps onto the ordinary page read. */
u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
			u32 block, u16 page, u16 page_count)
{
	return emu_Read_Page_Main(read_data, block, page, page_count);
}

u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
					u16 Page, u16 PageCount)
{
	return emu_Write_Page_Main_Spare(write_data, block, Page, PageCount);
}

u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
					u16 Page, u16 PageCount)
{
	return emu_Read_Page_Main_Spare(read_data, block, Page, PageCount);
}

u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block, u16 Page,
				u16 PageCount)
{
	return emu_Write_Page_Spare(write_data, block, Page, PageCount);
}

u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block, u16 Page,
				u16 PageCount)
{
	return emu_Read_Page_Spare(read_data, block, Page, PageCount);
}

u16 GLOB_LLD_Get_Bad_Block(u32 block)
{
	return emu_Get_Bad_Block(block);
}

#endif /* FLASH_EMU */
105
106/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
107#if FLASH_MTD /* vector all the LLD calls to the LLD_MTD code */
108#include "lld_mtd.h"
109#include "lld_cdma.h"
110
111/* common functions: */
112u16 GLOB_LLD_Flash_Reset(void)
113{
114 return mtd_Flash_Reset();
115}
116
117u16 GLOB_LLD_Read_Device_ID(void)
118{
119 return mtd_Read_Device_ID();
120}
121
122int GLOB_LLD_Flash_Release(void)
123{
124 return mtd_Flash_Release();
125}
126
127u16 GLOB_LLD_Flash_Init(void)
128{
129 return mtd_Flash_Init();
130}
131
132u16 GLOB_LLD_Erase_Block(u32 block_add)
133{
134 return mtd_Erase_Block(block_add);
135}
136
137u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block, u16 Page,
138 u16 PageCount)
139{
140 return mtd_Write_Page_Main(write_data, block, Page, PageCount);
141}
142
143u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block, u16 Page,
144 u16 PageCount)
145{
146 return mtd_Read_Page_Main(read_data, block, Page, PageCount);
147}
148
149u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
150 u32 block, u16 page, u16 page_count)
151{
152 return mtd_Read_Page_Main(read_data, block, page, page_count);
153}
154
155u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
156 u16 Page, u16 PageCount)
157{
158 return mtd_Write_Page_Main_Spare(write_data, block, Page, PageCount);
159}
160
161u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
162 u16 Page, u16 PageCount)
163{
164 return mtd_Read_Page_Main_Spare(read_data, block, Page, PageCount);
165}
166
167u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block, u16 Page,
168 u16 PageCount)
169{
170 return mtd_Write_Page_Spare(write_data, block, Page, PageCount);
171}
172
/* FLASH_MTD build: dispatch spare-area reads to mtd_Read_Page_Spare(). */
u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block, u16 Page,
				u16 PageCount)
{
	return mtd_Read_Page_Spare(read_data, block, Page, PageCount);
}
178
/* FLASH_MTD build: dispatch bad-block queries to mtd_Get_Bad_Block(). */
u16 GLOB_LLD_Get_Bad_Block(u32 block)
{
	return mtd_Get_Bad_Block(block);
}
183
184#endif /* FLASH_MTD */
185
186/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
187#if FLASH_NAND /* vector all the LLD calls to the NAND controller code */
188#include "lld_nand.h"
189#include "lld_cdma.h"
190#include "flash.h"
191
192/* common functions for LLD_NAND */
/* FLASH_NAND build: enable/disable hardware ECC via NAND_ECC_Ctrl(). */
void GLOB_LLD_ECC_Control(int enable)
{
	NAND_ECC_Ctrl(enable);
}
197
198/* common functions for LLD_NAND */
/* FLASH_NAND build: dispatch to NAND_Flash_Reset(). */
u16 GLOB_LLD_Flash_Reset(void)
{
	return NAND_Flash_Reset();
}
203
/* FLASH_NAND build: dispatch to NAND_Read_Device_ID(). */
u16 GLOB_LLD_Read_Device_ID(void)
{
	return NAND_Read_Device_ID();
}
208
/* FLASH_NAND build: dispatch to NAND_UnlockArrayAll(). */
u16 GLOB_LLD_UnlockArrayAll(void)
{
	return NAND_UnlockArrayAll();
}
213
/* FLASH_NAND build: dispatch to NAND_Flash_Init(). */
u16 GLOB_LLD_Flash_Init(void)
{
	return NAND_Flash_Init();
}
218
/* FLASH_NAND build: dispatch to nand_release_spectra(). */
int GLOB_LLD_Flash_Release(void)
{
	return nand_release_spectra();
}
223
/* FLASH_NAND build: dispatch to NAND_Erase_Block(). */
u16 GLOB_LLD_Erase_Block(u32 block_add)
{
	return NAND_Erase_Block(block_add);
}
228
229
/* FLASH_NAND build: dispatch main-area writes to NAND_Write_Page_Main(). */
u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block, u16 Page,
				u16 PageCount)
{
	return NAND_Write_Page_Main(write_data, block, Page, PageCount);
}
235
236u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block, u16 page,
237 u16 page_count)
238{
239 if (page_count == 1) /* Using polling to improve read speed */
240 return NAND_Read_Page_Main_Polling(read_data, block, page, 1);
241 else
242 return NAND_Read_Page_Main(read_data, block, page, page_count);
243}
244
/* FLASH_NAND build: dispatch polled reads to NAND_Read_Page_Main_Polling(). */
u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
			u32 block, u16 page, u16 page_count)
{
	return NAND_Read_Page_Main_Polling(read_data,
			block, page, page_count);
}
251
/* FLASH_NAND build: dispatch main+spare writes to NAND_Write_Page_Main_Spare(). */
u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
				u16 Page, u16 PageCount)
{
	return NAND_Write_Page_Main_Spare(write_data, block, Page, PageCount);
}
257
/* FLASH_NAND build: dispatch spare-area writes to NAND_Write_Page_Spare(). */
u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block, u16 Page,
				u16 PageCount)
{
	return NAND_Write_Page_Spare(write_data, block, Page, PageCount);
}
263
/* FLASH_NAND build: dispatch main+spare reads to NAND_Read_Page_Main_Spare(). */
u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
				u16 page, u16 page_count)
{
	return NAND_Read_Page_Main_Spare(read_data, block, page, page_count);
}
269
/* FLASH_NAND build: dispatch spare-area reads to NAND_Read_Page_Spare(). */
u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block, u16 Page,
				u16 PageCount)
{
	return NAND_Read_Page_Spare(read_data, block, Page, PageCount);
}
275
/* FLASH_NAND build: dispatch bad-block queries to NAND_Get_Bad_Block(). */
u16 GLOB_LLD_Get_Bad_Block(u32 block)
{
	return NAND_Get_Bad_Block(block);
}
280
281#if CMD_DMA
/* CMD_DMA build: dispatch event-status queries to CDMA_Event_Status(). */
u16 GLOB_LLD_Event_Status(void)
{
	return CDMA_Event_Status();
}
286
/* CMD_DMA build: flush all pending CDMA commands via CDMA_Execute_CMDs(). */
u16 glob_lld_execute_cmds(void)
{
	return CDMA_Execute_CMDs();
}
291
292u16 GLOB_LLD_MemCopy_CMD(u8 *dest, u8 *src,
293 u32 ByteCount, u16 flag)
294{
295 /* Replace the hardware memcopy with software memcpy function */
296 if (CDMA_Execute_CMDs())
297 return FAIL;
298 memcpy(dest, src, ByteCount);
299 return PASS;
300
301 /* return CDMA_MemCopy_CMD(dest, src, ByteCount, flag); */
302}
303
/* Queue a block erase as a CDMA command (no data buffer, page 0). */
u16 GLOB_LLD_Erase_Block_cdma(u32 block, u16 flags)
{
	return CDMA_Data_CMD(ERASE_CMD, 0, block, 0, 0, flags);
}
308
/* Queue a main-area page write as a CDMA command (flags hard-coded to 0). */
u16 GLOB_LLD_Write_Page_Main_cdma(u8 *data, u32 block, u16 page, u16 count)
{
	return CDMA_Data_CMD(WRITE_MAIN_CMD, data, block, page, count, 0);
}
313
/* Queue a main-area page read as a CDMA command. */
u16 GLOB_LLD_Read_Page_Main_cdma(u8 *data, u32 block, u16 page,
				u16 count, u16 flags)
{
	return CDMA_Data_CMD(READ_MAIN_CMD, data, block, page, count, flags);
}
319
/* Queue a main+spare page write as a CDMA command. */
u16 GLOB_LLD_Write_Page_Main_Spare_cdma(u8 *data, u32 block, u16 page,
					u16 count, u16 flags)
{
	return CDMA_Data_CMD(WRITE_MAIN_SPARE_CMD,
			data, block, page, count, flags);
}
326
/*
 * Queue a main+spare page read as a CDMA command.
 * Unlike the other *_cdma wrappers, the flags are hard-coded to
 * LLD_CMD_FLAG_MODE_CDMA here.
 */
u16 GLOB_LLD_Read_Page_Main_Spare_cdma(u8 *data,
			u32 block, u16 page, u16 count)
{
	return CDMA_Data_CMD(READ_MAIN_SPARE_CMD, data, block, page, count,
				LLD_CMD_FLAG_MODE_CDMA);
}
333
334#endif /* CMD_DMA */
335#endif /* FLASH_NAND */
336
337/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
338
339/* end of LLD.c */
diff --git a/drivers/staging/spectra/lld.h b/drivers/staging/spectra/lld.h
new file mode 100644
index 00000000000..d3738e0e1fe
--- /dev/null
+++ b/drivers/staging/spectra/lld.h
@@ -0,0 +1,111 @@
1/*
2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
20
21
22#ifndef _LLD_
23#define _LLD_
24
25#include "ffsport.h"
26#include "spectraswconfig.h"
27#include "flash.h"
28
29#define GOOD_BLOCK 0
30#define DEFECTIVE_BLOCK 1
31#define READ_ERROR 2
32
33#define CLK_X 5
34#define CLK_MULTI 4
35
36/* Typedefs */
37
38/* prototypes: API for LLD */
39/* Currently, Write_Page_Main
40 * MemCopy
41 * Read_Page_Main_Spare
42 * do not have flag because they were not implemented prior to this
43 * They are not being added to keep changes to a minimum for now.
44 * Currently, they are not required (only reqd for Wr_P_M_S.)
45 * Later on, these NEED to be changed.
46 */
47
48extern void GLOB_LLD_ECC_Control(int enable);
49
50extern u16 GLOB_LLD_Flash_Reset(void);
51
52extern u16 GLOB_LLD_Read_Device_ID(void);
53
54extern u16 GLOB_LLD_UnlockArrayAll(void);
55
56extern u16 GLOB_LLD_Flash_Init(void);
57
58extern int GLOB_LLD_Flash_Release(void);
59
60extern u16 GLOB_LLD_Erase_Block(u32 block_add);
61
62extern u16 GLOB_LLD_Write_Page_Main(u8 *write_data,
63 u32 block, u16 Page, u16 PageCount);
64
65extern u16 GLOB_LLD_Read_Page_Main(u8 *read_data,
66 u32 block, u16 page, u16 page_count);
67
68extern u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
69 u32 block, u16 page, u16 page_count);
70
71extern u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data,
72 u32 block, u16 Page, u16 PageCount);
73
74extern u16 GLOB_LLD_Write_Page_Spare(u8 *write_data,
75 u32 block, u16 Page, u16 PageCount);
76
77extern u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data,
78 u32 block, u16 page, u16 page_count);
79
80extern u16 GLOB_LLD_Read_Page_Spare(u8 *read_data,
81 u32 block, u16 Page, u16 PageCount);
82
83extern u16 GLOB_LLD_Get_Bad_Block(u32 block);
84
85extern u16 GLOB_LLD_Event_Status(void);
86
87extern u16 GLOB_LLD_MemCopy_CMD(u8 *dest, u8 *src, u32 ByteCount, u16 flag);
88
89extern u16 glob_lld_execute_cmds(void);
90
91extern u16 GLOB_LLD_Erase_Block_cdma(u32 block, u16 flags);
92
93extern u16 GLOB_LLD_Write_Page_Main_cdma(u8 *data,
94 u32 block, u16 page, u16 count);
95
96extern u16 GLOB_LLD_Read_Page_Main_cdma(u8 *data,
97 u32 block, u16 page, u16 count, u16 flags);
98
99extern u16 GLOB_LLD_Write_Page_Main_Spare_cdma(u8 *data,
100 u32 block, u16 page, u16 count, u16 flags);
101
102extern u16 GLOB_LLD_Read_Page_Main_Spare_cdma(u8 *data,
103 u32 block, u16 page, u16 count);
104
105#define LLD_CMD_FLAG_ORDER_BEFORE_REST (0x1)
106#define LLD_CMD_FLAG_MODE_CDMA (0x8)
107
108
109#endif /*_LLD_ */
110
111
diff --git a/drivers/staging/spectra/lld_cdma.c b/drivers/staging/spectra/lld_cdma.c
new file mode 100644
index 00000000000..c6e76103d43
--- /dev/null
+++ b/drivers/staging/spectra/lld_cdma.c
@@ -0,0 +1,910 @@
1/*
2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
20#include <linux/fs.h>
21#include <linux/slab.h>
22
23#include "spectraswconfig.h"
24#include "lld.h"
25#include "lld_nand.h"
26#include "lld_cdma.h"
27#include "lld_emu.h"
28#include "flash.h"
29#include "nand_regs.h"
30
31#define MAX_PENDING_CMDS 4
32#define MODE_02 (0x2 << 26)
33
34/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     CDMA_Data_CMD
36* Inputs: cmd code (aligned for hw)
37* data: pointer to source or destination
38* block: block address
39* page: page address
40* num: num pages to transfer
41* Outputs: PASS
42* Description: This function takes the parameters and puts them
43* into the "pending commands" array.
44* It does not parse or validate the parameters.
45* The array index is same as the tag.
46*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 CDMA_Data_CMD(u8 cmd, u8 *data, u32 block, u16 page, u16 num, u16 flags)
{
	u8 bank;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* cmd 0 is only reported, not rejected: the command is still
	 * queued below. NOTE(review): confirm this is intentional. */
	if (0 == cmd)
		nand_dbg_print(NAND_DBG_DEBUG,
			"%s, Line %d, Illegal cmd (0)\n", __FILE__, __LINE__);

	/* If a command of another bank comes, then first execute */
	/* pending commands of the current bank, then set the new */
	/* bank as current bank */
	bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
	if (bank != info.flash_bank) {
		nand_dbg_print(NAND_DBG_WARN,
			"Will access new bank. old bank: %d, new bank: %d\n",
			info.flash_bank, bank);
		if (CDMA_Execute_CMDs()) {
			printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
			return FAIL;
		}
		info.flash_bank = bank;
	}

	/* Append to the pending-commands array; the array index doubles
	 * as the command tag. */
	info.pcmds[info.pcmds_num].CMD = cmd;
	info.pcmds[info.pcmds_num].DataAddr = data;
	info.pcmds[info.pcmds_num].Block = block;
	info.pcmds[info.pcmds_num].Page = page;
	info.pcmds[info.pcmds_num].PageCount = num;
	info.pcmds[info.pcmds_num].DataDestAddr = 0;
	info.pcmds[info.pcmds_num].DataSrcAddr = 0;
	info.pcmds[info.pcmds_num].MemCopyByteCnt = 0;
	info.pcmds[info.pcmds_num].Flags = flags;
	/* 0xB0B is the magic "not yet completed" status; it is replaced
	 * with CMD_PASS/CMD_NOT_DONE when the command is processed. */
	info.pcmds[info.pcmds_num].Status = 0xB0B;

	/* Spare-area write buffers must be converted from logical to
	 * physical layout before the hardware sees them. */
	switch (cmd) {
	case WRITE_MAIN_SPARE_CMD:
		Conv_Main_Spare_Data_Log2Phy_Format(data, num);
		break;
	case WRITE_SPARE_CMD:
		Conv_Spare_Data_Log2Phy_Format(data);
		break;
	default:
		break;
	}

	info.pcmds_num++;

	/* Flush automatically once the pending array is full. */
	if (info.pcmds_num >= MAX_PENDING_CMDS) {
		if (CDMA_Execute_CMDs()) {
			printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
			return FAIL;
		}
	}

	return PASS;
}
106
107/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
108* Function: CDMA_MemCopy_CMD
109* Inputs: dest: pointer to destination
110* src: pointer to source
111* count: num bytes to transfer
112* Outputs: PASS
113* Description: This function takes the parameters and puts them
114* into the "pending commands" array.
115* It does not parse or validate the parameters.
116* The array index is same as the tag.
117*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 CDMA_MemCopy_CMD(u8 *dest, u8 *src, u32 byte_cnt, u16 flags)
{
	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Queue a MEMCOPY_CMD in the pending-commands array; the flash
	 * address fields are unused and zeroed, only the src/dest/count
	 * memcopy fields are meaningful for this command type. */
	info.pcmds[info.pcmds_num].CMD = MEMCOPY_CMD;
	info.pcmds[info.pcmds_num].DataAddr = 0;
	info.pcmds[info.pcmds_num].Block = 0;
	info.pcmds[info.pcmds_num].Page = 0;
	info.pcmds[info.pcmds_num].PageCount = 0;
	info.pcmds[info.pcmds_num].DataDestAddr = dest;
	info.pcmds[info.pcmds_num].DataSrcAddr = src;
	info.pcmds[info.pcmds_num].MemCopyByteCnt = byte_cnt;
	info.pcmds[info.pcmds_num].Flags = flags;
	/* Magic "not yet completed" status, as in CDMA_Data_CMD(). */
	info.pcmds[info.pcmds_num].Status = 0xB0B;

	info.pcmds_num++;

	/* Flush automatically once the pending array is full. */
	if (info.pcmds_num >= MAX_PENDING_CMDS) {
		if (CDMA_Execute_CMDs()) {
			printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
			return FAIL;
		}
	}

	return PASS;
}
145
146#if 0
147/* Prints the PendingCMDs array */
148void print_pending_cmds(void)
149{
150 u16 i;
151
152 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
153 __FILE__, __LINE__, __func__);
154
155 for (i = 0; i < info.pcmds_num; i++) {
156 nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
157 switch (info.pcmds[i].CMD) {
158 case ERASE_CMD:
159 nand_dbg_print(NAND_DBG_DEBUG,
160 "Erase Command (0x%x)\n",
161 info.pcmds[i].CMD);
162 break;
163 case WRITE_MAIN_CMD:
164 nand_dbg_print(NAND_DBG_DEBUG,
165 "Write Main Command (0x%x)\n",
166 info.pcmds[i].CMD);
167 break;
168 case WRITE_MAIN_SPARE_CMD:
169 nand_dbg_print(NAND_DBG_DEBUG,
170 "Write Main Spare Command (0x%x)\n",
171 info.pcmds[i].CMD);
172 break;
173 case READ_MAIN_SPARE_CMD:
174 nand_dbg_print(NAND_DBG_DEBUG,
175 "Read Main Spare Command (0x%x)\n",
176 info.pcmds[i].CMD);
177 break;
178 case READ_MAIN_CMD:
179 nand_dbg_print(NAND_DBG_DEBUG,
180 "Read Main Command (0x%x)\n",
181 info.pcmds[i].CMD);
182 break;
183 case MEMCOPY_CMD:
184 nand_dbg_print(NAND_DBG_DEBUG,
185 "Memcopy Command (0x%x)\n",
186 info.pcmds[i].CMD);
187 break;
188 case DUMMY_CMD:
189 nand_dbg_print(NAND_DBG_DEBUG,
190 "Dummy Command (0x%x)\n",
191 info.pcmds[i].CMD);
192 break;
193 default:
194 nand_dbg_print(NAND_DBG_DEBUG,
195 "Illegal Command (0x%x)\n",
196 info.pcmds[i].CMD);
197 break;
198 }
199
200 nand_dbg_print(NAND_DBG_DEBUG, "DataAddr: 0x%x\n",
201 (u32)info.pcmds[i].DataAddr);
202 nand_dbg_print(NAND_DBG_DEBUG, "Block: %d\n",
203 info.pcmds[i].Block);
204 nand_dbg_print(NAND_DBG_DEBUG, "Page: %d\n",
205 info.pcmds[i].Page);
206 nand_dbg_print(NAND_DBG_DEBUG, "PageCount: %d\n",
207 info.pcmds[i].PageCount);
208 nand_dbg_print(NAND_DBG_DEBUG, "DataDestAddr: 0x%x\n",
209 (u32)info.pcmds[i].DataDestAddr);
210 nand_dbg_print(NAND_DBG_DEBUG, "DataSrcAddr: 0x%x\n",
211 (u32)info.pcmds[i].DataSrcAddr);
212 nand_dbg_print(NAND_DBG_DEBUG, "MemCopyByteCnt: %d\n",
213 info.pcmds[i].MemCopyByteCnt);
214 nand_dbg_print(NAND_DBG_DEBUG, "Flags: 0x%x\n",
215 info.pcmds[i].Flags);
216 nand_dbg_print(NAND_DBG_DEBUG, "Status: 0x%x\n",
217 info.pcmds[i].Status);
218 }
219}
220
221/* Print the CDMA descriptors */
222void print_cdma_descriptors(void)
223{
224 struct cdma_descriptor *pc;
225 int i;
226
227 pc = (struct cdma_descriptor *)info.cdma_desc_buf;
228
229 nand_dbg_print(NAND_DBG_DEBUG, "\nWill dump cdma descriptors:\n");
230
231 for (i = 0; i < info.cdma_num; i++) {
232 nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
233 nand_dbg_print(NAND_DBG_DEBUG,
234 "NxtPointerHi: 0x%x, NxtPointerLo: 0x%x\n",
235 pc[i].NxtPointerHi, pc[i].NxtPointerLo);
236 nand_dbg_print(NAND_DBG_DEBUG,
237 "FlashPointerHi: 0x%x, FlashPointerLo: 0x%x\n",
238 pc[i].FlashPointerHi, pc[i].FlashPointerLo);
239 nand_dbg_print(NAND_DBG_DEBUG, "CommandType: 0x%x\n",
240 pc[i].CommandType);
241 nand_dbg_print(NAND_DBG_DEBUG,
242 "MemAddrHi: 0x%x, MemAddrLo: 0x%x\n",
243 pc[i].MemAddrHi, pc[i].MemAddrLo);
244 nand_dbg_print(NAND_DBG_DEBUG, "CommandFlags: 0x%x\n",
245 pc[i].CommandFlags);
246 nand_dbg_print(NAND_DBG_DEBUG, "Channel: %d, Status: 0x%x\n",
247 pc[i].Channel, pc[i].Status);
248 nand_dbg_print(NAND_DBG_DEBUG,
249 "MemCopyPointerHi: 0x%x, MemCopyPointerLo: 0x%x\n",
250 pc[i].MemCopyPointerHi, pc[i].MemCopyPointerLo);
251 nand_dbg_print(NAND_DBG_DEBUG,
252 "Reserved12: 0x%x, Reserved13: 0x%x, "
253 "Reserved14: 0x%x, pcmd: %d\n",
254 pc[i].Reserved12, pc[i].Reserved13,
255 pc[i].Reserved14, pc[i].pcmd);
256 }
257}
258
259/* Print the Memory copy descriptors */
260static void print_memcp_descriptors(void)
261{
262 struct memcpy_descriptor *pm;
263 int i;
264
265 pm = (struct memcpy_descriptor *)info.memcp_desc_buf;
266
267 nand_dbg_print(NAND_DBG_DEBUG, "\nWill dump mem_cpy descriptors:\n");
268
269 for (i = 0; i < info.cdma_num; i++) {
270 nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
271 nand_dbg_print(NAND_DBG_DEBUG,
272 "NxtPointerHi: 0x%x, NxtPointerLo: 0x%x\n",
273 pm[i].NxtPointerHi, pm[i].NxtPointerLo);
274 nand_dbg_print(NAND_DBG_DEBUG,
275 "SrcAddrHi: 0x%x, SrcAddrLo: 0x%x\n",
276 pm[i].SrcAddrHi, pm[i].SrcAddrLo);
277 nand_dbg_print(NAND_DBG_DEBUG,
278 "DestAddrHi: 0x%x, DestAddrLo: 0x%x\n",
279 pm[i].DestAddrHi, pm[i].DestAddrLo);
280 nand_dbg_print(NAND_DBG_DEBUG, "XferSize: %d\n",
281 pm[i].XferSize);
282 nand_dbg_print(NAND_DBG_DEBUG, "MemCopyFlags: 0x%x\n",
283 pm[i].MemCopyFlags);
284 nand_dbg_print(NAND_DBG_DEBUG, "MemCopyStatus: %d\n",
285 pm[i].MemCopyStatus);
286 nand_dbg_print(NAND_DBG_DEBUG, "reserved9: 0x%x\n",
287 pm[i].reserved9);
288 nand_dbg_print(NAND_DBG_DEBUG, "reserved10: 0x%x\n",
289 pm[i].reserved10);
290 nand_dbg_print(NAND_DBG_DEBUG, "reserved11: 0x%x\n",
291 pm[i].reserved11);
292 nand_dbg_print(NAND_DBG_DEBUG, "reserved12: 0x%x\n",
293 pm[i].reserved12);
294 nand_dbg_print(NAND_DBG_DEBUG, "reserved13: 0x%x\n",
295 pm[i].reserved13);
296 nand_dbg_print(NAND_DBG_DEBUG, "reserved14: 0x%x\n",
297 pm[i].reserved14);
298 nand_dbg_print(NAND_DBG_DEBUG, "reserved15: 0x%x\n",
299 pm[i].reserved15);
300 }
301}
302#endif
303
304/* Reset cdma_descriptor chain to 0 */
305static void reset_cdma_desc(int i)
306{
307 struct cdma_descriptor *ptr;
308
309 BUG_ON(i >= MAX_DESCS);
310
311 ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
312
313 ptr[i].NxtPointerHi = 0;
314 ptr[i].NxtPointerLo = 0;
315 ptr[i].FlashPointerHi = 0;
316 ptr[i].FlashPointerLo = 0;
317 ptr[i].CommandType = 0;
318 ptr[i].MemAddrHi = 0;
319 ptr[i].MemAddrLo = 0;
320 ptr[i].CommandFlags = 0;
321 ptr[i].Channel = 0;
322 ptr[i].Status = 0;
323 ptr[i].MemCopyPointerHi = 0;
324 ptr[i].MemCopyPointerLo = 0;
325}
326
327/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
328* Function: CDMA_UpdateEventStatus
329* Inputs: none
330* Outputs: none
331* Description: This function update the event status of all the channels
332* when an error condition is reported.
333*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
void CDMA_UpdateEventStatus(void)
{
	int i, j, active_chan;
	struct cdma_descriptor *ptr;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	ptr = (struct cdma_descriptor *)info.cdma_desc_buf;

	/* Find the first descriptor that reported a failure; j stops at
	 * that index (or at info.cdma_num if none failed). */
	for (j = 0; j < info.cdma_num; j++) {
		/* Check for the descriptor with failure */
		if ((ptr[j].Status & CMD_DMA_DESC_FAIL))
			break;

	}

	/* All the previous cmd's status for this channel must be good */
	/* (0xff is the dummy-descriptor sentinel with no pending cmd) */
	for (i = 0; i < j; i++) {
		if (ptr[i].pcmd != 0xff)
			info.pcmds[ptr[i].pcmd].Status = CMD_PASS;
	}

	/* Abort the channel with type 0 reset command. It resets the */
	/* selected channel after the descriptor completes the flash */
	/* operation and status has been updated for the descriptor. */
	/* Memory Copy and Sync associated with this descriptor will */
	/* not be executed */
	active_chan = ioread32(FlashReg + CHNL_ACTIVE);
	if ((active_chan & (1 << info.flash_bank)) == (1 << info.flash_bank)) {
		iowrite32(MODE_02 | (0 << 4), FlashMem); /* Type 0 reset */
		iowrite32((0xF << 4) | info.flash_bank, FlashMem + 0x10);
	} else { /* Should never be reached */
		printk(KERN_ERR "Error! Used bank is not set in"
			" reg CHNL_ACTIVE\n");
	}
}
371
/*
 * Kick off a CDMA transfer on channel @chan by programming the
 * descriptor-chain bus address (info.cdma_desc) into the controller
 * and starting the engine.
 *
 * NOTE(review): the exact meaning of the MODE_10 indexed-address
 * writes and the bit patterns written to FlashMem + 0x10 is defined by
 * the NAND controller hardware (see lld_nand.h / nand_regs.h); the
 * sequence below loads the high 16 bits, then the low 16 bits, of the
 * descriptor address, then issues the start command -- confirm against
 * the controller datasheet before changing.
 */
static void cdma_trans(u16 chan)
{
	u32 addr;

	addr = info.cdma_desc;

	iowrite32(MODE_10 | (chan << 24), FlashMem);
	iowrite32((1 << 7) | chan, FlashMem + 0x10);

	/* High 16 bits of the descriptor-chain address */
	iowrite32(MODE_10 | (chan << 24) | ((0x0FFFF & (addr >> 16)) << 8),
		FlashMem);
	iowrite32((1 << 7) | (1 << 4) | 0, FlashMem + 0x10);

	/* Low 16 bits of the descriptor-chain address */
	iowrite32(MODE_10 | (chan << 24) | ((0x0FFFF & addr) << 8), FlashMem);
	iowrite32((1 << 7) | (1 << 5) | 0, FlashMem + 0x10);

	/* Start the CDMA engine on this channel */
	iowrite32(MODE_10 | (chan << 24), FlashMem);
	iowrite32((1 << 7) | (1 << 5) | (1 << 4) | 0, FlashMem + 0x10);
}
391
392/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
393* Function: CDMA_Execute_CMDs (for use with CMD_DMA)
394* Inputs: tag_count: the number of pending cmds to do
395* Outputs: PASS/FAIL
396* Description: Build the SDMA chain(s) by making one CMD-DMA descriptor
397* for each pending command, start the CDMA engine, and return.
398*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 CDMA_Execute_CMDs(void)
{
	int i, ret;
	u64 flash_add;
	u32 ptr;
	dma_addr_t map_addr, next_ptr;
	u16 status = PASS;
	u16 tmp_c;
	struct cdma_descriptor *pc;
	struct memcpy_descriptor *pm;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* No pending cmds to execute, just exit */
	if (0 == info.pcmds_num) {
		nand_dbg_print(NAND_DBG_TRACE,
			"No pending cmds to execute. Just exit.\n");
		return PASS;
	}

	/* Start from a clean descriptor chain */
	for (i = 0; i < MAX_DESCS; i++)
		reset_cdma_desc(i);

	pc = (struct cdma_descriptor *)info.cdma_desc_buf;
	pm = (struct memcpy_descriptor *)info.memcp_desc_buf;

	/* Bus addresses of the two descriptor arrays, as the CDMA
	 * engine will see them */
	info.cdma_desc = virt_to_bus(info.cdma_desc_buf);
	info.memcp_desc = virt_to_bus(info.memcp_desc_buf);
	next_ptr = info.cdma_desc;
	info.cdma_num = 0;

	/* Build one (or, for main+spare ops, three) CDMA descriptor(s)
	 * per pending command, linked via NxtPointerHi/Lo */
	for (i = 0; i < info.pcmds_num; i++) {
		/* Out-of-range blocks are skipped, not executed */
		if (info.pcmds[i].Block >= DeviceInfo.wTotalBlocks) {
			info.pcmds[i].Status = CMD_NOT_DONE;
			continue;
		}

		next_ptr += sizeof(struct cdma_descriptor);
		pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
		pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;

		/* Use the Block offset within a bank */
		tmp_c = info.pcmds[i].Block /
			(DeviceInfo.wTotalBlocks / totalUsedBanks);
		flash_add = (u64)(info.pcmds[i].Block - tmp_c *
			(DeviceInfo.wTotalBlocks / totalUsedBanks)) *
			DeviceInfo.wBlockDataSize +
			(u64)(info.pcmds[i].Page) *
			DeviceInfo.wPageDataSize;

		/* Flash pointer: bank in bits 24+, page index below */
		ptr = MODE_10 | (info.flash_bank << 24) |
			(u32)GLOB_u64_Div(flash_add,
				DeviceInfo.wPageDataSize);
		pc[info.cdma_num].FlashPointerHi = ptr >> 16;
		pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;

		if ((info.pcmds[i].CMD == WRITE_MAIN_SPARE_CMD) ||
			(info.pcmds[i].CMD == READ_MAIN_SPARE_CMD)) {
			/* Descriptor to set Main+Spare Access Mode */
			pc[info.cdma_num].CommandType = 0x43;
			pc[info.cdma_num].CommandFlags =
				(0 << 10) | (1 << 9) | (0 << 8) | 0x40;
			pc[info.cdma_num].MemAddrHi = 0;
			pc[info.cdma_num].MemAddrLo = 0;
			pc[info.cdma_num].Channel = 0;
			pc[info.cdma_num].Status = 0;
			pc[info.cdma_num].pcmd = i;

			info.cdma_num++;
			BUG_ON(info.cdma_num >= MAX_DESCS);

			reset_cdma_desc(info.cdma_num);
			next_ptr += sizeof(struct cdma_descriptor);
			pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
			pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;
			pc[info.cdma_num].FlashPointerHi = ptr >> 16;
			pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
		}

		/* Fill the data descriptor for this command. Command
		 * flags (0 << 10) | (1 << 9) | (0 << 8) | 0x40 mean:
		 * Continue bit set, Interrupt bit clear (only the final
		 * dummy descriptor interrupts). */
		switch (info.pcmds[i].CMD) {
		case ERASE_CMD:
			pc[info.cdma_num].CommandType = 1;
			pc[info.cdma_num].CommandFlags =
				(0 << 10) | (1 << 9) | (0 << 8) | 0x40;
			pc[info.cdma_num].MemAddrHi = 0;
			pc[info.cdma_num].MemAddrLo = 0;
			break;

		case WRITE_MAIN_CMD:
			pc[info.cdma_num].CommandType =
				0x2100 | info.pcmds[i].PageCount;
			pc[info.cdma_num].CommandFlags =
				(0 << 10) | (1 << 9) | (0 << 8) | 0x40;
			map_addr = virt_to_bus(info.pcmds[i].DataAddr);
			pc[info.cdma_num].MemAddrHi = map_addr >> 16;
			pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
			break;

		case READ_MAIN_CMD:
			pc[info.cdma_num].CommandType =
				0x2000 | info.pcmds[i].PageCount;
			pc[info.cdma_num].CommandFlags =
				(0 << 10) | (1 << 9) | (0 << 8) | 0x40;
			map_addr = virt_to_bus(info.pcmds[i].DataAddr);
			pc[info.cdma_num].MemAddrHi = map_addr >> 16;
			pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
			break;

		case WRITE_MAIN_SPARE_CMD:
			pc[info.cdma_num].CommandType =
				0x2100 | info.pcmds[i].PageCount;
			pc[info.cdma_num].CommandFlags =
				(0 << 10) | (1 << 9) | (0 << 8) | 0x40;
			map_addr = virt_to_bus(info.pcmds[i].DataAddr);
			pc[info.cdma_num].MemAddrHi = map_addr >> 16;
			pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
			break;

		case READ_MAIN_SPARE_CMD:
			pc[info.cdma_num].CommandType =
				0x2000 | info.pcmds[i].PageCount;
			pc[info.cdma_num].CommandFlags =
				(0 << 10) | (1 << 9) | (0 << 8) | 0x40;
			map_addr = virt_to_bus(info.pcmds[i].DataAddr);
			pc[info.cdma_num].MemAddrHi = map_addr >> 16;
			pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
			break;

		case MEMCOPY_CMD:
			pc[info.cdma_num].CommandType = 0xFFFF; /* NOP cmd */
			/* Set bit 11 to let the CDMA engine continue to */
			/* execute only after it has finished processing */
			/* the memcopy descriptor. */
			/* Also set bit 10 and bit 9 to 1 */
			pc[info.cdma_num].CommandFlags = 0x0E40;
			map_addr = info.memcp_desc + info.cdma_num *
					sizeof(struct memcpy_descriptor);
			pc[info.cdma_num].MemCopyPointerHi = map_addr >> 16;
			pc[info.cdma_num].MemCopyPointerLo = map_addr & 0xffff;

			pm[info.cdma_num].NxtPointerHi = 0;
			pm[info.cdma_num].NxtPointerLo = 0;

			map_addr = virt_to_bus(info.pcmds[i].DataSrcAddr);
			pm[info.cdma_num].SrcAddrHi = map_addr >> 16;
			pm[info.cdma_num].SrcAddrLo = map_addr & 0xffff;
			map_addr = virt_to_bus(info.pcmds[i].DataDestAddr);
			pm[info.cdma_num].DestAddrHi = map_addr >> 16;
			pm[info.cdma_num].DestAddrLo = map_addr & 0xffff;

			pm[info.cdma_num].XferSize =
				info.pcmds[i].MemCopyByteCnt;
			pm[info.cdma_num].MemCopyFlags =
				(0 << 15 | 0 << 14 | 27 << 8 | 0x40);
			pm[info.cdma_num].MemCopyStatus = 0;
			break;

		case DUMMY_CMD:
		default:
			pc[info.cdma_num].CommandType = 0XFFFF;
			pc[info.cdma_num].CommandFlags =
				(0 << 10) | (1 << 9) | (0 << 8) | 0x40;
			pc[info.cdma_num].MemAddrHi = 0;
			pc[info.cdma_num].MemAddrLo = 0;
			break;
		}

		pc[info.cdma_num].Channel = 0;
		pc[info.cdma_num].Status = 0;
		pc[info.cdma_num].pcmd = i;

		info.cdma_num++;
		BUG_ON(info.cdma_num >= MAX_DESCS);

		if ((info.pcmds[i].CMD == WRITE_MAIN_SPARE_CMD) ||
			(info.pcmds[i].CMD == READ_MAIN_SPARE_CMD)) {
			/* Descriptor to set back Main Area Access Mode */
			reset_cdma_desc(info.cdma_num);
			next_ptr += sizeof(struct cdma_descriptor);
			pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
			pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;

			pc[info.cdma_num].FlashPointerHi = ptr >> 16;
			pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;

			pc[info.cdma_num].CommandType = 0x42;
			pc[info.cdma_num].CommandFlags =
				(0 << 10) | (1 << 9) | (0 << 8) | 0x40;
			pc[info.cdma_num].MemAddrHi = 0;
			pc[info.cdma_num].MemAddrLo = 0;

			pc[info.cdma_num].Channel = 0;
			pc[info.cdma_num].Status = 0;
			pc[info.cdma_num].pcmd = i;

			info.cdma_num++;
			BUG_ON(info.cdma_num >= MAX_DESCS);
		}
	}

	/* Add a dummy descriptor at end of the CDMA chain */
	reset_cdma_desc(info.cdma_num);
	ptr = MODE_10 | (info.flash_bank << 24);
	pc[info.cdma_num].FlashPointerHi = ptr >> 16;
	pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
	pc[info.cdma_num].CommandType = 0xFFFF; /* NOP command */
	/* Set Command Flags for the last CDMA descriptor: */
	/* set Continue bit (bit 9) to 0 and Interrupt bit (bit 8) to 1 */
	pc[info.cdma_num].CommandFlags =
		(0 << 10) | (0 << 9) | (1 << 8) | 0x40;
	pc[info.cdma_num].pcmd = 0xff; /* Set it to an illegal value */
	info.cdma_num++;
	BUG_ON(info.cdma_num >= MAX_DESCS);

	iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */

	iowrite32(1, FlashReg + DMA_ENABLE);
	/* Wait for DMA to be enabled before issuing the next command */
	while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;
	/* Start the engine and block until the ISR signals completion
	 * (50 * HZ timeout); info.ret carries the final status. */
	cdma_trans(info.flash_bank);

	ret = wait_for_completion_timeout(&info.complete, 50 * HZ);
	if (!ret)
		printk(KERN_ERR "Wait for completion timeout "
			"in %s, Line %d\n", __FILE__, __LINE__);
	status = info.ret;

	info.pcmds_num = 0; /* Clear the pending cmds number to 0 */

	return status;
}
632
/*
 * Return 1 if any supported NAND/CDMA interrupt source is pending,
 * 0 if the interrupt did not come from this controller.
 */
int is_cdma_interrupt(void)
{
	u32 ints_b0, ints_b1, ints_b2, ints_b3, ints_cdma;
	u32 int_en_mask;
	u32 cdma_int_en_mask;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Set the global Enable masks for only those interrupts
	 * that are supported */
	cdma_int_en_mask = (DMA_INTR__DESC_COMP_CHANNEL0 |
			DMA_INTR__DESC_COMP_CHANNEL1 |
			DMA_INTR__DESC_COMP_CHANNEL2 |
			DMA_INTR__DESC_COMP_CHANNEL3 |
			DMA_INTR__MEMCOPY_DESC_COMP);

	int_en_mask = (INTR_STATUS0__ECC_ERR |
		INTR_STATUS0__PROGRAM_FAIL |
		INTR_STATUS0__ERASE_FAIL);

	ints_b0 = ioread32(FlashReg + INTR_STATUS0) & int_en_mask;
	ints_b1 = ioread32(FlashReg + INTR_STATUS1) & int_en_mask;
	ints_b2 = ioread32(FlashReg + INTR_STATUS2) & int_en_mask;
	ints_b3 = ioread32(FlashReg + INTR_STATUS3) & int_en_mask;
	ints_cdma = ioread32(FlashReg + DMA_INTR) & cdma_int_en_mask;

	nand_dbg_print(NAND_DBG_WARN, "ints_bank0 to ints_bank3: "
			"0x%x, 0x%x, 0x%x, 0x%x, ints_cdma: 0x%x\n",
			ints_b0, ints_b1, ints_b2, ints_b3, ints_cdma);

	if (ints_b0 || ints_b1 || ints_b2 || ints_b3 || ints_cdma) {
		return 1;
	} else {
		/* NOTE(review): on this path all four ints_b* values are
		 * zero by the condition above, so these writes store 0 to
		 * the status registers; if INTR_STATUS* is write-1-to-clear
		 * they clear nothing -- confirm the intended behavior
		 * against the controller spec. */
		iowrite32(ints_b0, FlashReg + INTR_STATUS0);
		iowrite32(ints_b1, FlashReg + INTR_STATUS1);
		iowrite32(ints_b2, FlashReg + INTR_STATUS2);
		iowrite32(ints_b3, FlashReg + INTR_STATUS3);
		nand_dbg_print(NAND_DBG_DEBUG,
			"Not a NAND controller interrupt! Ignore it.\n");
		return 0;
	}
}
676
/*
 * Walk the completed descriptor chain: mark every associated pending
 * command as CMD_PASS and convert spare-area read buffers back from
 * physical to logical layout.
 */
static void update_event_status(void)
{
	int i;
	struct cdma_descriptor *ptr;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	ptr = (struct cdma_descriptor *)info.cdma_desc_buf;

	for (i = 0; i < info.cdma_num; i++) {
		/* 0xff marks the dummy descriptor with no pending cmd */
		if (ptr[i].pcmd != 0xff)
			info.pcmds[ptr[i].pcmd].Status = CMD_PASS;
		/* 0x41/0x42/0x43 are the access-mode switch descriptors
		 * added around main+spare operations; they carry no
		 * data of their own, so skip the format conversion. */
		if ((ptr[i].CommandType == 0x41) ||
			(ptr[i].CommandType == 0x42) ||
			(ptr[i].CommandType == 0x43))
			continue;

		switch (info.pcmds[ptr[i].pcmd].CMD) {
		case READ_MAIN_SPARE_CMD:
			Conv_Main_Spare_Data_Phy2Log_Format(
				info.pcmds[ptr[i].pcmd].DataAddr,
				info.pcmds[ptr[i].pcmd].PageCount);
			break;
		case READ_SPARE_CMD:
			Conv_Spare_Data_Phy2Log_Format(
				info.pcmds[ptr[i].pcmd].DataAddr);
			break;
		}
	}
}
708
/*
 * Drain the ECC error FIFO for channel @ch and patch correctable
 * errors directly in the read buffer @buf (whose first page is @page).
 *
 * Returns EVENT_CORRECTABLE_DATA_ERROR_FIXED if every reported error
 * was corrected, EVENT_UNCORRECTABLE_DATA_ERROR if any error was
 * uncorrectable (the last reported error type wins).
 */
static u16 do_ecc_for_desc(u32 ch, u8 *buf, u16 page)
{
	u16 event = EVENT_NONE;
	u16 err_byte;
	u16 err_page = 0;
	u8 err_sector;
	u8 err_device;
	u16 ecc_correction_info;
	u16 err_address;
	u32 eccSectorSize;
	u8 *err_pos;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);

	/* Each iteration consumes one error record; the loop ends when
	 * the hardware flags the last record. */
	do {
		if (0 == ch)
			err_page = ioread32(FlashReg + ERR_PAGE_ADDR0);
		else if (1 == ch)
			err_page = ioread32(FlashReg + ERR_PAGE_ADDR1);
		else if (2 == ch)
			err_page = ioread32(FlashReg + ERR_PAGE_ADDR2);
		else if (3 == ch)
			err_page = ioread32(FlashReg + ERR_PAGE_ADDR3);

		err_address = ioread32(FlashReg + ECC_ERROR_ADDRESS);
		err_byte = err_address & ECC_ERROR_ADDRESS__OFFSET;
		err_sector = ((err_address &
			ECC_ERROR_ADDRESS__SECTOR_NR) >> 12);

		ecc_correction_info = ioread32(FlashReg + ERR_CORRECTION_INFO);
		err_device = ((ecc_correction_info &
			ERR_CORRECTION_INFO__DEVICE_NR) >> 8);

		if (ecc_correction_info & ERR_CORRECTION_INFO__ERROR_TYPE) {
			event = EVENT_UNCORRECTABLE_DATA_ERROR;
		} else {
			event = EVENT_CORRECTABLE_DATA_ERROR_FIXED;
			if (err_byte < ECC_SECTOR_SIZE) {
				/* Locate the faulty byte in the buffer
				 * (page offset + sector + interleaved
				 * device lane) and XOR in the fix mask. */
				err_pos = buf +
					(err_page - page) *
					DeviceInfo.wPageDataSize +
					err_sector * eccSectorSize +
					err_byte *
					DeviceInfo.wDevicesConnected +
					err_device;
				*err_pos ^= ecc_correction_info &
					ERR_CORRECTION_INFO__BYTEMASK;
			}
		}
	} while (!(ecc_correction_info & ERR_CORRECTION_INFO__LAST_ERR_INFO));

	return event;
}
765
/*
 * Handle an ECC interrupt on channel @c.
 *
 * Finds the first CMD-DMA descriptor that has not yet completed - the
 * one the error occurred on - runs ECC correction for it, and reports
 * the resulting event.  The descriptor index is passed back through
 * @p_desc_num so the caller can reuse it for the program/erase failure
 * paths.
 *
 * Returns EVENT_PASS / EVENT_CORRECTABLE_DATA_ERROR_FIXED on recovered
 * errors, EVENT_UNCORRECTABLE_DATA_ERROR otherwise.
 */
static u16 process_ecc_int(u32 c, u16 *p_desc_num)
{
	struct cdma_descriptor *ptr;
	u16 j;
	int event = EVENT_PASS;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Sanity check: ECC interrupts are only expected on the bank the
	 * driver is currently operating on */
	if (c != info.flash_bank)
		printk(KERN_ERR "Error!info.flash_bank is %d, while c is %d\n",
			info.flash_bank, c);

	ptr = (struct cdma_descriptor *)info.cdma_desc_buf;

	/* The first descriptor without CMD_DMA_DESC_COMP set is the one
	 * that faulted */
	for (j = 0; j < info.cdma_num; j++)
		if ((ptr[j].Status & CMD_DMA_DESC_COMP) != CMD_DMA_DESC_COMP)
			break;

	*p_desc_num = j; /* Pass the descripter number found here */

	if (j >= info.cdma_num) {
		/* Every descriptor claims completion, yet we got an ECC
		 * interrupt - treat as unrecoverable */
		printk(KERN_ERR "Can not find the correct descriptor number "
			"when ecc interrupt triggered!"
			"info.cdma_num: %d, j: %d\n", info.cdma_num, j);
		return EVENT_UNCORRECTABLE_DATA_ERROR;
	}

	event = do_ecc_for_desc(c, info.pcmds[ptr[j].pcmd].DataAddr,
		info.pcmds[ptr[j].pcmd].Page);

	if (EVENT_UNCORRECTABLE_DATA_ERROR == event) {
		printk(KERN_ERR "Uncorrectable ECC error!"
			"info.cdma_num: %d, j: %d, "
			"pending cmd CMD: 0x%x, "
			"Block: 0x%x, Page: 0x%x, PageCount: 0x%x\n",
			info.cdma_num, j,
			info.pcmds[ptr[j].pcmd].CMD,
			info.pcmds[ptr[j].pcmd].Block,
			info.pcmds[ptr[j].pcmd].Page,
			info.pcmds[ptr[j].pcmd].PageCount);

		/* 0xff == descriptor with no pending command attached */
		if (ptr[j].pcmd != 0xff)
			info.pcmds[ptr[j].pcmd].Status = CMD_FAIL;
		CDMA_UpdateEventStatus();
	}

	return event;
}
815
816static void process_prog_erase_fail_int(u16 desc_num)
817{
818 struct cdma_descriptor *ptr;
819
820 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
821 __FILE__, __LINE__, __func__);
822
823 ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
824
825 if (ptr[desc_num].pcmd != 0xFF)
826 info.pcmds[ptr[desc_num].pcmd].Status = CMD_FAIL;
827
828 CDMA_UpdateEventStatus();
829}
830
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     CDMA_Event_Status (for use with CMD_DMA)
* Inputs:       none
* Outputs:      Event_Status code
* Description:  This function is called after an interrupt has happened
*               It reads the HW status register and ...tbd
*               It returns the appropriate event status
*
*               Loops servicing interrupt causes on the active bank -
*               ECC errors (corrected in place when possible), program
*               and erase failures, and DMA completion - until either a
*               fatal event or a clean DMA-complete ends the transfer.
*               On exit all per-bank and DMA interrupt status bits are
*               acknowledged and the DMA engine is disabled.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 CDMA_Event_Status(void)
{
	u32 ints_addr[4] = {INTR_STATUS0, INTR_STATUS1,
		INTR_STATUS2, INTR_STATUS3};
	u32 dma_intr_bit[4] = {DMA_INTR__DESC_COMP_CHANNEL0,
		DMA_INTR__DESC_COMP_CHANNEL1,
		DMA_INTR__DESC_COMP_CHANNEL2,
		DMA_INTR__DESC_COMP_CHANNEL3};
	u32 cdma_int_status, int_status;
	u32 ecc_enable = 0;
	u16 event = EVENT_PASS;
	u16 cur_desc = 0;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	ecc_enable = ioread32(FlashReg + ECC_ENABLE);

	while (1) {
		int_status = ioread32(FlashReg + ints_addr[info.flash_bank]);
		if (ecc_enable && (int_status & INTR_STATUS0__ECC_ERR)) {
			/* ECC error: correct if possible, then ack the bit.
			 * Only an uncorrectable error ends the loop. */
			event = process_ecc_int(info.flash_bank, &cur_desc);
			iowrite32(INTR_STATUS0__ECC_ERR,
				FlashReg + ints_addr[info.flash_bank]);
			if (EVENT_UNCORRECTABLE_DATA_ERROR == event) {
				nand_dbg_print(NAND_DBG_WARN,
					"ints_bank0 to ints_bank3: "
					"0x%x, 0x%x, 0x%x, 0x%x, "
					"ints_cdma: 0x%x\n",
					ioread32(FlashReg + INTR_STATUS0),
					ioread32(FlashReg + INTR_STATUS1),
					ioread32(FlashReg + INTR_STATUS2),
					ioread32(FlashReg + INTR_STATUS3),
					ioread32(FlashReg + DMA_INTR));
				break;
			}
		} else if (int_status & INTR_STATUS0__PROGRAM_FAIL) {
			printk(KERN_ERR "NAND program fail interrupt!\n");
			process_prog_erase_fail_int(cur_desc);
			event = EVENT_PROGRAM_FAILURE;
			break;
		} else if (int_status & INTR_STATUS0__ERASE_FAIL) {
			printk(KERN_ERR "NAND erase fail interrupt!\n");
			process_prog_erase_fail_int(cur_desc);
			event = EVENT_ERASE_FAILURE;
			break;
		} else {
			/* No bank-level cause: check for DMA completion on
			 * the active channel */
			cdma_int_status = ioread32(FlashReg + DMA_INTR);
			if (cdma_int_status & dma_intr_bit[info.flash_bank]) {
				iowrite32(dma_intr_bit[info.flash_bank],
					FlashReg + DMA_INTR);
				update_event_status();
				event = EVENT_PASS;
				break;
			}
		}
	}

	/* Acknowledge any remaining bank and DMA interrupt bits */
	int_status = ioread32(FlashReg + ints_addr[info.flash_bank]);
	iowrite32(int_status, FlashReg + ints_addr[info.flash_bank]);
	cdma_int_status = ioread32(FlashReg + DMA_INTR);
	iowrite32(cdma_int_status, FlashReg + DMA_INTR);

	/* Disable the DMA engine and wait until it reports disabled */
	iowrite32(0, FlashReg + DMA_ENABLE);
	while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	return event;
}
908
909
910
diff --git a/drivers/staging/spectra/lld_cdma.h b/drivers/staging/spectra/lld_cdma.h
new file mode 100644
index 00000000000..854ea066f0c
--- /dev/null
+++ b/drivers/staging/spectra/lld_cdma.h
@@ -0,0 +1,123 @@
1/*
2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
/* header for LLD_CDMA.c module */

#ifndef _LLD_CDMA_
#define _LLD_CDMA_

#include "flash.h"

#define DEBUG_SYNC    1

/*///////////   CDMA specific MACRO definition */
/* Pool sizing: up to MAX_DESCS user commands across MAX_CHANS channels,
 * with room for sync points and chain terminators per channel */
#define MAX_DESCS         (255)
#define MAX_CHANS  (4)
#define MAX_SYNC_POINTS         (16)
#define MAX_DESC_PER_CHAN     (MAX_DESCS * 3 + MAX_SYNC_POINTS + 2)

/* Bit fields of the per-descriptor Channel word */
#define CHANNEL_SYNC_MASK       (0x000F)
#define CHANNEL_DMA_MASK        (0x00F0)
#define CHANNEL_ID_MASK         (0x0300)
#define CHANNEL_CONT_MASK       (0x4000)
#define CHANNEL_INTR_MASK       (0x8000)

#define CHANNEL_SYNC_OFFSET     (0)
#define CHANNEL_DMA_OFFSET      (4)
#define CHANNEL_ID_OFFSET       (8)
#define CHANNEL_CONT_OFFSET     (14)
#define CHANNEL_INTR_OFFSET     (15)

/* NOTE(review): lld_emu.c calls CDMA_Execute_CMDs(tag_count) and
 * print_pending_cmds(tag_count), but both are declared here with no
 * parameters - confirm which signature is current. */
u16 CDMA_Data_CMD(u8 cmd, u8 *data, u32 block, u16 page, u16 num, u16 flags);
u16 CDMA_MemCopy_CMD(u8 *dest, u8 *src, u32 byte_cnt, u16 flags);
u16 CDMA_Execute_CMDs(void);
void print_pending_cmds(void);
void print_cdma_descriptors(void);

extern u8 g_SBDCmdIndex;
extern struct mrst_nand_info info;


/*///////////   prototypes: APIs for LLD_CDMA */
int is_cdma_interrupt(void);
u16 CDMA_Event_Status(void);

/* CMD-DMA Descriptor Struct.  These are defined by the CMD_DMA HW */
struct cdma_descriptor {
	u32 NxtPointerHi;
	u32 NxtPointerLo;
	u32 FlashPointerHi;
	u32 FlashPointerLo;
	u32 CommandType;
	u32 MemAddrHi;
	u32 MemAddrLo;
	u32 CommandFlags;
	u32 Channel;
	u32 Status;
	u32 MemCopyPointerHi;
	u32 MemCopyPointerLo;
	u32 Reserved12;
	u32 Reserved13;
	u32 Reserved14;
	u32 pcmd; /* pending cmd num related to this descriptor */
};

/* This struct holds one MemCopy descriptor as defined by the HW */
struct memcpy_descriptor {
	u32 NxtPointerHi;
	u32 NxtPointerLo;
	u32 SrcAddrHi;
	u32 SrcAddrLo;
	u32 DestAddrHi;
	u32 DestAddrLo;
	u32 XferSize;
	u32 MemCopyFlags;
	u32 MemCopyStatus;
	u32 reserved9;
	u32 reserved10;
	u32 reserved11;
	u32 reserved12;
	u32 reserved13;
	u32 reserved14;
	u32 reserved15;
};

/* Pending CMD table entries (includes MemCopy parameters */
struct pending_cmd {
	u8 CMD;                 /* command code (READ_MAIN_CMD, ...) */
	u8 *DataAddr;           /* data buffer for flash commands */
	u32 Block;
	u16 Page;
	u16 PageCount;
	u8 *DataDestAddr;       /* MemCopy destination */
	u8 *DataSrcAddr;        /* MemCopy source */
	u32 MemCopyByteCnt;
	u16 Flags;
	u16 Status;             /* CMD_PASS / CMD_FAIL / ... */
};

#if DEBUG_SYNC
extern u32 debug_sync_cnt;
#endif

/* Definitions for CMD DMA descriptor chain fields */
#define     CMD_DMA_DESC_COMP   0x8000
#define     CMD_DMA_DESC_FAIL   0x4000

#endif /*_LLD_CDMA_*/
diff --git a/drivers/staging/spectra/lld_emu.c b/drivers/staging/spectra/lld_emu.c
new file mode 100644
index 00000000000..60eb0f6fdba
--- /dev/null
+++ b/drivers/staging/spectra/lld_emu.c
@@ -0,0 +1,780 @@
1/*
2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
20#include <linux/fs.h>
21#include <linux/slab.h>
22#include "flash.h"
23#include "ffsdefs.h"
24#include "lld_emu.h"
25#include "lld.h"
26#if CMD_DMA
27#include "lld_cdma.h"
28#endif
29
30#define GLOB_LLD_PAGES 64
31#define GLOB_LLD_PAGE_SIZE (512+16)
32#define GLOB_LLD_PAGE_DATA_SIZE 512
33#define GLOB_LLD_BLOCKS 2048
34
35#if (CMD_DMA && FLASH_EMU)
36#include "lld_cdma.h"
37u32 totalUsedBanks;
38u32 valid_banks[MAX_CHANS];
39#endif
40
41#if FLASH_EMU /* This is for entire module */
42
43static u8 *flash_memory[GLOB_LLD_BLOCKS * GLOB_LLD_PAGES];
44
/* Read nand emu file and then fill it's content to flash_memory */
/*
 * Loads the persistent emulator image ("/root/nand_emu_file") into the
 * flash_memory[] page array, one GLOB_LLD_PAGE_SIZE chunk per page.
 *
 * Returns 0 on success, a negative errno from filp_open, or -EINVAL on
 * a bad file size, invalid dentry, or short read.
 *
 * NOTE(review): this uses the legacy set_fs(get_ds())/vfs_read idiom to
 * read into a kernel buffer cast to __user - valid for this kernel era
 * but removed in modern kernels (would be kernel_read today).
 */
int emu_load_file_to_mem(void)
{
	mm_segment_t fs;
	struct file *nef_filp = NULL;
	struct inode *inode = NULL;
	loff_t nef_size = 0;
	loff_t tmp_file_offset, file_offset;
	ssize_t nread;
	int i, rc = -EINVAL;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Allow vfs_read to accept kernel-space buffers */
	fs = get_fs();
	set_fs(get_ds());

	nef_filp = filp_open("/root/nand_emu_file", O_RDWR | O_LARGEFILE, 0);
	if (IS_ERR(nef_filp)) {
		printk(KERN_ERR "filp_open error: "
		       "Unable to open nand emu file!\n");
		return PTR_ERR(nef_filp);
	}

	if (nef_filp->f_path.dentry) {
		inode = nef_filp->f_path.dentry->d_inode;
	} else {
		printk(KERN_ERR "Can not get valid inode!\n");
		goto out;
	}

	nef_size = i_size_read(inode->i_mapping->host);
	if (nef_size <= 0) {
		printk(KERN_ERR "Invalid nand emu file size: "
		       "0x%llx\n", nef_size);
		goto out;
	} else {
		nand_dbg_print(NAND_DBG_DEBUG, "nand emu file size: %lld\n",
			       nef_size);
	}

	file_offset = 0;
	/* One read per emulated page; a short read aborts the load */
	for (i = 0; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++) {
		tmp_file_offset = file_offset;
		nread = vfs_read(nef_filp,
				 (char __user *)flash_memory[i],
				 GLOB_LLD_PAGE_SIZE, &tmp_file_offset);
		if (nread < GLOB_LLD_PAGE_SIZE) {
			printk(KERN_ERR "%s, Line %d - "
			       "nand emu file partial read: "
			       "%d bytes\n", __FILE__, __LINE__, (int)nread);
			goto out;
		}
		file_offset  += GLOB_LLD_PAGE_SIZE;
	}
	rc = 0;

out:
	filp_close(nef_filp, current->files);
	set_fs(fs);
	return rc;
}
107
/* Write contents of flash_memory to nand emu file */
/*
 * Persists the in-RAM flash image back to "/root/nand_emu_file", one
 * GLOB_LLD_PAGE_SIZE chunk per page - the mirror of
 * emu_load_file_to_mem().
 *
 * Returns 0 on success, a negative errno from filp_open, or -EINVAL on
 * a bad file size, invalid dentry, or short write.
 */
int emu_write_mem_to_file(void)
{
	mm_segment_t fs;
	struct file *nef_filp = NULL;
	struct inode *inode = NULL;
	loff_t nef_size = 0;
	loff_t tmp_file_offset, file_offset;
	ssize_t nwritten;
	int i, rc = -EINVAL;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Allow vfs_write to accept kernel-space buffers (legacy idiom) */
	fs = get_fs();
	set_fs(get_ds());

	nef_filp = filp_open("/root/nand_emu_file", O_RDWR | O_LARGEFILE, 0);
	if (IS_ERR(nef_filp)) {
		printk(KERN_ERR "filp_open error: "
		       "Unable to open nand emu file!\n");
		return PTR_ERR(nef_filp);
	}

	if (nef_filp->f_path.dentry) {
		inode = nef_filp->f_path.dentry->d_inode;
	} else {
		printk(KERN_ERR "Invalid " "nef_filp->f_path.dentry value!\n");
		goto out;
	}

	nef_size = i_size_read(inode->i_mapping->host);
	if (nef_size <= 0) {
		printk(KERN_ERR "Invalid "
		       "nand emu file size: 0x%llx\n", nef_size);
		goto out;
	} else {
		nand_dbg_print(NAND_DBG_DEBUG, "nand emu file size: "
			       "%lld\n", nef_size);
	}

	file_offset = 0;
	/* One write per emulated page; a short write aborts the save */
	for (i = 0; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++) {
		tmp_file_offset = file_offset;
		nwritten = vfs_write(nef_filp,
				     (char __user *)flash_memory[i],
				     GLOB_LLD_PAGE_SIZE, &tmp_file_offset);
		if (nwritten < GLOB_LLD_PAGE_SIZE) {
			printk(KERN_ERR "%s, Line %d - "
			       "nand emu file partial write: "
			       "%d bytes\n", __FILE__, __LINE__, (int)nwritten);
			goto out;
		}
		file_offset  += GLOB_LLD_PAGE_SIZE;
	}
	rc = 0;

out:
	filp_close(nef_filp, current->files);
	set_fs(fs);
	return rc;
}
170
171/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
172* Function: emu_Flash_Init
173* Inputs: none
174* Outputs: PASS=0 (notice 0=ok here)
175* Description: Creates & initializes the flash RAM array.
176*
177*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
178u16 emu_Flash_Init(void)
179{
180 int i;
181
182 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
183 __FILE__, __LINE__, __func__);
184
185 flash_memory[0] = (u8 *)vmalloc(GLOB_LLD_PAGE_SIZE *
186 GLOB_LLD_BLOCKS *
187 GLOB_LLD_PAGES *
188 sizeof(u8));
189 if (!flash_memory[0]) {
190 printk(KERN_ERR "Fail to allocate memory "
191 "for nand emulator!\n");
192 return ERR;
193 }
194
195 memset((char *)(flash_memory[0]), 0xFF,
196 GLOB_LLD_PAGE_SIZE * GLOB_LLD_BLOCKS * GLOB_LLD_PAGES *
197 sizeof(u8));
198
199 for (i = 1; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++)
200 flash_memory[i] = flash_memory[i - 1] + GLOB_LLD_PAGE_SIZE;
201
202 emu_load_file_to_mem(); /* Load nand emu file to mem */
203
204 return PASS;
205}
206
207/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
208* Function: emu_Flash_Release
209* Inputs: none
210* Outputs: PASS=0 (notice 0=ok here)
211* Description: Releases the flash.
212*
213*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
214int emu_Flash_Release(void)
215{
216 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
217 __FILE__, __LINE__, __func__);
218
219 emu_write_mem_to_file(); /* Write back mem to nand emu file */
220
221 vfree(flash_memory[0]);
222 return PASS;
223}
224
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     emu_Read_Device_ID
* Inputs:       none
* Outputs:      PASS=1 FAIL=0
* Description:  Reads the info from the controller registers.
*               Sets up DeviceInfo structure with device parameters
*
*               For the emulator there is no hardware to query: the
*               DeviceInfo fields are filled in from the GLOB_LLD_*
*               compile-time geometry constants instead.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/

u16 emu_Read_Device_ID(void)
{
	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	DeviceInfo.wDeviceMaker = 0;
	DeviceInfo.wDeviceType = 8;
	/* Blocks below wSpectraStartBlock are reserved (bootloader etc.) */
	DeviceInfo.wSpectraStartBlock = 36;
	DeviceInfo.wSpectraEndBlock = GLOB_LLD_BLOCKS - 1;
	DeviceInfo.wTotalBlocks = GLOB_LLD_BLOCKS;
	DeviceInfo.wPagesPerBlock = GLOB_LLD_PAGES;
	DeviceInfo.wPageSize = GLOB_LLD_PAGE_SIZE;
	DeviceInfo.wPageDataSize = GLOB_LLD_PAGE_DATA_SIZE;
	DeviceInfo.wPageSpareSize = GLOB_LLD_PAGE_SIZE -
	    GLOB_LLD_PAGE_DATA_SIZE;
	DeviceInfo.wBlockSize = DeviceInfo.wPageSize * GLOB_LLD_PAGES;
	DeviceInfo.wBlockDataSize = DeviceInfo.wPageDataSize * GLOB_LLD_PAGES;
	DeviceInfo.wDataBlockNum = (u32) (DeviceInfo.wSpectraEndBlock -
						DeviceInfo.wSpectraStartBlock
						+ 1);
	DeviceInfo.MLCDevice = 1; /* Emulate MLC device */
	DeviceInfo.nBitsInPageNumber =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
	DeviceInfo.nBitsInPageDataSize =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
	DeviceInfo.nBitsInBlockDataSize =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);

#if CMD_DMA
	/* Pretend all four CDMA channels/banks are populated */
	totalUsedBanks = 4;
	valid_banks[0] = 1;
	valid_banks[1] = 1;
	valid_banks[2] = 1;
	valid_banks[3] = 1;
#endif

	return PASS;
}
271
272/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
273* Function: emu_Flash_Reset
274* Inputs: none
275* Outputs: PASS=0 (notice 0=ok here)
276* Description: Reset the flash
277*
278*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
279u16 emu_Flash_Reset(void)
280{
281 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
282 __FILE__, __LINE__, __func__);
283
284 return PASS;
285}
286
287/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
288* Function: emu_Erase_Block
289* Inputs: Address
290* Outputs: PASS=0 (notice 0=ok here)
291* Description: Erase a block
292*
293*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
294u16 emu_Erase_Block(u32 block_add)
295{
296 int i;
297
298 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
299 __FILE__, __LINE__, __func__);
300
301 if (block_add >= DeviceInfo.wTotalBlocks) {
302 printk(KERN_ERR "emu_Erase_Block error! "
303 "Too big block address: %d\n", block_add);
304 return FAIL;
305 }
306
307 nand_dbg_print(NAND_DBG_DEBUG, "Erasing block %d\n",
308 (int)block_add);
309
310 for (i = block_add * GLOB_LLD_PAGES;
311 i < ((block_add + 1) * GLOB_LLD_PAGES); i++) {
312 if (flash_memory[i]) {
313 memset((u8 *)(flash_memory[i]), 0xFF,
314 DeviceInfo.wPageSize * sizeof(u8));
315 }
316 }
317
318 return PASS;
319}
320
321/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
322* Function: emu_Write_Page_Main
323* Inputs: Write buffer address pointer
324* Block number
325* Page number
326* Number of pages to process
327* Outputs: PASS=0 (notice 0=ok here)
328* Description: Write the data in the buffer to main area of flash
329*
330*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
331u16 emu_Write_Page_Main(u8 *write_data, u32 Block,
332 u16 Page, u16 PageCount)
333{
334 int i;
335
336 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
337 __FILE__, __LINE__, __func__);
338
339 if (Block >= DeviceInfo.wTotalBlocks)
340 return FAIL;
341
342 if (Page + PageCount > DeviceInfo.wPagesPerBlock)
343 return FAIL;
344
345 nand_dbg_print(NAND_DBG_DEBUG, "emu_Write_Page_Main: "
346 "lba %u Page %u PageCount %u\n",
347 (unsigned int)Block,
348 (unsigned int)Page, (unsigned int)PageCount);
349
350 for (i = 0; i < PageCount; i++) {
351 if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
352 printk(KERN_ERR "Run out of memory\n");
353 return FAIL;
354 }
355 memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]),
356 write_data, DeviceInfo.wPageDataSize);
357 write_data += DeviceInfo.wPageDataSize;
358 Page++;
359 }
360
361 return PASS;
362}
363
364/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
365* Function: emu_Read_Page_Main
366* Inputs: Read buffer address pointer
367* Block number
368* Page number
369* Number of pages to process
370* Outputs: PASS=0 (notice 0=ok here)
371* Description: Read the data from the flash main area to the buffer
372*
373*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
374u16 emu_Read_Page_Main(u8 *read_data, u32 Block,
375 u16 Page, u16 PageCount)
376{
377 int i;
378
379 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
380 __FILE__, __LINE__, __func__);
381
382 if (Block >= DeviceInfo.wTotalBlocks)
383 return FAIL;
384
385 if (Page + PageCount > DeviceInfo.wPagesPerBlock)
386 return FAIL;
387
388 nand_dbg_print(NAND_DBG_DEBUG, "emu_Read_Page_Main: "
389 "lba %u Page %u PageCount %u\n",
390 (unsigned int)Block,
391 (unsigned int)Page, (unsigned int)PageCount);
392
393 for (i = 0; i < PageCount; i++) {
394 if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
395 memset(read_data, 0xFF, DeviceInfo.wPageDataSize);
396 } else {
397 memcpy(read_data,
398 (u8 *) (flash_memory[Block * GLOB_LLD_PAGES
399 + Page]),
400 DeviceInfo.wPageDataSize);
401 }
402 read_data += DeviceInfo.wPageDataSize;
403 Page++;
404 }
405
406 return PASS;
407}
408
409#ifndef ELDORA
410/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
411* Function: emu_Read_Page_Main_Spare
412* Inputs: Write Buffer
413* Address
414* Buffer size
415* Outputs: PASS=0 (notice 0=ok here)
416* Description: Read from flash main+spare area
417*
418*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
419u16 emu_Read_Page_Main_Spare(u8 *read_data, u32 Block,
420 u16 Page, u16 PageCount)
421{
422 int i;
423
424 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
425 __FILE__, __LINE__, __func__);
426
427 if (Block >= DeviceInfo.wTotalBlocks) {
428 printk(KERN_ERR "Read Page Main+Spare "
429 "Error: Block Address too big\n");
430 return FAIL;
431 }
432
433 if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
434 printk(KERN_ERR "Read Page Main+Spare "
435 "Error: Page number too big\n");
436 return FAIL;
437 }
438
439 nand_dbg_print(NAND_DBG_DEBUG, "Read Page Main + Spare - "
440 "No. of pages %u block %u start page %u\n",
441 (unsigned int)PageCount,
442 (unsigned int)Block, (unsigned int)Page);
443
444 for (i = 0; i < PageCount; i++) {
445 if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
446 memset(read_data, 0xFF, DeviceInfo.wPageSize);
447 } else {
448 memcpy(read_data, (u8 *) (flash_memory[Block *
449 GLOB_LLD_PAGES
450 + Page]),
451 DeviceInfo.wPageSize);
452 }
453
454 read_data += DeviceInfo.wPageSize;
455 Page++;
456 }
457
458 return PASS;
459}
460
461/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
462* Function: emu_Write_Page_Main_Spare
463* Inputs: Write buffer
464* address
465* buffer length
466* Outputs: PASS=0 (notice 0=ok here)
467* Description: Write the buffer to main+spare area of flash
468*
469*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
470u16 emu_Write_Page_Main_Spare(u8 *write_data, u32 Block,
471 u16 Page, u16 page_count)
472{
473 u16 i;
474
475 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
476 __FILE__, __LINE__, __func__);
477
478 if (Block >= DeviceInfo.wTotalBlocks) {
479 printk(KERN_ERR "Write Page Main + Spare "
480 "Error: Block Address too big\n");
481 return FAIL;
482 }
483
484 if (Page + page_count > DeviceInfo.wPagesPerBlock) {
485 printk(KERN_ERR "Write Page Main + Spare "
486 "Error: Page number too big\n");
487 return FAIL;
488 }
489
490 nand_dbg_print(NAND_DBG_DEBUG, "Write Page Main+Spare - "
491 "No. of pages %u block %u start page %u\n",
492 (unsigned int)page_count,
493 (unsigned int)Block, (unsigned int)Page);
494
495 for (i = 0; i < page_count; i++) {
496 if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
497 printk(KERN_ERR "Run out of memory!\n");
498 return FAIL;
499 }
500 memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]),
501 write_data, DeviceInfo.wPageSize);
502 write_data += DeviceInfo.wPageSize;
503 Page++;
504 }
505
506 return PASS;
507}
508
509/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
510* Function: emu_Write_Page_Spare
511* Inputs: Write buffer
512* Address
513* buffer size
514* Outputs: PASS=0 (notice 0=ok here)
515* Description: Write the buffer in the spare area
516*
517*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
518u16 emu_Write_Page_Spare(u8 *write_data, u32 Block,
519 u16 Page, u16 PageCount)
520{
521 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
522 __FILE__, __LINE__, __func__);
523
524 if (Block >= DeviceInfo.wTotalBlocks) {
525 printk(KERN_ERR "Read Page Spare Error: "
526 "Block Address too big\n");
527 return FAIL;
528 }
529
530 if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
531 printk(KERN_ERR "Read Page Spare Error: "
532 "Page number too big\n");
533 return FAIL;
534 }
535
536 nand_dbg_print(NAND_DBG_DEBUG, "Write Page Spare- "
537 "block %u page %u\n",
538 (unsigned int)Block, (unsigned int)Page);
539
540 if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
541 printk(KERN_ERR "Run out of memory!\n");
542 return FAIL;
543 }
544
545 memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page] +
546 DeviceInfo.wPageDataSize), write_data,
547 (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
548
549 return PASS;
550}
551
552/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
553* Function: emu_Read_Page_Spare
554* Inputs: Write Buffer
555* Address
556* Buffer size
557* Outputs: PASS=0 (notice 0=ok here)
558* Description: Read data from the spare area
559*
560*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
561u16 emu_Read_Page_Spare(u8 *write_data, u32 Block,
562 u16 Page, u16 PageCount)
563{
564 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
565 __FILE__, __LINE__, __func__);
566
567 if (Block >= DeviceInfo.wTotalBlocks) {
568 printk(KERN_ERR "Read Page Spare "
569 "Error: Block Address too big\n");
570 return FAIL;
571 }
572
573 if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
574 printk(KERN_ERR "Read Page Spare "
575 "Error: Page number too big\n");
576 return FAIL;
577 }
578
579 nand_dbg_print(NAND_DBG_DEBUG, "Read Page Spare- "
580 "block %u page %u\n",
581 (unsigned int)Block, (unsigned int)Page);
582
583 if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
584 memset(write_data, 0xFF,
585 (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
586 } else {
587 memcpy(write_data,
588 (u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]
589 + DeviceInfo.wPageDataSize),
590 (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
591 }
592
593 return PASS;
594}
595
596/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
597* Function: emu_Enable_Disable_Interrupts
598* Inputs: enable or disable
599* Outputs: none
600* Description: NOP
601*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
602void emu_Enable_Disable_Interrupts(u16 INT_ENABLE)
603{
604 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
605 __FILE__, __LINE__, __func__);
606}
607
/* The emulated flash has no factory bad blocks: every block is good. */
u16 emu_Get_Bad_Block(u32 block)
{
	return 0;
}
612
613#if CMD_DMA
614/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
615* Support for CDMA functions
616************************************
617* emu_CDMA_Flash_Init
618* CDMA_process_data command (use LLD_CDMA)
619* CDMA_MemCopy_CMD (use LLD_CDMA)
620* emu_CDMA_execute all commands
621* emu_CDMA_Event_Status
622*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
623u16 emu_CDMA_Flash_Init(void)
624{
625 u16 i;
626
627 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
628 __FILE__, __LINE__, __func__);
629
630 for (i = 0; i < MAX_DESCS + MAX_CHANS; i++) {
631 PendingCMD[i].CMD = 0;
632 PendingCMD[i].Tag = 0;
633 PendingCMD[i].DataAddr = 0;
634 PendingCMD[i].Block = 0;
635 PendingCMD[i].Page = 0;
636 PendingCMD[i].PageCount = 0;
637 PendingCMD[i].DataDestAddr = 0;
638 PendingCMD[i].DataSrcAddr = 0;
639 PendingCMD[i].MemCopyByteCnt = 0;
640 PendingCMD[i].ChanSync[0] = 0;
641 PendingCMD[i].ChanSync[1] = 0;
642 PendingCMD[i].ChanSync[2] = 0;
643 PendingCMD[i].ChanSync[3] = 0;
644 PendingCMD[i].ChanSync[4] = 0;
645 PendingCMD[i].Status = 3;
646 }
647
648 return PASS;
649}
650
/* Interrupt service routine stub for the emulator - currently a no-op,
 * invoked synchronously from emu_CDMA_Execute_CMDs(). */
static void emu_isr(int irq, void *dev_id)
{
	/* TODO:  ... */
}
655
656/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
657* Function: CDMA_Execute_CMDs
658* Inputs: tag_count: the number of pending cmds to do
659* Outputs: PASS/FAIL
660* Description: execute each command in the pending CMD array
661*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
662u16 emu_CDMA_Execute_CMDs(u16 tag_count)
663{
664 u16 i, j;
665 u8 CMD; /* cmd parameter */
666 u8 *data;
667 u32 block;
668 u16 page;
669 u16 count;
670 u16 status = PASS;
671
672 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
673 __FILE__, __LINE__, __func__);
674
675 nand_dbg_print(NAND_DBG_TRACE, "At start of Execute CMDs: "
676 "Tag Count %u\n", tag_count);
677
678 for (i = 0; i < totalUsedBanks; i++) {
679 PendingCMD[i].CMD = DUMMY_CMD;
680 PendingCMD[i].Tag = 0xFF;
681 PendingCMD[i].Block =
682 (DeviceInfo.wTotalBlocks / totalUsedBanks) * i;
683
684 for (j = 0; j <= MAX_CHANS; j++)
685 PendingCMD[i].ChanSync[j] = 0;
686 }
687
688 CDMA_Execute_CMDs(tag_count);
689
690 print_pending_cmds(tag_count);
691
692#if DEBUG_SYNC
693 }
694 debug_sync_cnt++;
695#endif
696
697 for (i = MAX_CHANS;
698 i < tag_count + MAX_CHANS; i++) {
699 CMD = PendingCMD[i].CMD;
700 data = PendingCMD[i].DataAddr;
701 block = PendingCMD[i].Block;
702 page = PendingCMD[i].Page;
703 count = PendingCMD[i].PageCount;
704
705 switch (CMD) {
706 case ERASE_CMD:
707 emu_Erase_Block(block);
708 PendingCMD[i].Status = PASS;
709 break;
710 case WRITE_MAIN_CMD:
711 emu_Write_Page_Main(data, block, page, count);
712 PendingCMD[i].Status = PASS;
713 break;
714 case WRITE_MAIN_SPARE_CMD:
715 emu_Write_Page_Main_Spare(data, block, page, count);
716 PendingCMD[i].Status = PASS;
717 break;
718 case READ_MAIN_CMD:
719 emu_Read_Page_Main(data, block, page, count);
720 PendingCMD[i].Status = PASS;
721 break;
722 case MEMCOPY_CMD:
723 memcpy(PendingCMD[i].DataDestAddr,
724 PendingCMD[i].DataSrcAddr,
725 PendingCMD[i].MemCopyByteCnt);
726 case DUMMY_CMD:
727 PendingCMD[i].Status = PASS;
728 break;
729 default:
730 PendingCMD[i].Status = FAIL;
731 break;
732 }
733 }
734
735 /*
736 * Temperory adding code to reset PendingCMD array for basic testing.
737 * It should be done at the end of event status function.
738 */
739 for (i = tag_count + MAX_CHANS; i < MAX_DESCS; i++) {
740 PendingCMD[i].CMD = 0;
741 PendingCMD[i].Tag = 0;
742 PendingCMD[i].DataAddr = 0;
743 PendingCMD[i].Block = 0;
744 PendingCMD[i].Page = 0;
745 PendingCMD[i].PageCount = 0;
746 PendingCMD[i].DataDestAddr = 0;
747 PendingCMD[i].DataSrcAddr = 0;
748 PendingCMD[i].MemCopyByteCnt = 0;
749 PendingCMD[i].ChanSync[0] = 0;
750 PendingCMD[i].ChanSync[1] = 0;
751 PendingCMD[i].ChanSync[2] = 0;
752 PendingCMD[i].ChanSync[3] = 0;
753 PendingCMD[i].ChanSync[4] = 0;
754 PendingCMD[i].Status = CMD_NOT_DONE;
755 }
756
757 nand_dbg_print(NAND_DBG_TRACE, "At end of Execute CMDs.\n");
758
759 emu_isr(0, 0); /* This is a null isr now. Need fill it in future */
760
761 return status;
762}
763
764/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
765* Function: emu_Event_Status
766* Inputs: none
767* Outputs: Event_Status code
768* Description: This function can also be used to force errors
769*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
770u16 emu_CDMA_Event_Status(void)
771{
772 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
773 __FILE__, __LINE__, __func__);
774
775 return EVENT_PASS;
776}
777
778#endif /* CMD_DMA */
779#endif /* !ELDORA */
780#endif /* FLASH_EMU */
diff --git a/drivers/staging/spectra/lld_emu.h b/drivers/staging/spectra/lld_emu.h
new file mode 100644
index 00000000000..63f84c38d3c
--- /dev/null
+++ b/drivers/staging/spectra/lld_emu.h
@@ -0,0 +1,51 @@
1/*
2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
20#ifndef _LLD_EMU_
21#define _LLD_EMU_
22
23#include "ffsport.h"
24#include "ffsdefs.h"
25
26/* prototypes: emulator API functions */
27extern u16 emu_Flash_Reset(void);
28extern u16 emu_Flash_Init(void);
29extern int emu_Flash_Release(void);
30extern u16 emu_Read_Device_ID(void);
31extern u16 emu_Erase_Block(u32 block_addr);
32extern u16 emu_Write_Page_Main(u8 *write_data, u32 Block,
33 u16 Page, u16 PageCount);
34extern u16 emu_Read_Page_Main(u8 *read_data, u32 Block, u16 Page,
35 u16 PageCount);
36extern u16 emu_Event_Status(void);
37extern void emu_Enable_Disable_Interrupts(u16 INT_ENABLE);
38extern u16 emu_Write_Page_Main_Spare(u8 *write_data, u32 Block,
39 u16 Page, u16 PageCount);
40extern u16 emu_Write_Page_Spare(u8 *write_data, u32 Block,
41 u16 Page, u16 PageCount);
42extern u16 emu_Read_Page_Main_Spare(u8 *read_data, u32 Block,
43 u16 Page, u16 PageCount);
44extern u16 emu_Read_Page_Spare(u8 *read_data, u32 Block, u16 Page,
45 u16 PageCount);
46extern u16 emu_Get_Bad_Block(u32 block);
47
48u16 emu_CDMA_Flash_Init(void);
49u16 emu_CDMA_Execute_CMDs(u16 tag_count);
50u16 emu_CDMA_Event_Status(void);
51#endif /*_LLD_EMU_*/
diff --git a/drivers/staging/spectra/lld_mtd.c b/drivers/staging/spectra/lld_mtd.c
new file mode 100644
index 00000000000..0de05b1e75f
--- /dev/null
+++ b/drivers/staging/spectra/lld_mtd.c
@@ -0,0 +1,687 @@
1/*
2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include "flash.h"
#include "ffsdefs.h"
#include "lld_emu.h"
#include "lld.h"
#if CMD_DMA
#include "lld_cdma.h"
#endif
30
31#define GLOB_LLD_PAGES 64
32#define GLOB_LLD_PAGE_SIZE (512+16)
33#define GLOB_LLD_PAGE_DATA_SIZE 512
34#define GLOB_LLD_BLOCKS 2048
35
36#if CMD_DMA
37#include "lld_cdma.h"
38u32 totalUsedBanks;
39u32 valid_banks[MAX_CHANS];
40#endif
41
42static struct mtd_info *spectra_mtd;
43static int mtddev = -1;
44module_param(mtddev, int, 0);
45
46/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
47* Function: mtd_Flash_Init
48* Inputs: none
49* Outputs: PASS=0 (notice 0=ok here)
50* Description: Creates & initializes the flash RAM array.
51*
52*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
53u16 mtd_Flash_Init(void)
54{
55 if (mtddev == -1) {
56 printk(KERN_ERR "No MTD device specified. Give mtddev parameter\n");
57 return FAIL;
58 }
59
60 spectra_mtd = get_mtd_device(NULL, mtddev);
61 if (!spectra_mtd) {
62 printk(KERN_ERR "Failed to obtain MTD device #%d\n", mtddev);
63 return FAIL;
64 }
65
66 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
67 __FILE__, __LINE__, __func__);
68
69 return PASS;
70}
71
72/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
73* Function: mtd_Flash_Release
74* Inputs: none
75* Outputs: PASS=0 (notice 0=ok here)
76* Description: Releases the flash.
77*
78*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
79int mtd_Flash_Release(void)
80{
81 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
82 __FILE__, __LINE__, __func__);
83 if (!spectra_mtd)
84 return PASS;
85
86 put_mtd_device(spectra_mtd);
87 spectra_mtd = NULL;
88
89 return PASS;
90}
91
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:    mtd_Read_Device_ID
* Inputs:      none
* Outputs:     PASS=1 FAIL=0
* Description: Populates the global DeviceInfo structure with the
*              geometry reported by the underlying MTD device
*              (block/page/spare sizes and derived counts).
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/

u16 mtd_Read_Device_ID(void)
{
	uint64_t tmp;
	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (!spectra_mtd)
		return FAIL;

	/* No real ID registers behind MTD; use fixed maker/type codes */
	DeviceInfo.wDeviceMaker = 0;
	DeviceInfo.wDeviceType = 8;
	DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
	/* 64-bit division: total device size / erase-block size */
	tmp = spectra_mtd->size;
	do_div(tmp, spectra_mtd->erasesize);
	DeviceInfo.wTotalBlocks = tmp;
	DeviceInfo.wSpectraEndBlock = DeviceInfo.wTotalBlocks - 1;
	DeviceInfo.wPagesPerBlock = spectra_mtd->erasesize / spectra_mtd->writesize;
	/* "Page" here means main data plus the OOB (spare) area */
	DeviceInfo.wPageSize = spectra_mtd->writesize + spectra_mtd->oobsize;
	DeviceInfo.wPageDataSize = spectra_mtd->writesize;
	DeviceInfo.wPageSpareSize = spectra_mtd->oobsize;
	DeviceInfo.wBlockSize = DeviceInfo.wPageSize * DeviceInfo.wPagesPerBlock;
	DeviceInfo.wBlockDataSize = DeviceInfo.wPageDataSize * DeviceInfo.wPagesPerBlock;
	DeviceInfo.wDataBlockNum = (u32) (DeviceInfo.wSpectraEndBlock -
					DeviceInfo.wSpectraStartBlock
					+ 1);
	DeviceInfo.MLCDevice = 0; /* TODO: derive from spectra_mtd->celltype & NAND_CI_CELLTYPE_MSK */
	DeviceInfo.nBitsInPageNumber =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
	DeviceInfo.nBitsInPageDataSize =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
	DeviceInfo.nBitsInBlockDataSize =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);

#if CMD_DMA
	/* CDMA emulation: report four populated banks/channels */
	totalUsedBanks = 4;
	valid_banks[0] = 1;
	valid_banks[1] = 1;
	valid_banks[2] = 1;
	valid_banks[3] = 1;
#endif

	return PASS;
}
143
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:    mtd_Flash_Reset
* Inputs:      none
* Outputs:     PASS=0 (notice 0=ok here)
* Description: Reset the flash. Intentionally a no-op for the MTD
*              backend; the MTD layer manages device state itself.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 mtd_Flash_Reset(void)
{
	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	return PASS;
}
158
159void erase_callback(struct erase_info *e)
160{
161 complete((void *)e->priv);
162}
163
164/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
165* Function: mtd_Erase_Block
166* Inputs: Address
167* Outputs: PASS=0 (notice 0=ok here)
168* Description: Erase a block
169*
170*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
171u16 mtd_Erase_Block(u32 block_add)
172{
173 struct erase_info erase;
174 DECLARE_COMPLETION_ONSTACK(comp);
175 int ret;
176
177 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
178 __FILE__, __LINE__, __func__);
179
180 if (block_add >= DeviceInfo.wTotalBlocks) {
181 printk(KERN_ERR "mtd_Erase_Block error! "
182 "Too big block address: %d\n", block_add);
183 return FAIL;
184 }
185
186 nand_dbg_print(NAND_DBG_DEBUG, "Erasing block %d\n",
187 (int)block_add);
188
189 erase.mtd = spectra_mtd;
190 erase.callback = erase_callback;
191 erase.addr = block_add * spectra_mtd->erasesize;
192 erase.len = spectra_mtd->erasesize;
193 erase.priv = (unsigned long)&comp;
194
195 ret = spectra_mtd->erase(spectra_mtd, &erase);
196 if (!ret) {
197 wait_for_completion(&comp);
198 if (erase.state != MTD_ERASE_DONE)
199 ret = -EIO;
200 }
201 if (ret) {
202 printk(KERN_WARNING "mtd_Erase_Block error! "
203 "erase of region [0x%llx, 0x%llx] failed\n",
204 erase.addr, erase.len);
205 return FAIL;
206 }
207
208 return PASS;
209}
210
211/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
212* Function: mtd_Write_Page_Main
213* Inputs: Write buffer address pointer
214* Block number
215* Page number
216* Number of pages to process
217* Outputs: PASS=0 (notice 0=ok here)
218* Description: Write the data in the buffer to main area of flash
219*
220*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
221u16 mtd_Write_Page_Main(u8 *write_data, u32 Block,
222 u16 Page, u16 PageCount)
223{
224 size_t retlen;
225 int ret = 0;
226
227 if (Block >= DeviceInfo.wTotalBlocks)
228 return FAIL;
229
230 if (Page + PageCount > DeviceInfo.wPagesPerBlock)
231 return FAIL;
232
233 nand_dbg_print(NAND_DBG_DEBUG, "mtd_Write_Page_Main: "
234 "lba %u Page %u PageCount %u\n",
235 (unsigned int)Block,
236 (unsigned int)Page, (unsigned int)PageCount);
237
238
239 while (PageCount) {
240 ret = spectra_mtd->write(spectra_mtd,
241 (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
242 DeviceInfo.wPageDataSize, &retlen, write_data);
243 if (ret) {
244 printk(KERN_ERR "%s failed %d\n", __func__, ret);
245 return FAIL;
246 }
247 write_data += DeviceInfo.wPageDataSize;
248 Page++;
249 PageCount--;
250 }
251
252 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
253 __FILE__, __LINE__, __func__);
254
255 return PASS;
256}
257
258/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
259* Function: mtd_Read_Page_Main
260* Inputs: Read buffer address pointer
261* Block number
262* Page number
263* Number of pages to process
264* Outputs: PASS=0 (notice 0=ok here)
265* Description: Read the data from the flash main area to the buffer
266*
267*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
268u16 mtd_Read_Page_Main(u8 *read_data, u32 Block,
269 u16 Page, u16 PageCount)
270{
271 size_t retlen;
272 int ret = 0;
273
274 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
275 __FILE__, __LINE__, __func__);
276
277 if (Block >= DeviceInfo.wTotalBlocks)
278 return FAIL;
279
280 if (Page + PageCount > DeviceInfo.wPagesPerBlock)
281 return FAIL;
282
283 nand_dbg_print(NAND_DBG_DEBUG, "mtd_Read_Page_Main: "
284 "lba %u Page %u PageCount %u\n",
285 (unsigned int)Block,
286 (unsigned int)Page, (unsigned int)PageCount);
287
288
289 while (PageCount) {
290 ret = spectra_mtd->read(spectra_mtd,
291 (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
292 DeviceInfo.wPageDataSize, &retlen, read_data);
293 if (ret) {
294 printk(KERN_ERR "%s failed %d\n", __func__, ret);
295 return FAIL;
296 }
297 read_data += DeviceInfo.wPageDataSize;
298 Page++;
299 PageCount--;
300 }
301
302 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
303 __FILE__, __LINE__, __func__);
304
305 return PASS;
306}
307
308#ifndef ELDORA
309/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
310* Function: mtd_Read_Page_Main_Spare
311* Inputs: Write Buffer
312* Address
313* Buffer size
314* Outputs: PASS=0 (notice 0=ok here)
315* Description: Read from flash main+spare area
316*
317*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
318u16 mtd_Read_Page_Main_Spare(u8 *read_data, u32 Block,
319 u16 Page, u16 PageCount)
320{
321 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
322 __FILE__, __LINE__, __func__);
323
324 if (Block >= DeviceInfo.wTotalBlocks) {
325 printk(KERN_ERR "Read Page Main+Spare "
326 "Error: Block Address too big\n");
327 return FAIL;
328 }
329
330 if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
331 printk(KERN_ERR "Read Page Main+Spare "
332 "Error: Page number %d+%d too big in block %d\n",
333 Page, PageCount, Block);
334 return FAIL;
335 }
336
337 nand_dbg_print(NAND_DBG_DEBUG, "Read Page Main + Spare - "
338 "No. of pages %u block %u start page %u\n",
339 (unsigned int)PageCount,
340 (unsigned int)Block, (unsigned int)Page);
341
342
343 while (PageCount) {
344 struct mtd_oob_ops ops;
345 int ret;
346
347 ops.mode = MTD_OOB_AUTO;
348 ops.datbuf = read_data;
349 ops.len = DeviceInfo.wPageDataSize;
350 ops.oobbuf = read_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET;
351 ops.ooblen = BTSIG_BYTES;
352 ops.ooboffs = 0;
353
354 ret = spectra_mtd->read_oob(spectra_mtd,
355 (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
356 &ops);
357 if (ret) {
358 printk(KERN_ERR "%s failed %d\n", __func__, ret);
359 return FAIL;
360 }
361 read_data += DeviceInfo.wPageSize;
362 Page++;
363 PageCount--;
364 }
365
366 return PASS;
367}
368
369/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
370* Function: mtd_Write_Page_Main_Spare
371* Inputs: Write buffer
372* address
373* buffer length
374* Outputs: PASS=0 (notice 0=ok here)
375* Description: Write the buffer to main+spare area of flash
376*
377*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
378u16 mtd_Write_Page_Main_Spare(u8 *write_data, u32 Block,
379 u16 Page, u16 page_count)
380{
381 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
382 __FILE__, __LINE__, __func__);
383
384 if (Block >= DeviceInfo.wTotalBlocks) {
385 printk(KERN_ERR "Write Page Main + Spare "
386 "Error: Block Address too big\n");
387 return FAIL;
388 }
389
390 if (Page + page_count > DeviceInfo.wPagesPerBlock) {
391 printk(KERN_ERR "Write Page Main + Spare "
392 "Error: Page number %d+%d too big in block %d\n",
393 Page, page_count, Block);
394 WARN_ON(1);
395 return FAIL;
396 }
397
398 nand_dbg_print(NAND_DBG_DEBUG, "Write Page Main+Spare - "
399 "No. of pages %u block %u start page %u\n",
400 (unsigned int)page_count,
401 (unsigned int)Block, (unsigned int)Page);
402
403 while (page_count) {
404 struct mtd_oob_ops ops;
405 int ret;
406
407 ops.mode = MTD_OOB_AUTO;
408 ops.datbuf = write_data;
409 ops.len = DeviceInfo.wPageDataSize;
410 ops.oobbuf = write_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET;
411 ops.ooblen = BTSIG_BYTES;
412 ops.ooboffs = 0;
413
414 ret = spectra_mtd->write_oob(spectra_mtd,
415 (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
416 &ops);
417 if (ret) {
418 printk(KERN_ERR "%s failed %d\n", __func__, ret);
419 return FAIL;
420 }
421 write_data += DeviceInfo.wPageSize;
422 Page++;
423 page_count--;
424 }
425
426 return PASS;
427}
428
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:    mtd_Write_Page_Spare
* Inputs:      Write buffer
*              Address
*              buffer size
* Outputs:     always FAIL
* Description: Spare-only writes are not supported by the MTD backend;
*              any caller reaching this path is flagged loudly.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 mtd_Write_Page_Spare(u8 *write_data, u32 Block,
			    u16 Page, u16 PageCount)
{
	WARN_ON(1);
	return FAIL;
}
444
445/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
446* Function: mtd_Read_Page_Spare
447* Inputs: Write Buffer
448* Address
449* Buffer size
450* Outputs: PASS=0 (notice 0=ok here)
451* Description: Read data from the spare area
452*
453*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
454u16 mtd_Read_Page_Spare(u8 *read_data, u32 Block,
455 u16 Page, u16 PageCount)
456{
457 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
458 __FILE__, __LINE__, __func__);
459
460 if (Block >= DeviceInfo.wTotalBlocks) {
461 printk(KERN_ERR "Read Page Spare "
462 "Error: Block Address too big\n");
463 return FAIL;
464 }
465
466 if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
467 printk(KERN_ERR "Read Page Spare "
468 "Error: Page number too big\n");
469 return FAIL;
470 }
471
472 nand_dbg_print(NAND_DBG_DEBUG, "Read Page Spare- "
473 "block %u page %u (%u pages)\n",
474 (unsigned int)Block, (unsigned int)Page, PageCount);
475
476 while (PageCount) {
477 struct mtd_oob_ops ops;
478 int ret;
479
480 ops.mode = MTD_OOB_AUTO;
481 ops.datbuf = NULL;
482 ops.len = 0;
483 ops.oobbuf = read_data;
484 ops.ooblen = BTSIG_BYTES;
485 ops.ooboffs = 0;
486
487 ret = spectra_mtd->read_oob(spectra_mtd,
488 (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
489 &ops);
490 if (ret) {
491 printk(KERN_ERR "%s failed %d\n", __func__, ret);
492 return FAIL;
493 }
494
495 read_data += DeviceInfo.wPageSize;
496 Page++;
497 PageCount--;
498 }
499
500 return PASS;
501}
502
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:    mtd_Enable_Disable_Interrupts
* Inputs:      enable or disable
* Outputs:     none
* Description: NOP - the MTD backend has no controller interrupts to
*              mask; only a trace message is emitted.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
void mtd_Enable_Disable_Interrupts(u16 INT_ENABLE)
{
	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);
}
514
/* Bad-block query: currently performs no checking and reports every
 * block as good (0). NOTE(review): consider wiring this up to
 * spectra_mtd's bad-block interface - confirm desired semantics. */
u16 mtd_Get_Bad_Block(u32 block)
{
	return 0;
}
519
520#if CMD_DMA
521/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
522* Support for CDMA functions
523************************************
524* mtd_CDMA_Flash_Init
525* CDMA_process_data command (use LLD_CDMA)
526* CDMA_MemCopy_CMD (use LLD_CDMA)
527* mtd_CDMA_execute all commands
528* mtd_CDMA_Event_Status
529*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
530u16 mtd_CDMA_Flash_Init(void)
531{
532 u16 i;
533
534 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
535 __FILE__, __LINE__, __func__);
536
537 for (i = 0; i < MAX_DESCS + MAX_CHANS; i++) {
538 PendingCMD[i].CMD = 0;
539 PendingCMD[i].Tag = 0;
540 PendingCMD[i].DataAddr = 0;
541 PendingCMD[i].Block = 0;
542 PendingCMD[i].Page = 0;
543 PendingCMD[i].PageCount = 0;
544 PendingCMD[i].DataDestAddr = 0;
545 PendingCMD[i].DataSrcAddr = 0;
546 PendingCMD[i].MemCopyByteCnt = 0;
547 PendingCMD[i].ChanSync[0] = 0;
548 PendingCMD[i].ChanSync[1] = 0;
549 PendingCMD[i].ChanSync[2] = 0;
550 PendingCMD[i].ChanSync[3] = 0;
551 PendingCMD[i].ChanSync[4] = 0;
552 PendingCMD[i].Status = 3;
553 }
554
555 return PASS;
556}
557
/* Interrupt service routine placeholder; CDMA completion handling for
 * the MTD backend is not implemented yet. */
static void mtd_isr(int irq, void *dev_id)
{
	/* TODO: ... */
}
562
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:    mtd_CDMA_Execute_CMDs
* Inputs:      tag_count: the number of pending cmds to do
* Outputs:     PASS/FAIL
* Description: Executes each command in the pending-CMD array
*              synchronously through the mtd_* primitives, then resets
*              the unused descriptors for the next batch.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 mtd_CDMA_Execute_CMDs(u16 tag_count)
{
	u16 i, j;
	u8 CMD;		/* cmd parameter */
	u8 *data;
	u32 block;
	u16 page;
	u16 count;
	u16 status = PASS;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	nand_dbg_print(NAND_DBG_TRACE, "At start of Execute CMDs: "
		"Tag Count %u\n", tag_count);

	/* Slots [0, totalUsedBanks) are per-bank dummy descriptors */
	for (i = 0; i < totalUsedBanks; i++) {
		PendingCMD[i].CMD = DUMMY_CMD;
		PendingCMD[i].Tag = 0xFF;
		PendingCMD[i].Block =
			(DeviceInfo.wTotalBlocks / totalUsedBanks) * i;

		for (j = 0; j <= MAX_CHANS; j++)
			PendingCMD[i].ChanSync[j] = 0;
	}

	CDMA_Execute_CMDs(tag_count);

#ifdef VERBOSE
	print_pending_cmds(tag_count);
#endif
#if DEBUG_SYNC
	/* NOTE(review): this closing brace cannot balance within this
	 * function as written - confirm DEBUG_SYNC builds at all. */
	}
	debug_sync_cnt++;
#endif

	/* Real command slots follow the MAX_CHANS dummy descriptors */
	for (i = MAX_CHANS;
			i < tag_count + MAX_CHANS; i++) {
		CMD = PendingCMD[i].CMD;
		data = PendingCMD[i].DataAddr;
		block = PendingCMD[i].Block;
		page = PendingCMD[i].Page;
		count = PendingCMD[i].PageCount;

		switch (CMD) {
		case ERASE_CMD:
			mtd_Erase_Block(block);
			PendingCMD[i].Status = PASS;
			break;
		case WRITE_MAIN_CMD:
			mtd_Write_Page_Main(data, block, page, count);
			PendingCMD[i].Status = PASS;
			break;
		case WRITE_MAIN_SPARE_CMD:
			mtd_Write_Page_Main_Spare(data, block, page, count);
			PendingCMD[i].Status = PASS;
			break;
		case READ_MAIN_CMD:
			mtd_Read_Page_Main(data, block, page, count);
			PendingCMD[i].Status = PASS;
			break;
		case MEMCOPY_CMD:
			memcpy(PendingCMD[i].DataDestAddr,
				PendingCMD[i].DataSrcAddr,
				PendingCMD[i].MemCopyByteCnt);
			/* fall through - memcpy slots also report PASS */
		case DUMMY_CMD:
			PendingCMD[i].Status = PASS;
			break;
		default:
			PendingCMD[i].Status = FAIL;
			break;
		}
	}

	/*
	 * Temporary code to reset the PendingCMD array for basic testing.
	 * It should be done at the end of the event status function.
	 */
	for (i = tag_count + MAX_CHANS; i < MAX_DESCS; i++) {
		PendingCMD[i].CMD = 0;
		PendingCMD[i].Tag = 0;
		PendingCMD[i].DataAddr = 0;
		PendingCMD[i].Block = 0;
		PendingCMD[i].Page = 0;
		PendingCMD[i].PageCount = 0;
		PendingCMD[i].DataDestAddr = 0;
		PendingCMD[i].DataSrcAddr = 0;
		PendingCMD[i].MemCopyByteCnt = 0;
		PendingCMD[i].ChanSync[0] = 0;
		PendingCMD[i].ChanSync[1] = 0;
		PendingCMD[i].ChanSync[2] = 0;
		PendingCMD[i].ChanSync[3] = 0;
		PendingCMD[i].ChanSync[4] = 0;
		PendingCMD[i].Status = CMD_NOT_DONE;
	}

	nand_dbg_print(NAND_DBG_TRACE, "At end of Execute CMDs.\n");

	mtd_isr(0, 0); /* This is a null isr now. Need fill it in future */

	return status;
}
671
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:    mtd_CDMA_Event_Status
* Inputs:      none
* Outputs:     Event_Status code
* Description: This function can also be used to force errors; the
*              MTD backend currently always reports success.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 mtd_CDMA_Event_Status(void)
{
	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	return EVENT_PASS;
}
685
686#endif /* CMD_DMA */
687#endif /* !ELDORA */
diff --git a/drivers/staging/spectra/lld_mtd.h b/drivers/staging/spectra/lld_mtd.h
new file mode 100644
index 00000000000..4e81ee87b53
--- /dev/null
+++ b/drivers/staging/spectra/lld_mtd.h
@@ -0,0 +1,51 @@
1/*
2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
20#ifndef _LLD_MTD_
21#define _LLD_MTD_
22
23#include "ffsport.h"
24#include "ffsdefs.h"
25
26/* prototypes: MTD API functions */
27extern u16 mtd_Flash_Reset(void);
28extern u16 mtd_Flash_Init(void);
29extern int mtd_Flash_Release(void);
30extern u16 mtd_Read_Device_ID(void);
31extern u16 mtd_Erase_Block(u32 block_addr);
32extern u16 mtd_Write_Page_Main(u8 *write_data, u32 Block,
33 u16 Page, u16 PageCount);
34extern u16 mtd_Read_Page_Main(u8 *read_data, u32 Block, u16 Page,
35 u16 PageCount);
36extern u16 mtd_Event_Status(void);
37extern void mtd_Enable_Disable_Interrupts(u16 INT_ENABLE);
38extern u16 mtd_Write_Page_Main_Spare(u8 *write_data, u32 Block,
39 u16 Page, u16 PageCount);
40extern u16 mtd_Write_Page_Spare(u8 *write_data, u32 Block,
41 u16 Page, u16 PageCount);
42extern u16 mtd_Read_Page_Main_Spare(u8 *read_data, u32 Block,
43 u16 Page, u16 PageCount);
44extern u16 mtd_Read_Page_Spare(u8 *read_data, u32 Block, u16 Page,
45 u16 PageCount);
46extern u16 mtd_Get_Bad_Block(u32 block);
47
48u16 mtd_CDMA_Flash_Init(void);
49u16 mtd_CDMA_Execute_CMDs(u16 tag_count);
50u16 mtd_CDMA_Event_Status(void);
51#endif /*_LLD_MTD_*/
diff --git a/drivers/staging/spectra/lld_nand.c b/drivers/staging/spectra/lld_nand.c
new file mode 100644
index 00000000000..13c3ad2db39
--- /dev/null
+++ b/drivers/staging/spectra/lld_nand.c
@@ -0,0 +1,2601 @@
1/*
2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
20#include "lld.h"
21#include "lld_nand.h"
22#include "lld_cdma.h"
23
24#include "spectraswconfig.h"
25#include "flash.h"
26#include "ffsdefs.h"
27
28#include <linux/interrupt.h>
29#include <linux/delay.h>
30#include <linux/wait.h>
31#include <linux/mutex.h>
32
33#include "nand_regs.h"
34
35#define SPECTRA_NAND_NAME "nd"
36
37#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
38#define MAX_PAGES_PER_RW 128
39
40#define INT_IDLE_STATE 0
41#define INT_READ_PAGE_MAIN 0x01
42#define INT_WRITE_PAGE_MAIN 0x02
43#define INT_PIPELINE_READ_AHEAD 0x04
44#define INT_PIPELINE_WRITE_AHEAD 0x08
45#define INT_MULTI_PLANE_READ 0x10
46#define INT_MULTI_PLANE_WRITE 0x11
47
48static u32 enable_ecc;
49
50struct mrst_nand_info info;
51
52int totalUsedBanks;
53u32 GLOB_valid_banks[LLD_MAX_FLASH_BANKS];
54
55void __iomem *FlashReg;
56void __iomem *FlashMem;
57
58u16 conf_parameters[] = {
59 0x0000,
60 0x0000,
61 0x01F4,
62 0x01F4,
63 0x01F4,
64 0x01F4,
65 0x0000,
66 0x0000,
67 0x0001,
68 0x0000,
69 0x0000,
70 0x0000,
71 0x0000,
72 0x0040,
73 0x0001,
74 0x000A,
75 0x000A,
76 0x000A,
77 0x0000,
78 0x0000,
79 0x0005,
80 0x0012,
81 0x000C
82};
83
84u16 NAND_Get_Bad_Block(u32 block)
85{
86 u32 status = PASS;
87 u32 flag_bytes = 0;
88 u32 skip_bytes = DeviceInfo.wSpareSkipBytes;
89 u32 page, i;
90 u8 *pReadSpareBuf = buf_get_bad_block;
91
92 if (enable_ecc)
93 flag_bytes = DeviceInfo.wNumPageSpareFlag;
94
95 for (page = 0; page < 2; page++) {
96 status = NAND_Read_Page_Spare(pReadSpareBuf, block, page, 1);
97 if (status != PASS)
98 return READ_ERROR;
99 for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
100 if (pReadSpareBuf[i] != 0xff)
101 return DEFECTIVE_BLOCK;
102 }
103
104 for (page = 1; page < 3; page++) {
105 status = NAND_Read_Page_Spare(pReadSpareBuf, block,
106 DeviceInfo.wPagesPerBlock - page , 1);
107 if (status != PASS)
108 return READ_ERROR;
109 for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
110 if (pReadSpareBuf[i] != 0xff)
111 return DEFECTIVE_BLOCK;
112 }
113
114 return GOOD_BLOCK;
115}
116

/* Reset every NAND bank through the controller and wait for each bank
 * to raise reset-complete or time-out. Time-outs are only logged;
 * the function always returns PASS. */
u16 NAND_Flash_Reset(void)
{
	u32 i;
	/* Per-bank status-bit / register lookup tables (banks 0-3) */
	u32 intr_status_rst_comp[4] = {INTR_STATUS0__RST_COMP,
		INTR_STATUS1__RST_COMP,
		INTR_STATUS2__RST_COMP,
		INTR_STATUS3__RST_COMP};
	u32 intr_status_time_out[4] = {INTR_STATUS0__TIME_OUT,
		INTR_STATUS1__TIME_OUT,
		INTR_STATUS2__TIME_OUT,
		INTR_STATUS3__TIME_OUT};
	u32 intr_status[4] = {INTR_STATUS0, INTR_STATUS1,
		INTR_STATUS2, INTR_STATUS3};
	u32 device_reset_banks[4] = {DEVICE_RESET__BANK0,
		DEVICE_RESET__BANK1,
		DEVICE_RESET__BANK2,
		DEVICE_RESET__BANK3};

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Clear any stale reset-complete / time-out status bits */
	for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
		iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
			FlashReg + intr_status[i]);

	/* Kick each bank's reset, then busy-wait (unbounded) until the
	 * controller raises RST_COMP or TIME_OUT for that bank */
	for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
		iowrite32(device_reset_banks[i], FlashReg + DEVICE_RESET);
		while (!(ioread32(FlashReg + intr_status[i]) &
			(intr_status_rst_comp[i] | intr_status_time_out[i])))
			;
		if (ioread32(FlashReg + intr_status[i]) &
			intr_status_time_out[i])
			nand_dbg_print(NAND_DBG_WARN,
			"NAND Reset operation timed out on bank %d\n", i);
	}

	/* Acknowledge (clear) the status bits just consumed */
	for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
		iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
			FlashReg + intr_status[i]);

	return PASS;
}
160
/* Program the controller's NAND interface timing registers for the
 * given ONFI timing mode (0-5). The per-mode tables below hold the
 * ONFI timing parameters in nanoseconds; each is converted to clock
 * counts using the CLK_X period. */
static void NAND_ONFi_Timing_Mode(u16 mode)
{
	/* ONFI timing parameters (ns), indexed by timing mode 0-5 */
	u16 Trea[6] = {40, 30, 25, 20, 20, 16};
	u16 Trp[6] = {50, 25, 17, 15, 12, 10};
	u16 Treh[6] = {30, 15, 15, 10, 10, 7};
	u16 Trc[6] = {100, 50, 35, 30, 25, 20};
	u16 Trhoh[6] = {0, 15, 15, 15, 15, 15};
	u16 Trloh[6] = {0, 0, 0, 0, 5, 5};
	u16 Tcea[6] = {100, 45, 30, 25, 25, 25};
	u16 Tadl[6] = {200, 100, 100, 100, 70, 70};
	u16 Trhw[6] = {200, 100, 100, 100, 100, 100};
	u16 Trhz[6] = {200, 100, 100, 100, 100, 100};
	u16 Twhr[6] = {120, 80, 80, 60, 60, 60};
	u16 Tcs[6] = {70, 35, 25, 25, 20, 15};

	u16 TclsRising = 1;
	u16 data_invalid_rhoh, data_invalid_rloh, data_invalid;
	u16 dv_window = 0;
	u16 en_lo, en_hi;
	u16 acc_clks;
	u16 addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Read-enable low/high pulse widths in clocks */
	en_lo = CEIL_DIV(Trp[mode], CLK_X);
	en_hi = CEIL_DIV(Treh[mode], CLK_X);

#if ONFI_BLOOM_TIME
	if ((en_hi * CLK_X) < (Treh[mode] + 2))
		en_hi++;
#endif

	/* Stretch the cycle so the full read-cycle time Trc is met */
	if ((en_lo + en_hi) * CLK_X < Trc[mode])
		en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);

	if ((en_lo + en_hi) < CLK_MULTI)
		en_lo += CLK_MULTI - en_lo - en_hi;

	/* Widen en_lo until the data-valid window is at least 8 ns */
	while (dv_window < 8) {
		data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];

		data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];

		data_invalid =
		    data_invalid_rhoh <
		    data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;

		dv_window = data_invalid - Trea[mode];

		if (dv_window < 8)
			en_lo++;
	}

	/* Access clocks must cover Trea with >= 3 ns of margin */
	acc_clks = CEIL_DIV(Trea[mode], CLK_X);

	while (((acc_clks * CLK_X) - Trea[mode]) < 3)
		acc_clks++;

	if ((data_invalid - acc_clks * CLK_X) < 2)
		nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n",
			__FILE__, __LINE__);

	/* Remaining inter-signal delays, converted ns -> clocks */
	addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
	re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
	re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
	we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
	cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
	if (!TclsRising)
		cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
	if (cs_cnt == 0)
		cs_cnt = 1;

	if (Tcea[mode]) {
		while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])
			cs_cnt++;
	}

#if MODE5_WORKAROUND
	if (mode == 5)
		acc_clks = 5;
#endif

	/* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
	if ((ioread32(FlashReg + MANUFACTURER_ID) == 0) &&
		(ioread32(FlashReg + DEVICE_ID) == 0x88))
		acc_clks = 6;

	/* Commit the computed timings to the controller */
	iowrite32(acc_clks, FlashReg + ACC_CLKS);
	iowrite32(re_2_we, FlashReg + RE_2_WE);
	iowrite32(re_2_re, FlashReg + RE_2_RE);
	iowrite32(we_2_re, FlashReg + WE_2_RE);
	iowrite32(addr_2_data, FlashReg + ADDR_2_DATA);
	iowrite32(en_lo, FlashReg + RDWR_EN_LO_CNT);
	iowrite32(en_hi, FlashReg + RDWR_EN_HI_CNT);
	iowrite32(cs_cnt, FlashReg + CS_SETUP_CNT);
}
258
/* Indexed-addressing write: latch the target address into the
 * controller's index port, then write the data word to the data port
 * at offset 0x10. The write order matters. */
static void index_addr(u32 address, u32 data)
{
	iowrite32(address, FlashMem);
	iowrite32(data, FlashMem + 0x10);
}
264
/* Indexed-addressing read: latch the target address into the index
 * port, then read the data word back from the data port at 0x10. */
static void index_addr_read_data(u32 address, u32 *pdata)
{
	iowrite32(address, FlashMem);
	*pdata = ioread32(FlashMem + 0x10);
}
270
/*
 * Derive DeviceInfo.wECCBytesPerSector and the count of free spare
 * ("flag") bytes per page from the controller's ECC_CORRECTION
 * setting.  In both branches the flag-byte count is the spare size
 * minus the ECC bytes consumed by all sectors of the page, minus
 * the skipped spare bytes.
 */
static void set_ecc_config(void)
{
#if SUPPORT_8BITECC
	/* Small main or spare areas cannot hold the stronger code;
	 * drop the controller back to 8-bit correction */
	if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) < 4096) ||
		(ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) <= 128))
		iowrite32(8, FlashReg + ECC_CORRECTION);
#endif

	if ((ioread32(FlashReg + ECC_CORRECTION) & ECC_CORRECTION__VALUE)
		== 1) {
		/* 1-bit correction: fixed 4 ECC bytes per sector */
		DeviceInfo.wECCBytesPerSector = 4;
		DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
		DeviceInfo.wNumPageSpareFlag =
			DeviceInfo.wPageSpareSize -
			DeviceInfo.wPageDataSize /
			(ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
			DeviceInfo.wECCBytesPerSector
			- DeviceInfo.wSpareSkipBytes;
	} else {
		/* N-bit correction: N * 13 / 8 bytes, then rounded up
		 * to the next even value (+2 if already even, else +1) */
		DeviceInfo.wECCBytesPerSector =
			(ioread32(FlashReg + ECC_CORRECTION) &
			ECC_CORRECTION__VALUE) * 13 / 8;
		if ((DeviceInfo.wECCBytesPerSector) % 2 == 0)
			DeviceInfo.wECCBytesPerSector += 2;
		else
			DeviceInfo.wECCBytesPerSector += 1;

		DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
		DeviceInfo.wNumPageSpareFlag = DeviceInfo.wPageSpareSize -
			DeviceInfo.wPageDataSize /
			(ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
			DeviceInfo.wECCBytesPerSector
			- DeviceInfo.wSpareSkipBytes;
	}
}
306
/*
 * Probe an ONFI 1.0 device: reset banks 0..3 in sequence, cache the
 * controller's ONFI feature/timing registers, size the device from
 * the LUN count and blocks-per-LUN, program the fastest advertised
 * timing mode, and detect MLC from the 3rd Read-ID byte.
 * Returns PASS on success, FAIL if no ONFI timing mode is reported.
 * All waits are busy-polls with no timeout of their own (the
 * controller reports TIME_OUT via the interrupt status registers).
 */
static u16 get_onfi_nand_para(void)
{
	int i;
	u16 blks_lun_l, blks_lun_h, n_of_luns;
	u32 blockperlun, id;

	/* Reset bank 0 and wait for reset-complete or timeout */
	iowrite32(DEVICE_RESET__BANK0, FlashReg + DEVICE_RESET);

	while (!((ioread32(FlashReg + INTR_STATUS0) &
		INTR_STATUS0__RST_COMP) |
		(ioread32(FlashReg + INTR_STATUS0) &
		INTR_STATUS0__TIME_OUT)))
		;

	if (ioread32(FlashReg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) {
		/* Each subsequent bank is only reset after the
		 * previous one completed successfully */
		iowrite32(DEVICE_RESET__BANK1, FlashReg + DEVICE_RESET);
		while (!((ioread32(FlashReg + INTR_STATUS1) &
			INTR_STATUS1__RST_COMP) |
			(ioread32(FlashReg + INTR_STATUS1) &
			INTR_STATUS1__TIME_OUT)))
			;

		if (ioread32(FlashReg + INTR_STATUS1) &
			INTR_STATUS1__RST_COMP) {
			iowrite32(DEVICE_RESET__BANK2,
				FlashReg + DEVICE_RESET);
			while (!((ioread32(FlashReg + INTR_STATUS2) &
				INTR_STATUS2__RST_COMP) |
				(ioread32(FlashReg + INTR_STATUS2) &
				INTR_STATUS2__TIME_OUT)))
				;

			if (ioread32(FlashReg + INTR_STATUS2) &
				INTR_STATUS2__RST_COMP) {
				iowrite32(DEVICE_RESET__BANK3,
					FlashReg + DEVICE_RESET);
				while (!((ioread32(FlashReg + INTR_STATUS3) &
					INTR_STATUS3__RST_COMP) |
					(ioread32(FlashReg + INTR_STATUS3) &
					INTR_STATUS3__TIME_OUT)))
					;
			} else {
				printk(KERN_ERR "Getting a time out for bank 2!\n");
			}
		} else {
			printk(KERN_ERR "Getting a time out for bank 1!\n");
		}
	}

	/* Acknowledge (write-1-to-clear) any pending timeout bits */
	iowrite32(INTR_STATUS0__TIME_OUT, FlashReg + INTR_STATUS0);
	iowrite32(INTR_STATUS1__TIME_OUT, FlashReg + INTR_STATUS1);
	iowrite32(INTR_STATUS2__TIME_OUT, FlashReg + INTR_STATUS2);
	iowrite32(INTR_STATUS3__TIME_OUT, FlashReg + INTR_STATUS3);

	/* Cache the ONFI capability registers the controller read
	 * out of the device's parameter page */
	DeviceInfo.wONFIDevFeatures =
		ioread32(FlashReg + ONFI_DEVICE_FEATURES);
	DeviceInfo.wONFIOptCommands =
		ioread32(FlashReg + ONFI_OPTIONAL_COMMANDS);
	DeviceInfo.wONFITimingMode =
		ioread32(FlashReg + ONFI_TIMING_MODE);
	DeviceInfo.wONFIPgmCacheTimingMode =
		ioread32(FlashReg + ONFI_PGM_CACHE_TIMING_MODE);

	/* Device size = LUNs * blocks-per-LUN (32-bit value split
	 * across two 16-bit registers) */
	n_of_luns = ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
		ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS;
	blks_lun_l = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L);
	blks_lun_h = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U);

	blockperlun = (blks_lun_h << 16) | blks_lun_l;

	DeviceInfo.wTotalBlocks = n_of_luns * blockperlun;

	if (!(ioread32(FlashReg + ONFI_TIMING_MODE) &
		ONFI_TIMING_MODE__VALUE))
		return FAIL;

	/* Pick the highest timing mode (<= 5) the device supports */
	for (i = 5; i > 0; i--) {
		if (ioread32(FlashReg + ONFI_TIMING_MODE) & (0x01 << i))
			break;
	}

	NAND_ONFi_Timing_Mode(i);

	/* Read ID (0x90, address 0): discard the first two ID bytes,
	 * keep the third */
	index_addr(MODE_11 | 0, 0x90);
	index_addr(MODE_11 | 1, 0);

	for (i = 0; i < 3; i++)
		index_addr_read_data(MODE_11 | 2, &id);

	nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id);

	/* Bits 2-3 of the 3rd ID byte flag an MLC device */
	DeviceInfo.MLCDevice = id & 0x0C;

	/* By now, all the ONFI devices we know support the page cache */
	/* rw feature. So here we enable the pipeline_rw_ahead feature */
	/* iowrite32(1, FlashReg + CACHE_WRITE_ENABLE); */
	/* iowrite32(1, FlashReg + CACHE_READ_ENABLE); */

	return PASS;
}
407
/*
 * Probe a Samsung device: issue Read ID (0x90) and decode the 5 ID
 * bytes.  For the K9WAG08U1A (2nd ID byte 0xd3) the timing
 * registers are set from the datasheet; the total block count is
 * derived from the plane count / plane size encoded in the 5th ID
 * byte.  (Units of plane_size/capacity follow Samsung's ID-byte
 * decode -- assumed consistent with blk_size; TODO confirm against
 * the datasheet.)
 */
static void get_samsung_nand_para(void)
{
	u8 no_of_planes;
	u32 blk_size;
	u64 plane_size, capacity;
	u32 id_bytes[5];
	int i;

	/* Read ID command, address 0, then clock out 5 ID bytes */
	index_addr((u32)(MODE_11 | 0), 0x90);
	index_addr((u32)(MODE_11 | 1), 0);
	for (i = 0; i < 5; i++)
		index_addr_read_data((u32)(MODE_11 | 2), &id_bytes[i]);

	nand_dbg_print(NAND_DBG_DEBUG,
		"ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
		id_bytes[0], id_bytes[1], id_bytes[2],
		id_bytes[3], id_bytes[4]);

	if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */
		/* Set timing register values according to datasheet */
		iowrite32(5, FlashReg + ACC_CLKS);
		iowrite32(20, FlashReg + RE_2_WE);
		iowrite32(12, FlashReg + WE_2_RE);
		iowrite32(14, FlashReg + ADDR_2_DATA);
		iowrite32(3, FlashReg + RDWR_EN_LO_CNT);
		iowrite32(2, FlashReg + RDWR_EN_HI_CNT);
		iowrite32(2, FlashReg + CS_SETUP_CNT);
	}

	/* 5th ID byte: bits 2-3 plane count, bits 4-6 plane size */
	no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2);
	plane_size = (u64)64 << ((id_bytes[4] & 0x70) >> 4);
	blk_size = 64 << ((ioread32(FlashReg + DEVICE_PARAM_1) & 0x30) >> 4);
	capacity = (u64)128 * plane_size * no_of_planes;

	DeviceInfo.wTotalBlocks = (u32)GLOB_u64_Div(capacity, blk_size);
}
444
/*
 * Probe a Toshiba device.  Fixes up the spare-area size for a known
 * controller mis-report, then obtains the total block count from a
 * user-populated scratch register (Toshiba parts do not report it);
 * falls back to GLOB_HWCTL_DEFAULT_BLKS if the register cannot be
 * mapped or holds an implausibly small value.
 */
static void get_toshiba_nand_para(void)
{
	void __iomem *scratch_reg;
	u32 tmp;

	/* Workaround to fix a controller bug which reports a wrong */
	/* spare area size for some kind of Toshiba NAND device */
	if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
		(ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) == 64)) {
		iowrite32(216, FlashReg + DEVICE_SPARE_AREA_SIZE);
		tmp = ioread32(FlashReg + DEVICES_CONNECTED) *
			ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);
		iowrite32(tmp, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
#if SUPPORT_15BITECC
		iowrite32(15, FlashReg + ECC_CORRECTION);
#elif SUPPORT_8BITECC
		iowrite32(8, FlashReg + ECC_CORRECTION);
#endif
	}

	/* As Toshiba NAND can not provide it's block number, */
	/* so here we need user to provide the correct block */
	/* number in a scratch register before the Linux NAND */
	/* driver is loaded. If no valid value found in the scratch */
	/* register, then we use default block number value */
	scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
	if (!scratch_reg) {
		printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
			__FILE__, __LINE__);
		DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
	} else {
		nand_dbg_print(NAND_DBG_WARN,
			"Spectra: ioremap reg address: 0x%p\n", scratch_reg);
		/* Scratch byte holds log2 of the block count */
		DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
		if (DeviceInfo.wTotalBlocks < 512)
			DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
		iounmap(scratch_reg);
	}
}
484
/*
 * Probe a Hynix device.  For the known device IDs the geometry and
 * ECC registers are programmed explicitly (the controller's
 * auto-detect is not trusted for these parts); unknown IDs keep the
 * controller defaults.  As with Toshiba parts, the total block
 * count comes from a user-populated scratch register, with a
 * fallback to GLOB_HWCTL_DEFAULT_BLKS.
 */
static void get_hynix_nand_para(void)
{
	void __iomem *scratch_reg;
	u32 main_size, spare_size;

	switch (DeviceInfo.wDeviceID) {
	case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
	case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
		iowrite32(128, FlashReg + PAGES_PER_BLOCK);
		iowrite32(4096, FlashReg + DEVICE_MAIN_AREA_SIZE);
		iowrite32(224, FlashReg + DEVICE_SPARE_AREA_SIZE);
		/* Logical page sizes scale with the device count */
		main_size = 4096 * ioread32(FlashReg + DEVICES_CONNECTED);
		spare_size = 224 * ioread32(FlashReg + DEVICES_CONNECTED);
		iowrite32(main_size, FlashReg + LOGICAL_PAGE_DATA_SIZE);
		iowrite32(spare_size, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
		iowrite32(0, FlashReg + DEVICE_WIDTH);
#if SUPPORT_15BITECC
		iowrite32(15, FlashReg + ECC_CORRECTION);
#elif SUPPORT_8BITECC
		iowrite32(8, FlashReg + ECC_CORRECTION);
#endif
		DeviceInfo.MLCDevice = 1;
		break;
	default:
		nand_dbg_print(NAND_DBG_WARN,
			"Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
			"Will use default parameter values instead.\n",
			DeviceInfo.wDeviceID);
	}

	/* Block count from the scratch register (log2 encoding) */
	scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
	if (!scratch_reg) {
		printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
			__FILE__, __LINE__);
		DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
	} else {
		nand_dbg_print(NAND_DBG_WARN,
			"Spectra: ioremap reg address: 0x%p\n", scratch_reg);
		DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
		if (DeviceInfo.wTotalBlocks < 512)
			DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
		iounmap(scratch_reg);
	}
}
529
/*
 * Probe every controller bank with a Read ID command and mark which
 * ones have a device attached.  Bank 0 is valid if its first ID
 * byte is non-zero; other banks are valid only if their first ID
 * byte matches bank 0's.  Updates GLOB_valid_banks[] and
 * totalUsedBanks.  (Note: entries are only ever set to 1 here --
 * assumes GLOB_valid_banks[] starts zeroed.)
 */
static void find_valid_banks(void)
{
	u32 id[LLD_MAX_FLASH_BANKS];
	int i;

	totalUsedBanks = 0;
	for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
		/* Read ID on bank i (bank select in bits 24+) */
		index_addr((u32)(MODE_11 | (i << 24) | 0), 0x90);
		index_addr((u32)(MODE_11 | (i << 24) | 1), 0);
		index_addr_read_data((u32)(MODE_11 | (i << 24) | 2), &id[i]);

		nand_dbg_print(NAND_DBG_DEBUG,
			"Return 1st ID for bank[%d]: %x\n", i, id[i]);

		if (i == 0) {
			if (id[i] & 0x0ff)
				GLOB_valid_banks[i] = 1;
		} else {
			if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
				GLOB_valid_banks[i] = 1;
		}

		totalUsedBanks += GLOB_valid_banks[i];
	}

	nand_dbg_print(NAND_DBG_DEBUG,
		"totalUsedBanks: %d\n", totalUsedBanks);
}
558
/*
 * Determine the block range owned by this driver.  If the
 * controller's partition feature is present and partition 1 belongs
 * to SPECTRA_PARTITION_ID, the start/end blocks come from the
 * MIN/MAX bank and block-address registers; otherwise the whole
 * device from SPECTRA_START_BLOCK onward is claimed.  Note that in
 * the partition path the register-derived start/end are computed
 * from the per-bank wTotalBlocks BEFORE it is scaled by
 * totalUsedBanks -- the ordering here is intentional.
 */
static void detect_partition_feature(void)
{
	if (ioread32(FlashReg + FEATURES) & FEATURES__PARTITION) {
		if ((ioread32(FlashReg + PERM_SRC_ID_1) &
			PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
			/* start = min_bank * blocks_per_bank + min_blk */
			DeviceInfo.wSpectraStartBlock =
				((ioread32(FlashReg + MIN_MAX_BANK_1) &
				MIN_MAX_BANK_1__MIN_VALUE) *
				DeviceInfo.wTotalBlocks)
				+
				(ioread32(FlashReg + MIN_BLK_ADDR_1) &
				MIN_BLK_ADDR_1__VALUE);

			/* end = max_bank * blocks_per_bank + max_blk */
			DeviceInfo.wSpectraEndBlock =
				(((ioread32(FlashReg + MIN_MAX_BANK_1) &
				MIN_MAX_BANK_1__MAX_VALUE) >> 2) *
				DeviceInfo.wTotalBlocks)
				+
				(ioread32(FlashReg + MAX_BLK_ADDR_1) &
				MAX_BLK_ADDR_1__VALUE);

			DeviceInfo.wTotalBlocks *= totalUsedBanks;

			/* Clamp the end block to the device size */
			if (DeviceInfo.wSpectraEndBlock >=
				DeviceInfo.wTotalBlocks) {
				DeviceInfo.wSpectraEndBlock =
					DeviceInfo.wTotalBlocks - 1;
			}

			DeviceInfo.wDataBlockNum =
				DeviceInfo.wSpectraEndBlock -
				DeviceInfo.wSpectraStartBlock + 1;
		} else {
			/* Partition 1 is not ours: claim everything */
			DeviceInfo.wTotalBlocks *= totalUsedBanks;
			DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
			DeviceInfo.wSpectraEndBlock =
				DeviceInfo.wTotalBlocks - 1;
			DeviceInfo.wDataBlockNum =
				DeviceInfo.wSpectraEndBlock -
				DeviceInfo.wSpectraStartBlock + 1;
		}
	} else {
		/* No partition feature: claim everything */
		DeviceInfo.wTotalBlocks *= totalUsedBanks;
		DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
		DeviceInfo.wSpectraEndBlock = DeviceInfo.wTotalBlocks - 1;
		DeviceInfo.wDataBlockNum =
			DeviceInfo.wSpectraEndBlock -
			DeviceInfo.wSpectraStartBlock + 1;
	}
}
609
/*
 * Dump every field of the global DeviceInfo structure at
 * NAND_DBG_DEBUG verbosity.  Debug aid only; no side effects.
 */
static void dump_device_info(void)
{
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceInfo:\n");
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n",
		DeviceInfo.wDeviceMaker);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n",
		DeviceInfo.wDeviceID);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n",
		DeviceInfo.wDeviceType);
	nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n",
		DeviceInfo.wSpectraStartBlock);
	nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n",
		DeviceInfo.wSpectraEndBlock);
	nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n",
		DeviceInfo.wTotalBlocks);
	nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n",
		DeviceInfo.wPagesPerBlock);
	nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n",
		DeviceInfo.wPageSize);
	nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n",
		DeviceInfo.wPageDataSize);
	nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n",
		DeviceInfo.wPageSpareSize);
	nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n",
		DeviceInfo.wNumPageSpareFlag);
	nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n",
		DeviceInfo.wECCBytesPerSector);
	nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n",
		DeviceInfo.wBlockSize);
	nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n",
		DeviceInfo.wBlockDataSize);
	nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n",
		DeviceInfo.wDataBlockNum);
	nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n",
		DeviceInfo.bPlaneNum);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n",
		DeviceInfo.wDeviceMainAreaSize);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n",
		DeviceInfo.wDeviceSpareAreaSize);
	nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n",
		DeviceInfo.wDevicesConnected);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n",
		DeviceInfo.wDeviceWidth);
	nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n",
		DeviceInfo.wHWRevision);
	nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n",
		DeviceInfo.wHWFeatures);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n",
		DeviceInfo.wONFIDevFeatures);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n",
		DeviceInfo.wONFIOptCommands);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n",
		DeviceInfo.wONFITimingMode);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n",
		DeviceInfo.wONFIPgmCacheTimingMode);
	nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n",
		DeviceInfo.MLCDevice ? "Yes" : "No");
	nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n",
		DeviceInfo.wSpareSkipBytes);
	nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n",
		DeviceInfo.nBitsInPageNumber);
	nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n",
		DeviceInfo.nBitsInPageDataSize);
	nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n",
		DeviceInfo.nBitsInBlockDataSize);
}
676
/*
 * Identify the attached NAND and populate DeviceInfo.
 * Reads the maker/device IDs from the controller, dispatches to the
 * vendor- or ONFI-specific probe, then derives page/block geometry,
 * ECC configuration, plane count, valid banks and the Spectra
 * partition range.  Returns PASS, or FAIL on an ONFI probe failure
 * or an unsupported plane count.
 */
u16 NAND_Read_Device_ID(void)
{
	u16 status = PASS;
	u8 no_of_planes;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Skip 2 spare bytes per sector; 0xffff marks a good block */
	iowrite32(0x02, FlashReg + SPARE_AREA_SKIP_BYTES);
	iowrite32(0xffff, FlashReg + SPARE_AREA_MARKER);
	DeviceInfo.wDeviceMaker = ioread32(FlashReg + MANUFACTURER_ID);
	DeviceInfo.wDeviceID = ioread32(FlashReg + DEVICE_ID);
	DeviceInfo.MLCDevice = ioread32(FlashReg + DEVICE_PARAM_0) & 0x0c;

	/* Vendor dispatch: ONFI first, then by manufacturer ID */
	if (ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
		ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
		if (FAIL == get_onfi_nand_para())
			return FAIL;
	} else if (DeviceInfo.wDeviceMaker == 0xEC) { /* Samsung NAND */
		get_samsung_nand_para();
	} else if (DeviceInfo.wDeviceMaker == 0x98) { /* Toshiba NAND */
		get_toshiba_nand_para();
	} else if (DeviceInfo.wDeviceMaker == 0xAD) { /* Hynix NAND */
		get_hynix_nand_para();
	} else {
		DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
	}

	nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
			"acc_clks: %d, re_2_we: %d, we_2_re: %d,"
			"addr_2_data: %d, rdwr_en_lo_cnt: %d, "
			"rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
			ioread32(FlashReg + ACC_CLKS),
			ioread32(FlashReg + RE_2_WE),
			ioread32(FlashReg + WE_2_RE),
			ioread32(FlashReg + ADDR_2_DATA),
			ioread32(FlashReg + RDWR_EN_LO_CNT),
			ioread32(FlashReg + RDWR_EN_HI_CNT),
			ioread32(FlashReg + CS_SETUP_CNT));

	DeviceInfo.wHWRevision = ioread32(FlashReg + REVISION);
	DeviceInfo.wHWFeatures = ioread32(FlashReg + FEATURES);

	DeviceInfo.wDeviceMainAreaSize =
		ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE);
	DeviceInfo.wDeviceSpareAreaSize =
		ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);

	DeviceInfo.wPageDataSize =
		ioread32(FlashReg + LOGICAL_PAGE_DATA_SIZE);

	/* Note: When using the Micon 4K NAND device, the controller will report
	 * Page Spare Size as 216 bytes. But Micron's Spec say it's 218 bytes.
	 * And if force set it to 218 bytes, the controller can not work
	 * correctly. So just let it be. But keep in mind that this bug may
	 * cause
	 * other problems in future. - Yunpeng 2008-10-10
	 */
	DeviceInfo.wPageSpareSize =
		ioread32(FlashReg + LOGICAL_PAGE_SPARE_SIZE);

	DeviceInfo.wPagesPerBlock = ioread32(FlashReg + PAGES_PER_BLOCK);

	/* Derived geometry */
	DeviceInfo.wPageSize =
		DeviceInfo.wPageDataSize + DeviceInfo.wPageSpareSize;
	DeviceInfo.wBlockSize =
		DeviceInfo.wPageSize * DeviceInfo.wPagesPerBlock;
	DeviceInfo.wBlockDataSize =
		DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;

	DeviceInfo.wDeviceWidth = ioread32(FlashReg + DEVICE_WIDTH);
	DeviceInfo.wDeviceType =
		((ioread32(FlashReg + DEVICE_WIDTH) > 0) ? 16 : 8);

	DeviceInfo.wDevicesConnected = ioread32(FlashReg + DEVICES_CONNECTED);

	DeviceInfo.wSpareSkipBytes =
		ioread32(FlashReg + SPARE_AREA_SKIP_BYTES) *
		DeviceInfo.wDevicesConnected;

	DeviceInfo.nBitsInPageNumber =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
	DeviceInfo.nBitsInPageDataSize =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
	DeviceInfo.nBitsInBlockDataSize =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);

	set_ecc_config();

	/* Register encodes planes-1; only 1/2/4/8 are supported */
	no_of_planes = ioread32(FlashReg + NUMBER_OF_PLANES) &
		NUMBER_OF_PLANES__VALUE;

	switch (no_of_planes) {
	case 0:
	case 1:
	case 3:
	case 7:
		DeviceInfo.bPlaneNum = no_of_planes + 1;
		break;
	default:
		status = FAIL;
		break;
	}

	find_valid_banks();

	detect_partition_feature();

	dump_device_info();

	return status;
}
789
/*
 * Unlock the entire flash array by issuing the controller's
 * unlock-range commands (0x10 = range start, 0x11 = range end) for
 * addresses from block 0 through the last block.  Always returns
 * PASS.
 */
u16 NAND_UnlockArrayAll(void)
{
	u64 start_addr, end_addr;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	start_addr = 0;
	/* Byte address of the last block, scaled down to the
	 * controller's page-data-size granularity */
	end_addr = ((u64)DeviceInfo.wBlockSize *
		(DeviceInfo.wTotalBlocks - 1)) >>
		DeviceInfo.nBitsInPageDataSize;

	index_addr((u32)(MODE_10 | (u32)start_addr), 0x10);
	index_addr((u32)(MODE_10 | (u32)end_addr), 0x11);

	return PASS;
}
807
808void NAND_LLD_Enable_Disable_Interrupts(u16 INT_ENABLE)
809{
810 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
811 __FILE__, __LINE__, __func__);
812
813 if (INT_ENABLE)
814 iowrite32(1, FlashReg + GLOBAL_INT_ENABLE);
815 else
816 iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
817}
818
/*
 * Erase one block.
 * @block: global block number across all banks.
 * Converts the global number into a (bank, in-bank address) pair,
 * issues the erase command and busy-waits (no timeout) for
 * completion.  Returns PASS, or FAIL if @block is out of range or
 * the controller reports ERASE_FAIL.
 */
u16 NAND_Erase_Block(u32 block)
{
	u16 status = PASS;
	u64 flash_add;
	u16 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Byte offset within the owning bank */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	if (block >= DeviceInfo.wTotalBlocks)
		status = FAIL;

	if (status == PASS) {
		intr_status = intr_status_addresses[flash_bank];

		/* Clear stale erase status, then issue erase (cmd 1) */
		iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
			FlashReg + intr_status);

		index_addr((u32)(MODE_10 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)), 1);

		while (!(ioread32(FlashReg + intr_status) &
			(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL)))
			;

		if (ioread32(FlashReg + intr_status) &
			INTR_STATUS0__ERASE_FAIL)
			status = FAIL;

		/* Acknowledge the completion/fail bits */
		iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
			FlashReg + intr_status);
	}

	return status;
}
862
863static u32 Boundary_Check_Block_Page(u32 block, u16 page,
864 u16 page_count)
865{
866 u32 status = PASS;
867
868 if (block >= DeviceInfo.wTotalBlocks)
869 status = FAIL;
870
871 if (page + page_count > DeviceInfo.wPagesPerBlock)
872 status = FAIL;
873
874 return status;
875}
876
/*
 * Read the spare area of a single page into @read_data.
 * @page_count must be 1 (larger values fail).  Uses the controller's
 * spare-access commands (0x41 open / 0x42 close) and copies the
 * spare words out through the data port.  When ECC is enabled, the
 * flag bytes that live at the END of the raw spare are moved to the
 * FRONT of the output buffer, followed by the remaining spare data.
 * Returns PASS, or FAIL on any bounds violation.
 */
u16 NAND_Read_Page_Spare(u8 *read_data, u32 block, u16 page,
				u16 page_count)
{
	u32 status = PASS;
	u32 i;
	u64 flash_add;
	u32 PageSpareSize = DeviceInfo.wPageSpareSize;
	u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
	u32 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u8 *page_spare = buf_read_page_spare;

	if (block >= DeviceInfo.wTotalBlocks) {
		printk(KERN_ERR "block too big: %d\n", (int)block);
		status = FAIL;
	}

	if (page >= DeviceInfo.wPagesPerBlock) {
		printk(KERN_ERR "page too big: %d\n", page);
		status = FAIL;
	}

	if (page_count > 1) {
		printk(KERN_ERR "page count too big: %d\n", page_count);
		status = FAIL;
	}

	/* Linear byte offset of the page within its bank */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	if (status == PASS) {
		intr_status = intr_status_addresses[flash_bank];
		/* Clear all pending status bits (write-1-to-clear) */
		iowrite32(ioread32(FlashReg + intr_status),
			FlashReg + intr_status);

		/* Open spare access (0x41), request the page load */
		index_addr((u32)(MODE_10 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)),
			0x41);
		index_addr((u32)(MODE_10 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)),
			0x2000 | page_count);
		while (!(ioread32(FlashReg + intr_status) &
			INTR_STATUS0__LOAD_COMP))
			;

		/* Stream the spare words out through the data port */
		iowrite32((u32)(MODE_01 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)),
			FlashMem);

		for (i = 0; i < (PageSpareSize / 4); i++)
			*((u32 *)page_spare + i) =
				ioread32(FlashMem + 0x10);

		if (enable_ecc) {
			/* Reorder: trailing flag bytes first, then the
			 * rest of the spare */
			for (i = 0; i < spareFlagBytes; i++)
				read_data[i] =
					page_spare[PageSpareSize -
						spareFlagBytes + i];
			for (i = 0; i < (PageSpareSize - spareFlagBytes); i++)
				read_data[spareFlagBytes + i] =
					page_spare[i];
		} else {
			for (i = 0; i < PageSpareSize; i++)
				read_data[i] = page_spare[i];
		}

		/* Close spare access (0x42) */
		index_addr((u32)(MODE_10 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
	}

	return status;
}
954
/* No use function. Should be removed later */
/*
 * Stub: spare-area writes are not supported by this driver; always
 * logs an error and returns ERR.  All parameters are ignored.
 */
u16 NAND_Write_Page_Spare(u8 *write_data, u32 block, u16 page,
				u16 page_count)
{
	printk(KERN_ERR
		"Error! This function (NAND_Write_Page_Spare) should never"
		" be called!\n");
	return ERR;
}
964
/* op value: 0 - DDMA read; 1 - DDMA write */
/*
 * Program and kick off one data-DMA transfer of @numPages pages at
 * @flash_add on @flash_bank, to/from the buffer @data.  The four
 * index_addr() writes form the controller's DMA command sequence:
 * (1) operation + page count, (2) high 16 bits of the bus address,
 * (3) low 16 bits of the bus address, (4) start the transfer
 * (0x40 presumably encodes the burst length -- TODO confirm
 * against the controller spec).
 */
static void ddma_trans(u8 *data, u64 flash_add,
			u32 flash_bank, int op, u32 numPages)
{
	u32 data_addr;

	/* Map virtual address to bus address for DDMA */
	data_addr = virt_to_bus(data);

	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		(flash_add >> DeviceInfo.nBitsInPageDataSize)),
		(u16)(2 << 12) | (op << 8) | numPages);

	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		((u16)(0x0FFFF & (data_addr >> 16)) << 8)),
		(u16)(2 << 12) | (2 << 8) | 0);

	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		((u16)(0x0FFFF & data_addr) << 8)),
		(u16)(2 << 12) | (3 << 8) | 0);

	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		(1 << 16) | (0x40 << 8)),
		(u16)(2 << 12) | (4 << 8) | 0);
}
990
/* If data in buf are all 0xff, then return 1; otherwise return 0 */
/*
 * Scan the first wPageDataSize bytes of @buf.  On the first
 * non-0xff byte, dump up to 9 bytes from that position at
 * NAND_DBG_WARN level and return 0; return 1 only if every byte is
 * 0xff (i.e. the page looks erased).
 */
static int check_all_1(u8 *buf)
{
	int i, j, cnt;

	for (i = 0; i < DeviceInfo.wPageDataSize; i++) {
		if (buf[i] != 0xff) {
			cnt = 0;
			nand_dbg_print(NAND_DBG_WARN,
				"the first non-0xff data byte is: %d\n", i);
			for (j = i; j < DeviceInfo.wPageDataSize; j++) {
				nand_dbg_print(NAND_DBG_WARN, "0x%x ", buf[j]);
				cnt++;
				if (cnt > 8)
					break;
			}
			nand_dbg_print(NAND_DBG_WARN, "\n");
			return 0;
		}
	}

	return 1;
}
1014
/*
 * Service the controller's ECC error reporting after an ECC_ERR
 * event on @bank: walk the error-info FIFO until LAST_ERR_INFO is
 * set.  Each correctable error is repaired in place by XOR-ing the
 * reported byte mask into the matching position of @buf (position =
 * page offset + sector offset + interleaved device offset).  An
 * uncorrectable error is logged (including whether the page reads
 * as all-0xff, i.e. possibly just erased) and makes the whole call
 * return FAIL; otherwise PASS.
 * @block/@page are the start of the read, used to locate err_page
 * relative to @buf and for diagnostics.
 */
static int do_ecc_new(unsigned long bank, u8 *buf,
		u32 block, u16 page)
{
	int status = PASS;
	u16 err_page = 0;
	u16 err_byte;
	u8 err_sect;
	u8 err_dev;
	u16 err_fix_info;
	u16 err_addr;
	u32 ecc_sect_size;
	u8 *err_pos;
	u32 err_page_addr[4] = {ERR_PAGE_ADDR0,
		ERR_PAGE_ADDR1, ERR_PAGE_ADDR2, ERR_PAGE_ADDR3};

	ecc_sect_size = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);

	do {
		/* Pop one error record from the controller */
		err_page = ioread32(FlashReg + err_page_addr[bank]);
		err_addr = ioread32(FlashReg + ECC_ERROR_ADDRESS);
		err_byte = err_addr & ECC_ERROR_ADDRESS__OFFSET;
		err_sect = ((err_addr & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12);
		err_fix_info = ioread32(FlashReg + ERR_CORRECTION_INFO);
		err_dev = ((err_fix_info & ERR_CORRECTION_INFO__DEVICE_NR)
			>> 8);
		if (err_fix_info & ERR_CORRECTION_INFO__ERROR_TYPE) {
			/* Uncorrectable: log details and fail */
			nand_dbg_print(NAND_DBG_WARN,
				"%s, Line %d Uncorrectable ECC error "
				"when read block %d page %d."
				"PTN_INTR register: 0x%x "
				"err_page: %d, err_sect: %d, err_byte: %d, "
				"err_dev: %d, ecc_sect_size: %d, "
				"err_fix_info: 0x%x\n",
				__FILE__, __LINE__, block, page,
				ioread32(FlashReg + PTN_INTR),
				err_page, err_sect, err_byte, err_dev,
				ecc_sect_size, (u32)err_fix_info);

			if (check_all_1(buf))
				nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
					"All 0xff!\n",
					__FILE__, __LINE__);
			else
				nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
					"Not all 0xff!\n",
					__FILE__, __LINE__);
			status = FAIL;
		} else {
			nand_dbg_print(NAND_DBG_WARN,
				"%s, Line %d Found ECC error "
				"when read block %d page %d."
				"err_page: %d, err_sect: %d, err_byte: %d, "
				"err_dev: %d, ecc_sect_size: %d, "
				"err_fix_info: 0x%x\n",
				__FILE__, __LINE__, block, page,
				err_page, err_sect, err_byte, err_dev,
				ecc_sect_size, (u32)err_fix_info);
			if (err_byte < ECC_SECTOR_SIZE) {
				/* Correct the bit(s) in the buffer;
				 * errors reported in the ECC bytes
				 * themselves (err_byte beyond the
				 * sector) are ignored */
				err_pos = buf +
					(err_page - page) *
					DeviceInfo.wPageDataSize +
					err_sect * ecc_sect_size +
					err_byte *
					DeviceInfo.wDevicesConnected +
					err_dev;

				*err_pos ^= err_fix_info &
					ERR_CORRECTION_INFO__BYTEMASK;
			}
		}
	} while (!(err_fix_info & ERR_CORRECTION_INFO__LAST_ERR_INFO));

	return status;
}
1089
1090u16 NAND_Read_Page_Main_Polling(u8 *read_data,
1091 u32 block, u16 page, u16 page_count)
1092{
1093 u32 status = PASS;
1094 u64 flash_add;
1095 u32 intr_status = 0;
1096 u32 flash_bank;
1097 u32 intr_status_addresses[4] = {INTR_STATUS0,
1098 INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
1099 u8 *read_data_l;
1100
1101 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
1102 __FILE__, __LINE__, __func__);
1103
1104 status = Boundary_Check_Block_Page(block, page, page_count);
1105 if (status != PASS)
1106 return status;
1107
1108 flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
1109 * DeviceInfo.wBlockDataSize +
1110 (u64)page * DeviceInfo.wPageDataSize;
1111 flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
1112
1113 iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
1114
1115 intr_status = intr_status_addresses[flash_bank];
1116 iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
1117
1118 if (page_count > 1) {
1119 read_data_l = read_data;
1120 while (page_count > MAX_PAGES_PER_RW) {
1121 if (ioread32(FlashReg + MULTIPLANE_OPERATION))
1122 status = NAND_Multiplane_Read(read_data_l,
1123 block, page, MAX_PAGES_PER_RW);
1124 else
1125 status = NAND_Pipeline_Read_Ahead_Polling(
1126 read_data_l, block, page,
1127 MAX_PAGES_PER_RW);
1128
1129 if (status == FAIL)
1130 return status;
1131
1132 read_data_l += DeviceInfo.wPageDataSize *
1133 MAX_PAGES_PER_RW;
1134 page_count -= MAX_PAGES_PER_RW;
1135 page += MAX_PAGES_PER_RW;
1136 }
1137 if (ioread32(FlashReg + MULTIPLANE_OPERATION))
1138 status = NAND_Multiplane_Read(read_data_l,
1139 block, page, page_count);
1140 else
1141 status = NAND_Pipeline_Read_Ahead_Polling(
1142 read_data_l, block, page, page_count);
1143
1144 return status;
1145 }
1146
1147 iowrite32(1, FlashReg + DMA_ENABLE);
1148 while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
1149 ;
1150
1151 iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
1152 iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
1153
1154 ddma_trans(read_data, flash_add, flash_bank, 0, 1);
1155
1156 if (enable_ecc) {
1157 while (!(ioread32(FlashReg + intr_status) &
1158 (INTR_STATUS0__ECC_TRANSACTION_DONE |
1159 INTR_STATUS0__ECC_ERR)))
1160 ;
1161
1162 if (ioread32(FlashReg + intr_status) &
1163 INTR_STATUS0__ECC_ERR) {
1164 iowrite32(INTR_STATUS0__ECC_ERR,
1165 FlashReg + intr_status);
1166 status = do_ecc_new(flash_bank, read_data,
1167 block, page);
1168 }
1169
1170 if (ioread32(FlashReg + intr_status) &
1171 INTR_STATUS0__ECC_TRANSACTION_DONE &
1172 INTR_STATUS0__ECC_ERR)
1173 iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE |
1174 INTR_STATUS0__ECC_ERR,
1175 FlashReg + intr_status);
1176 else if (ioread32(FlashReg + intr_status) &
1177 INTR_STATUS0__ECC_TRANSACTION_DONE)
1178 iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
1179 FlashReg + intr_status);
1180 else if (ioread32(FlashReg + intr_status) &
1181 INTR_STATUS0__ECC_ERR)
1182 iowrite32(INTR_STATUS0__ECC_ERR,
1183 FlashReg + intr_status);
1184 } else {
1185 while (!(ioread32(FlashReg + intr_status) &
1186 INTR_STATUS0__DMA_CMD_COMP))
1187 ;
1188 iowrite32(INTR_STATUS0__DMA_CMD_COMP, FlashReg + intr_status);
1189 }
1190
1191 iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
1192
1193 iowrite32(0, FlashReg + DMA_ENABLE);
1194 while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
1195 ;
1196
1197 return status;
1198}
1199
/*
 * Multi-page main-area read using the controller's pipeline
 * read-ahead command, polling mode.  Requires @page_count >= 2
 * (fails otherwise).  Sets up a data-DMA for all pages, then polls
 * the bank's interrupt status: ECC errors are repaired via
 * do_ecc_new(); the transfer is considered finished once BOTH
 * DMA_CMD_COMP and ECC_TRANSACTION_DONE have been seen (tracked by
 * ecc_done_OR_dma_comp).  Returns PASS or FAIL.
 */
u16 NAND_Pipeline_Read_Ahead_Polling(u8 *read_data,
			u32 block, u16 page, u16 page_count)
{
	u32 status = PASS;
	u32 NumPages = page_count;
	u64 flash_add;
	u32 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u32 ecc_done_OR_dma_comp;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);

	if (page_count < 2)
		status = FAIL;

	/* Linear byte offset of the first page within its bank */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		*DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	if (status == PASS) {
		intr_status = intr_status_addresses[flash_bank];
		iowrite32(ioread32(FlashReg + intr_status),
			FlashReg + intr_status);

		iowrite32(1, FlashReg + DMA_ENABLE);
		while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
			;

		iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

		/* 0x42: main-area access mode, then start the DMA */
		index_addr((u32)(MODE_10 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
		ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);

		ecc_done_OR_dma_comp = 0;
		while (1) {
			if (enable_ecc) {
				while (!ioread32(FlashReg + intr_status))
					;

				if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_ERR) {
					iowrite32(INTR_STATUS0__ECC_ERR,
						FlashReg + intr_status);
					status = do_ecc_new(flash_bank,
						read_data, block, page);
				} else if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__DMA_CMD_COMP) {
					iowrite32(INTR_STATUS0__DMA_CMD_COMP,
						FlashReg + intr_status);

					/* Done only if ECC already was */
					if (1 == ecc_done_OR_dma_comp)
						break;

					ecc_done_OR_dma_comp = 1;
				} else if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_TRANSACTION_DONE) {
					iowrite32(
					INTR_STATUS0__ECC_TRANSACTION_DONE,
					FlashReg + intr_status);

					/* Done only if DMA already was */
					if (1 == ecc_done_OR_dma_comp)
						break;

					ecc_done_OR_dma_comp = 1;
				}
			} else {
				while (!(ioread32(FlashReg + intr_status) &
					INTR_STATUS0__DMA_CMD_COMP))
					;

				iowrite32(INTR_STATUS0__DMA_CMD_COMP,
					FlashReg + intr_status);
				break;
			}

			/* Acknowledge every status bit except the three
			 * we track explicitly above */
			iowrite32((~INTR_STATUS0__ECC_ERR) &
				(~INTR_STATUS0__ECC_TRANSACTION_DONE) &
				(~INTR_STATUS0__DMA_CMD_COMP),
				FlashReg + intr_status);

		}

		iowrite32(ioread32(FlashReg + intr_status),
			FlashReg + intr_status);

		iowrite32(0, FlashReg + DMA_ENABLE);

		while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
			;
	}
	return status;
}
1300
/*
 * NAND_Read_Page_Main - read the main (data) area of one or more pages.
 * @read_data:  destination buffer, page_count * wPageDataSize bytes
 * @block:      logical block number (blocks are striped across banks)
 * @page:       first page within the block
 * @page_count: number of pages to read
 *
 * A single page is read here with one descriptor-DMA transfer completed
 * by the ddma_isr interrupt handler.  Multi-page requests are forwarded
 * to NAND_Multiplane_Read() or NAND_Pipeline_Read_Ahead() in chunks of
 * at most MAX_PAGES_PER_RW pages.
 *
 * Return: PASS, FAIL, or ERR on completion timeout.
 */
u16 NAND_Read_Page_Main(u8 *read_data, u32 block, u16 page,
			   u16 page_count)
{
	u32 status = PASS;
	u64 flash_add;		/* byte offset of the page within its bank */
	u32 intr_status = 0;	/* register offset of this bank's INTR_STATUSx */
	u32 flash_bank;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	int ret;
	u8 *read_data_l;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);
	if (status != PASS)
		return status;

	/* Blocks are distributed evenly over totalUsedBanks banks:
	 * in-bank offset from (block % blocks-per-bank), bank index
	 * from (block / blocks-per-bank). */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;
	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);	/* main area only */

	intr_status = intr_status_addresses[flash_bank];
	/* Writing the pending bits back clears them */
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	if (page_count > 1) {
		/* Multi-page request: delegate in MAX_PAGES_PER_RW chunks */
		read_data_l = read_data;
		while (page_count > MAX_PAGES_PER_RW) {
			if (ioread32(FlashReg + MULTIPLANE_OPERATION))
				status = NAND_Multiplane_Read(read_data_l,
					block, page, MAX_PAGES_PER_RW);
			else
				status = NAND_Pipeline_Read_Ahead(
					read_data_l, block, page,
					MAX_PAGES_PER_RW);

			if (status == FAIL)
				return status;

			read_data_l += DeviceInfo.wPageDataSize *
				MAX_PAGES_PER_RW;
			page_count -= MAX_PAGES_PER_RW;
			page += MAX_PAGES_PER_RW;
		}
		/* Remaining (<= MAX_PAGES_PER_RW) pages */
		if (ioread32(FlashReg + MULTIPLANE_OPERATION))
			status = NAND_Multiplane_Read(read_data_l,
					block, page, page_count);
		else
			status = NAND_Pipeline_Read_Ahead(
					read_data_l, block, page, page_count);

		return status;
	}

	iowrite32(1, FlashReg + DMA_ENABLE);
	/* Spin until the DMA-enable flag actually latches */
	while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	/* Fill the mrst_nand_info structure so ddma_isr() knows how to
	 * finish this request */
	info.state = INT_READ_PAGE_MAIN;
	info.read_data = read_data;
	info.flash_bank = flash_bank;
	info.block = block;
	info.page = page;
	info.ret = PASS;

	ddma_trans(read_data, flash_add, flash_bank, 0, 1);

	iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */

	ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
	if (!ret) {
		printk(KERN_ERR "Wait for completion timeout "
			"in %s, Line %d\n", __FILE__, __LINE__);
		status = ERR;
	} else {
		status = info.ret;	/* result set by the ISR path */
	}

	/* Clear leftover interrupt bits and shut the DMA engine down */
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(0, FlashReg + DMA_ENABLE);
	while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	return status;
}
1395
1396void Conv_Spare_Data_Log2Phy_Format(u8 *data)
1397{
1398 int i;
1399 const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
1400 const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
1401
1402 if (enable_ecc) {
1403 for (i = spareFlagBytes - 1; i >= 0; i++)
1404 data[PageSpareSize - spareFlagBytes + i] = data[i];
1405 }
1406}
1407
1408void Conv_Spare_Data_Phy2Log_Format(u8 *data)
1409{
1410 int i;
1411 const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
1412 const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
1413
1414 if (enable_ecc) {
1415 for (i = 0; i < spareFlagBytes; i++)
1416 data[i] = data[PageSpareSize - spareFlagBytes + i];
1417 }
1418}
1419
1420
/*
 * Conv_Main_Spare_Data_Log2Phy_Format - in-place convert page_count
 * pages (main + spare together) from logical to physical layout.
 * @data:       buffer holding page_count * wPageSize bytes
 * @page_count: number of pages to convert
 *
 * Logical layout per page: all main data, then the spare flag bytes,
 * then the per-sector ECC bytes.  Physical layout interleaves eccBytes
 * of ECC after every eccSectorSize of data, and shifts the tail up by
 * spareSkipBytes of controller-reserved space.  Only applies when
 * hardware ECC is enabled.
 *
 * All loops run backwards (last page first, highest index first) so
 * that no source byte is overwritten before it has been moved.
 */
void Conv_Main_Spare_Data_Log2Phy_Format(u8 *data, u16 page_count)
{
	const u32 PageSize = DeviceInfo.wPageSize;
	const u32 PageDataSize = DeviceInfo.wPageDataSize;
	const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
	const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
	const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
	u32 eccSectorSize;
	u32 page_offset;
	int i, j;

	/* An ECC sector spans all devices connected in parallel */
	eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
	if (enable_ecc) {
		while (page_count > 0) {
			page_offset = (page_count - 1) * PageSize;
			/* j = number of ECC sectors per page */
			j = (DeviceInfo.wPageDataSize / eccSectorSize);
			/* Move the spare flag bytes past the last
			 * interleaved (data+ECC) sector */
			for (i = spareFlagBytes - 1; i >= 0; i--)
				data[page_offset +
					(eccSectorSize + eccBytes) * j + i] =
					data[page_offset + PageDataSize + i];
			/* Spread sectors 1..j-1 out to their interleaved
			 * positions (sector 0 is already in place) */
			for (j--; j >= 1; j--) {
				for (i = eccSectorSize - 1; i >= 0; i--)
					data[page_offset +
					(eccSectorSize + eccBytes) * j + i] =
						data[page_offset +
						eccSectorSize * j + i];
			}
			/* Open up spareSkipBytes of reserved space */
			for (i = (PageSize - spareSkipBytes) - 1;
							i >= PageDataSize; i--)
				data[page_offset + i + spareSkipBytes] =
					data[page_offset + i];
			page_count--;
		}
	}
}
1456
/*
 * Conv_Main_Spare_Data_Phy2Log_Format - in-place convert page_count
 * pages (main + spare together) from physical back to logical layout.
 * @data:       buffer holding page_count * wPageSize bytes
 * @page_count: number of pages to convert
 *
 * Exact inverse of Conv_Main_Spare_Data_Log2Phy_Format(): remove the
 * spareSkipBytes gap, de-interleave the per-sector ECC bytes out of
 * the data stream, and gather the spare flag bytes back to just after
 * the main data.  Only applies when hardware ECC is enabled.  Loops
 * run forward (lowest index first) so overlapping moves are safe in
 * this direction.
 */
void Conv_Main_Spare_Data_Phy2Log_Format(u8 *data, u16 page_count)
{
	const u32 PageSize = DeviceInfo.wPageSize;
	const u32 PageDataSize = DeviceInfo.wPageDataSize;
	const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
	const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
	const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
	u32 eccSectorSize;
	u32 page_offset;
	int i, j;

	/* An ECC sector spans all devices connected in parallel */
	eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
	if (enable_ecc) {
		while (page_count > 0) {
			page_offset = (page_count - 1) * PageSize;
			/* Close the spareSkipBytes reserved gap */
			for (i = PageDataSize;
					i < PageSize - spareSkipBytes;
					i++)
				data[page_offset + i] =
					data[page_offset + i +
					spareSkipBytes];
			/* Compact sectors 1..n-1 back to contiguous data
			 * (sector 0 is already in place) */
			for (j = 1;
			j < DeviceInfo.wPageDataSize / eccSectorSize;
			j++) {
				for (i = 0; i < eccSectorSize; i++)
					data[page_offset +
					eccSectorSize * j + i] =
						data[page_offset +
						(eccSectorSize + eccBytes) * j
						+ i];
			}
			/* After the loop j == number of sectors; the flag
			 * bytes sit right past the last interleaved sector */
			for (i = 0; i < spareFlagBytes; i++)
				data[page_offset + PageDataSize + i] =
					data[page_offset +
					(eccSectorSize + eccBytes) * j + i];
			page_count--;
		}
	}
}
1496
/*
 * NAND_Multiplane_Read - read page_count pages using the controller's
 * multiplane mode.  Un-tested function (original author's note).
 * @read_data:  destination buffer
 * @block:      logical block number
 * @page:       first page within the block
 * @page_count: number of pages to read
 *
 * Polls the bank's interrupt-status register rather than using the
 * ISR.  With ECC enabled, completion requires BOTH an
 * ECC_TRANSACTION_DONE and a DMA_CMD_COMP event; the
 * ecc_done_OR_dma_comp flag records that one of the two has already
 * been seen, and the loop exits when the second arrives.
 *
 * Return: PASS or FAIL.
 */
u16 NAND_Multiplane_Read(u8 *read_data, u32 block, u16 page,
			u16 page_count)
{
	u32 status = PASS;
	u32 NumPages = page_count;
	u64 flash_add;
	u32 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u32 ecc_done_OR_dma_comp;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);

	/* In-bank byte offset and bank index; blocks are striped
	 * across totalUsedBanks banks */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	if (status == PASS) {
		intr_status = intr_status_addresses[flash_bank];
		/* Clear pending interrupt bits by writing them back */
		iowrite32(ioread32(FlashReg + intr_status),
			FlashReg + intr_status);

		iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
		iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);

		iowrite32(1, FlashReg + DMA_ENABLE);
		while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
			;
		/* 0x42 is an opaque MODE_10 command code used by all the
		 * multi-page read paths here — presumably "read-ahead
		 * setup"; confirm against the controller datasheet */
		index_addr((u32)(MODE_10 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
		ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);

		ecc_done_OR_dma_comp = 0;
		while (1) {
			if (enable_ecc) {
				/* Busy-wait for any event bit */
				while (!ioread32(FlashReg + intr_status))
					;

				if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_ERR) {
					iowrite32(INTR_STATUS0__ECC_ERR,
						FlashReg + intr_status);
					status = do_ecc_new(flash_bank,
						read_data, block, page);
				} else if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__DMA_CMD_COMP) {
					iowrite32(INTR_STATUS0__DMA_CMD_COMP,
						FlashReg + intr_status);

					/* Second of the two events: done */
					if (1 == ecc_done_OR_dma_comp)
						break;

					ecc_done_OR_dma_comp = 1;
				} else if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_TRANSACTION_DONE) {
					iowrite32(
					INTR_STATUS0__ECC_TRANSACTION_DONE,
					FlashReg + intr_status);

					if (1 == ecc_done_OR_dma_comp)
						break;

					ecc_done_OR_dma_comp = 1;
				}
			} else {
				/* No ECC: DMA completion alone finishes */
				while (!(ioread32(FlashReg + intr_status) &
					INTR_STATUS0__DMA_CMD_COMP))
					;
				iowrite32(INTR_STATUS0__DMA_CMD_COMP,
					FlashReg + intr_status);
				break;
			}

			/* Drop any other stray status bits */
			iowrite32((~INTR_STATUS0__ECC_ERR) &
				(~INTR_STATUS0__ECC_TRANSACTION_DONE) &
				(~INTR_STATUS0__DMA_CMD_COMP),
				FlashReg + intr_status);

		}

		iowrite32(ioread32(FlashReg + intr_status),
			FlashReg + intr_status);

		iowrite32(0, FlashReg + DMA_ENABLE);

		while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
			;

		iowrite32(0, FlashReg + MULTIPLANE_OPERATION);
	}

	return status;
}
1597
/*
 * NAND_Pipeline_Read_Ahead - read page_count (>= 2) pages using the
 * controller's pipelined read-ahead mode, completed via ddma_isr.
 * @read_data:  destination buffer
 * @block:      logical block number
 * @page:       first page within the block
 * @page_count: number of pages; fewer than 2 is rejected with FAIL
 *
 * Return: PASS, FAIL, or ERR on completion timeout.
 */
u16 NAND_Pipeline_Read_Ahead(u8 *read_data, u32 block,
			u16 page, u16 page_count)
{
	u32 status = PASS;
	u32 NumPages = page_count;
	u64 flash_add;
	u32 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	int ret;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);

	/* Pipelining needs at least two pages */
	if (page_count < 2)
		status = FAIL;

	if (status != PASS)
		return status;

	/* In-bank byte offset and bank index (blocks striped over banks) */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		*DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	intr_status = intr_status_addresses[flash_bank];
	/* Clear pending interrupt bits by writing them back */
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(1, FlashReg + DMA_ENABLE);
	while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);	/* main area only */

	/* Fill the mrst_nand_info structure so ddma_isr() knows how to
	 * finish this request */
	info.state = INT_PIPELINE_READ_AHEAD;
	info.read_data = read_data;
	info.flash_bank = flash_bank;
	info.block = block;
	info.page = page;
	info.ret = PASS;

	/* 0x42: opaque MODE_10 read-ahead setup command (see datasheet) */
	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);

	ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);

	iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */

	ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
	if (!ret) {
		printk(KERN_ERR "Wait for completion timeout "
			"in %s, Line %d\n", __FILE__, __LINE__);
		status = ERR;
	} else {
		status = info.ret;	/* result set by the ISR path */
	}

	/* Clear leftover interrupt bits and shut the DMA engine down */
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(0, FlashReg + DMA_ENABLE);

	while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	return status;
}
1669
1670
/*
 * NAND_Write_Page_Main - program the main (data) area of one or more
 * pages.
 * @write_data: source buffer, page_count * wPageDataSize bytes
 * @block:      logical block number
 * @page:       first page within the block
 * @page_count: number of pages to program
 *
 * Mirror of NAND_Read_Page_Main(): one page is programmed here via a
 * single DMA transfer completed by ddma_isr; larger requests are
 * forwarded to NAND_Multiplane_Write() or NAND_Pipeline_Write_Ahead()
 * in chunks of at most MAX_PAGES_PER_RW pages.
 *
 * Return: PASS, FAIL, or ERR on completion timeout.
 */
u16 NAND_Write_Page_Main(u8 *write_data, u32 block, u16 page,
			u16 page_count)
{
	u32 status = PASS;
	u64 flash_add;
	u32 intr_status = 0;
	u32 flash_bank;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	int ret;
	u8 *write_data_l;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);
	if (status != PASS)
		return status;

	/* In-bank byte offset and bank index (blocks striped over banks) */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	intr_status = intr_status_addresses[flash_bank];

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);	/* main area only */

	/* Clear stale program-completion/failure bits */
	iowrite32(INTR_STATUS0__PROGRAM_COMP |
		INTR_STATUS0__PROGRAM_FAIL, FlashReg + intr_status);

	if (page_count > 1) {
		/* Multi-page request: delegate in MAX_PAGES_PER_RW chunks */
		write_data_l = write_data;
		while (page_count > MAX_PAGES_PER_RW) {
			if (ioread32(FlashReg + MULTIPLANE_OPERATION))
				status = NAND_Multiplane_Write(write_data_l,
					block, page, MAX_PAGES_PER_RW);
			else
				status = NAND_Pipeline_Write_Ahead(
					write_data_l, block, page,
					MAX_PAGES_PER_RW);
			if (status == FAIL)
				return status;

			write_data_l += DeviceInfo.wPageDataSize *
				MAX_PAGES_PER_RW;
			page_count -= MAX_PAGES_PER_RW;
			page += MAX_PAGES_PER_RW;
		}
		/* Remaining (<= MAX_PAGES_PER_RW) pages */
		if (ioread32(FlashReg + MULTIPLANE_OPERATION))
			status = NAND_Multiplane_Write(write_data_l,
				block, page, page_count);
		else
			status = NAND_Pipeline_Write_Ahead(write_data_l,
				block, page, page_count);

		return status;
	}

	iowrite32(1, FlashReg + DMA_ENABLE);
	/* Spin until the DMA-enable flag actually latches */
	while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	/* Fill the mrst_nand_info structure so ddma_isr() knows how to
	 * finish this request */
	info.state = INT_WRITE_PAGE_MAIN;
	info.write_data = write_data;
	info.flash_bank = flash_bank;
	info.block = block;
	info.page = page;
	info.ret = PASS;

	ddma_trans(write_data, flash_add, flash_bank, 1, 1);

	iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */

	ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
	if (!ret) {
		printk(KERN_ERR "Wait for completion timeout "
			"in %s, Line %d\n", __FILE__, __LINE__);
		status = ERR;
	} else {
		status = info.ret;	/* result set by the ISR path */
	}

	/* Clear leftover interrupt bits and shut the DMA engine down */
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(0, FlashReg + DMA_ENABLE);
	while (ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG)
		;

	return status;
}
1768
1769void NAND_ECC_Ctrl(int enable)
1770{
1771 if (enable) {
1772 nand_dbg_print(NAND_DBG_WARN,
1773 "Will enable ECC in %s, Line %d, Function: %s\n",
1774 __FILE__, __LINE__, __func__);
1775 iowrite32(1, FlashReg + ECC_ENABLE);
1776 enable_ecc = 1;
1777 } else {
1778 nand_dbg_print(NAND_DBG_WARN,
1779 "Will disable ECC in %s, Line %d, Function: %s\n",
1780 __FILE__, __LINE__, __func__);
1781 iowrite32(0, FlashReg + ECC_ENABLE);
1782 enable_ecc = 0;
1783 }
1784}
1785
/*
 * NAND_Write_Page_Main_Spare - program main + spare area of page_count
 * pages, one page at a time, by PIO through the FlashMem data port.
 * @write_data: source buffer in logical layout, page_count * wPageSize
 *              bytes, advanced by PageSize per page
 * @block:      logical block number
 * @page:       first page within the block
 * @page_count: number of pages to program
 *
 * With ECC enabled each page is first rearranged into the physical
 * layout (ECC bytes interleaved per sector, flag bytes after the last
 * sector, spareSkipBytes of 0xff padding) in the shared
 * buf_write_page_main_spare bounce buffer before being written out.
 * Completion is detected by polling PROGRAM_COMP/PROGRAM_FAIL.
 *
 * Return: PASS or FAIL (first failing page stops the loop).
 */
u16 NAND_Write_Page_Main_Spare(u8 *write_data, u32 block,
			u16 page, u16 page_count)
{
	u32 status = PASS;
	u32 i, j, page_num = 0;
	u32 PageSize = DeviceInfo.wPageSize;
	u32 PageDataSize = DeviceInfo.wPageDataSize;
	u32 eccBytes = DeviceInfo.wECCBytesPerSector;
	u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
	u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
	u64 flash_add;
	u32 eccSectorSize;
	u32 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u8 *page_main_spare = buf_write_page_main_spare;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* An ECC sector spans all devices connected in parallel */
	eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);

	status = Boundary_Check_Block_Page(block, page, page_count);

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	if (status == PASS) {
		intr_status = intr_status_addresses[flash_bank];

		/* Transfer spare area along with main data */
		iowrite32(1, FlashReg + TRANSFER_SPARE_REG);

		while ((status != FAIL) && (page_count > 0)) {
			/* In-bank byte offset of the current page */
			flash_add = (u64)(block %
			(DeviceInfo.wTotalBlocks / totalUsedBanks)) *
			DeviceInfo.wBlockDataSize +
			(u64)page * DeviceInfo.wPageDataSize;

			/* Clear pending interrupt bits */
			iowrite32(ioread32(FlashReg + intr_status),
				FlashReg + intr_status);

			/* MODE_01: select the target page for PIO access */
			iowrite32((u32)(MODE_01 | (flash_bank << 24) |
				(flash_add >>
				DeviceInfo.nBitsInPageDataSize)),
				FlashMem);

			if (enable_ecc) {
				/* Build the physical page layout in the
				 * bounce buffer: interleave data and ECC
				 * bytes sector by sector */
				for (j = 0;
				     j <
				     DeviceInfo.wPageDataSize / eccSectorSize;
				     j++) {
					for (i = 0; i < eccSectorSize; i++)
						page_main_spare[(eccSectorSize +
								 eccBytes) * j +
								i] =
						    write_data[eccSectorSize *
							       j + i];

					for (i = 0; i < eccBytes; i++)
						page_main_spare[(eccSectorSize +
								 eccBytes) * j +
								eccSectorSize +
								i] =
						    write_data[PageDataSize +
							       spareFlagBytes +
							       eccBytes * j +
							       i];
				}

				/* Flag bytes follow the last sector
				 * (j == sector count after the loop) */
				for (i = 0; i < spareFlagBytes; i++)
					page_main_spare[(eccSectorSize +
							 eccBytes) * j + i] =
					    write_data[PageDataSize + i];

				/* Shift the spare tail up (backwards, the
				 * regions overlap) ... */
				for (i = PageSize - 1; i >= PageDataSize +
							spareSkipBytes; i--)
					page_main_spare[i] = page_main_spare[i -
								spareSkipBytes];

				/* ... and pad the reserved gap with 0xff */
				for (i = PageDataSize; i < PageDataSize +
							spareSkipBytes; i++)
					page_main_spare[i] = 0xff;

				/* PIO the whole page out through the
				 * 32-bit data port at FlashMem + 0x10 */
				for (i = 0; i < PageSize / 4; i++)
					iowrite32(
					*((u32 *)page_main_spare + i),
					FlashMem + 0x10);
			} else {

				/* No ECC: raw data goes straight out */
				for (i = 0; i < PageSize / 4; i++)
					iowrite32(*((u32 *)write_data + i),
						FlashMem + 0x10);
			}

			/* Poll for program completion or failure */
			while (!(ioread32(FlashReg + intr_status) &
				(INTR_STATUS0__PROGRAM_COMP |
				INTR_STATUS0__PROGRAM_FAIL)))
				;

			if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__PROGRAM_FAIL)
				status = FAIL;

			iowrite32(ioread32(FlashReg + intr_status),
				FlashReg + intr_status);

			page_num++;
			page_count--;
			write_data += PageSize;
		}

		iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
	}

	return status;
}
1902
/*
 * NAND_Read_Page_Main_Spare - read main + spare area of page_count
 * pages, one page at a time, by PIO through the FlashMem data port.
 * @read_data:  destination buffer in logical layout, advanced by
 *              PageSize per page
 * @block:      logical block number
 * @page:       first page within the block
 * @page_count: number of pages to read
 *
 * Each raw page is read into the buf_read_page_main_spare bounce
 * buffer; with ECC enabled it is then converted to the logical layout
 * (skip bytes removed, per-sector ECC bytes de-interleaved, flag bytes
 * gathered after the main data), and ECC errors are repaired via
 * do_ecc_new().
 *
 * Return: PASS or FAIL.
 */
u16 NAND_Read_Page_Main_Spare(u8 *read_data, u32 block, u16 page,
			u16 page_count)
{
	u32 status = PASS;
	u32 i, j;
	u64 flash_add = 0;
	u32 PageSize = DeviceInfo.wPageSize;
	u32 PageDataSize = DeviceInfo.wPageDataSize;
	u32 PageSpareSize = DeviceInfo.wPageSpareSize;
	u32 eccBytes = DeviceInfo.wECCBytesPerSector;
	u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
	u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
	u32 eccSectorSize;
	u32 flash_bank;
	u32 intr_status = 0;
	u8 *read_data_l = read_data;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u8 *page_main_spare = buf_read_page_main_spare;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* An ECC sector spans all devices connected in parallel */
	eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);

	status = Boundary_Check_Block_Page(block, page, page_count);

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	if (status == PASS) {
		intr_status = intr_status_addresses[flash_bank];

		/* Transfer spare area along with main data */
		iowrite32(1, FlashReg + TRANSFER_SPARE_REG);

		/* Clear pending interrupt bits */
		iowrite32(ioread32(FlashReg + intr_status),
			FlashReg + intr_status);

		while ((status != FAIL) && (page_count > 0)) {
			/* In-bank byte offset of the current page */
			flash_add = (u64)(block %
				(DeviceInfo.wTotalBlocks / totalUsedBanks))
				* DeviceInfo.wBlockDataSize +
				(u64)page * DeviceInfo.wPageDataSize;

			/* 0x43 / 0x2000|count are opaque MODE_10 command
			 * codes — presumably "load page(s)"; confirm
			 * against the controller datasheet */
			index_addr((u32)(MODE_10 | (flash_bank << 24) |
				(flash_add >> DeviceInfo.nBitsInPageDataSize)),
				0x43);
			index_addr((u32)(MODE_10 | (flash_bank << 24) |
				(flash_add >> DeviceInfo.nBitsInPageDataSize)),
				0x2000 | page_count);

			/* Wait until the page is loaded into the buffer */
			while (!(ioread32(FlashReg + intr_status) &
				INTR_STATUS0__LOAD_COMP))
				;

			/* MODE_01: select the page for PIO readback */
			iowrite32((u32)(MODE_01 | (flash_bank << 24) |
				(flash_add >>
				DeviceInfo.nBitsInPageDataSize)),
				FlashMem);

			/* PIO the raw page in through the 32-bit data
			 * port at FlashMem + 0x10 */
			for (i = 0; i < PageSize / 4; i++)
				*(((u32 *)page_main_spare) + i) =
					ioread32(FlashMem + 0x10);

			if (enable_ecc) {
				/* Close the spareSkipBytes reserved gap */
				for (i = PageDataSize; i < PageSize -
							spareSkipBytes; i++)
					page_main_spare[i] = page_main_spare[i +
								spareSkipBytes];

				/* De-interleave data and ECC bytes into
				 * the caller's logical layout */
				for (j = 0;
				j < DeviceInfo.wPageDataSize / eccSectorSize;
				j++) {

					for (i = 0; i < eccSectorSize; i++)
						read_data_l[eccSectorSize * j +
							    i] =
						    page_main_spare[
							(eccSectorSize +
							 eccBytes) * j + i];

					for (i = 0; i < eccBytes; i++)
						read_data_l[PageDataSize +
							    spareFlagBytes +
							    eccBytes * j + i] =
						    page_main_spare[
							(eccSectorSize +
							 eccBytes) * j +
							eccSectorSize + i];
				}

				/* Flag bytes follow the last sector
				 * (j == sector count after the loop) */
				for (i = 0; i < spareFlagBytes; i++)
					read_data_l[PageDataSize + i] =
					    page_main_spare[(eccSectorSize +
							     eccBytes) * j + i];
			} else {
				/* No ECC: raw copy of main + spare */
				for (i = 0; i < (PageDataSize + PageSpareSize);
					i++)
					read_data_l[i] = page_main_spare[i];

			}

			if (enable_ecc) {
				/* Wait for the ECC engine's verdict */
				while (!(ioread32(FlashReg + intr_status) &
					(INTR_STATUS0__ECC_TRANSACTION_DONE |
					INTR_STATUS0__ECC_ERR)))
					;

				if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_ERR) {
					iowrite32(INTR_STATUS0__ECC_ERR,
						FlashReg + intr_status);
					status = do_ecc_new(flash_bank,
						read_data, block, page);
				}

				/* NOTE(review): bitwise-ANDing the two
				 * distinct mask bits below yields 0, so
				 * this branch looks dead; presumably the
				 * intent was to test that BOTH bits are
				 * set.  Behavior is still correct because
				 * the following else-ifs clear each bit
				 * individually — TODO confirm. */
				if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_TRANSACTION_DONE &
					INTR_STATUS0__ECC_ERR) {
					iowrite32(INTR_STATUS0__ECC_ERR |
					INTR_STATUS0__ECC_TRANSACTION_DONE,
					FlashReg + intr_status);
				} else if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_TRANSACTION_DONE) {
					iowrite32(
					INTR_STATUS0__ECC_TRANSACTION_DONE,
					FlashReg + intr_status);
				} else if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_ERR) {
					iowrite32(INTR_STATUS0__ECC_ERR,
						FlashReg + intr_status);
				}
			}

			page++;
			page_count--;
			read_data_l += PageSize;
		}
	}

	/* NOTE(review): the cleanup below runs even when the boundary
	 * check failed, with flash_add still 0 — TODO confirm intended */
	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);

	return status;
}
2049
/*
 * NAND_Pipeline_Write_Ahead - program page_count (>= 2) pages using
 * the controller's pipelined write mode, completed via ddma_isr.
 * @write_data: source buffer
 * @block:      logical block number
 * @page:       first page within the block
 * @page_count: number of pages; fewer than 2 is rejected with FAIL
 *
 * Return: PASS, FAIL, or ERR on completion timeout.
 */
u16 NAND_Pipeline_Write_Ahead(u8 *write_data, u32 block,
			u16 page, u16 page_count)
{
	u16 status = PASS;
	u32 NumPages = page_count;
	u64 flash_add;
	u32 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	int ret;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);

	/* Pipelining needs at least two pages */
	if (page_count < 2)
		status = FAIL;

	if (status != PASS)
		return status;

	/* In-bank byte offset and bank index (blocks striped over banks) */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	intr_status = intr_status_addresses[flash_bank];
	/* Clear pending interrupt bits by writing them back */
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(1, FlashReg + DMA_ENABLE);
	while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);	/* main area only */

	/* Fill the mrst_nand_info structure so ddma_isr() knows how to
	 * finish this request */
	info.state = INT_PIPELINE_WRITE_AHEAD;
	info.write_data = write_data;
	info.flash_bank = flash_bank;
	info.block = block;
	info.page = page;
	info.ret = PASS;

	/* 0x42: opaque MODE_10 pipeline setup command (see datasheet) */
	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);

	ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);

	iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */

	ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
	if (!ret) {
		printk(KERN_ERR "Wait for completion timeout "
			"in %s, Line %d\n", __FILE__, __LINE__);
		status = ERR;
	} else {
		status = info.ret;	/* result set by the ISR path */
	}

	/* Clear leftover interrupt bits and shut the DMA engine down */
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(0, FlashReg + DMA_ENABLE);
	while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	return status;
}
2120
/*
 * NAND_Multiplane_Write - program page_count pages using the
 * controller's multiplane mode.  Un-tested function (original
 * author's note).
 * @write_data: source buffer
 * @block:      logical block number
 * @page:       first page within the block
 * @page_count: number of pages to program
 *
 * Polls the bank's interrupt-status register instead of using the
 * ISR.  status2 latches any PROGRAM_FAIL seen before the final
 * DMA_CMD_COMP, so a failure is not lost when the completion bit
 * resets status to PASS.
 *
 * Return: PASS or FAIL.
 */
u16 NAND_Multiplane_Write(u8 *write_data, u32 block, u16 page,
			u16 page_count)
{
	u16 status = PASS;
	u32 NumPages = page_count;
	u64 flash_add;
	u32 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u16 status2 = PASS;	/* sticky failure latch */
	u32 t;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);
	if (status != PASS)
		return status;

	/* In-bank byte offset and bank index (blocks striped over banks) */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	intr_status = intr_status_addresses[flash_bank];
	/* Clear pending interrupt bits by writing them back */
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
	iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);

	iowrite32(1, FlashReg + DMA_ENABLE);
	while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

	/* 0x42: opaque MODE_10 setup command (see datasheet) */
	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);

	ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);

	while (1) {
		/* Busy-wait for any event bit */
		while (!ioread32(FlashReg + intr_status))
			;

		if (ioread32(FlashReg + intr_status) &
			INTR_STATUS0__DMA_CMD_COMP) {
			iowrite32(INTR_STATUS0__DMA_CMD_COMP,
				FlashReg + intr_status);
			/* Completion: report any earlier latched failure */
			status = PASS;
			if (status2 == FAIL)
				status = FAIL;
			break;
		} else if (ioread32(FlashReg + intr_status) &
			INTR_STATUS0__PROGRAM_FAIL) {
			status2 = FAIL;
			status = FAIL;
			t = ioread32(FlashReg + intr_status) &
				INTR_STATUS0__PROGRAM_FAIL;
			iowrite32(t, FlashReg + intr_status);
		} else {
			/* Drop any other stray status bits */
			iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
				(~INTR_STATUS0__DMA_CMD_COMP),
				FlashReg + intr_status);
		}
	}

	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(0, FlashReg + DMA_ENABLE);

	while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	iowrite32(0, FlashReg + MULTIPLANE_OPERATION);

	return status;
}
2202
2203
2204#if CMD_DMA
2205static irqreturn_t cdma_isr(int irq, void *dev_id)
2206{
2207 struct mrst_nand_info *dev = dev_id;
2208 int first_failed_cmd;
2209
2210 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
2211 __FILE__, __LINE__, __func__);
2212
2213 if (!is_cdma_interrupt())
2214 return IRQ_NONE;
2215
2216 /* Disable controller interrupts */
2217 iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
2218 GLOB_FTL_Event_Status(&first_failed_cmd);
2219 complete(&dev->complete);
2220
2221 return IRQ_HANDLED;
2222}
2223#else
/*
 * handle_nand_int_read - bottom half of a read completion, called from
 * ddma_isr with interrupts already masked.
 * @dev: per-device state describing the in-flight read request
 *
 * With ECC enabled, a read finishes only after BOTH DMA_CMD_COMP and
 * ECC_TRANSACTION_DONE have been seen (ecc_done_OR_dma_comp records
 * the first of the two); ECC_ERR events are repaired on the spot via
 * do_ecc_new().  The result is left in dev->ret for the waiter.
 */
static void handle_nand_int_read(struct mrst_nand_info *dev)
{
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u32 intr_status;
	u32 ecc_done_OR_dma_comp = 0;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	dev->ret = PASS;
	intr_status = intr_status_addresses[dev->flash_bank];

	while (1) {
		if (enable_ecc) {
			if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__ECC_ERR) {
				iowrite32(INTR_STATUS0__ECC_ERR,
					FlashReg + intr_status);
				/* Try to repair the error in place */
				dev->ret = do_ecc_new(dev->flash_bank,
						dev->read_data,
						dev->block, dev->page);
			} else if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__DMA_CMD_COMP) {
				iowrite32(INTR_STATUS0__DMA_CMD_COMP,
					FlashReg + intr_status);
				/* Second of the two events: done */
				if (1 == ecc_done_OR_dma_comp)
					break;
				ecc_done_OR_dma_comp = 1;
			} else if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__ECC_TRANSACTION_DONE) {
				iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
					FlashReg + intr_status);
				if (1 == ecc_done_OR_dma_comp)
					break;
				ecc_done_OR_dma_comp = 1;
			}
		} else {
			/* No ECC: DMA completion alone finishes the read */
			if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__DMA_CMD_COMP) {
				iowrite32(INTR_STATUS0__DMA_CMD_COMP,
					FlashReg + intr_status);
				break;
			} else {
				printk(KERN_ERR "Illegal INTS "
					"(offset addr 0x%x) value: 0x%x\n",
					intr_status,
					ioread32(FlashReg + intr_status));
			}
		}

		/* Drop any other stray status bits */
		iowrite32((~INTR_STATUS0__ECC_ERR) &
			(~INTR_STATUS0__ECC_TRANSACTION_DONE) &
			(~INTR_STATUS0__DMA_CMD_COMP),
			FlashReg + intr_status);
	}
}
2281
/*
 * handle_nand_int_write - bottom half of a write completion, called
 * from ddma_isr with interrupts already masked.
 * @dev: per-device state describing the in-flight write request
 *
 * Spins on the bank's interrupt-status register until DMA_CMD_COMP;
 * any PROGRAM_FAIL seen on the way is latched in a local flag and
 * reported through dev->ret once the DMA completes.
 */
static void handle_nand_int_write(struct mrst_nand_info *dev)
{
	u32 intr_status;
	u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
		INTR_STATUS2, INTR_STATUS3};
	int status = PASS;	/* sticky failure latch */

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	dev->ret = PASS;
	intr_status = intr[dev->flash_bank];

	while (1) {
		/* Busy-wait for any event bit */
		while (!ioread32(FlashReg + intr_status))
			;

		if (ioread32(FlashReg + intr_status) &
			INTR_STATUS0__DMA_CMD_COMP) {
			iowrite32(INTR_STATUS0__DMA_CMD_COMP,
				FlashReg + intr_status);
			/* Completion: report any earlier latched failure */
			if (FAIL == status)
				dev->ret = FAIL;
			break;
		} else if (ioread32(FlashReg + intr_status) &
			INTR_STATUS0__PROGRAM_FAIL) {
			status = FAIL;
			iowrite32(INTR_STATUS0__PROGRAM_FAIL,
				FlashReg + intr_status);
		} else {
			/* Drop any other stray status bits */
			iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
				(~INTR_STATUS0__DMA_CMD_COMP),
				FlashReg + intr_status);
		}
	}
}
2318
/*
 * ddma_isr - shared interrupt handler for descriptor-DMA mode.
 * @irq:    interrupt number (unused)
 * @dev_id: the driver's struct mrst_nand_info
 *
 * Checks that the active bank's status register carries one of the
 * events we care about; if not, clears all four status registers and
 * returns IRQ_NONE (shared line).  Otherwise dispatches on dev->state
 * to the read or write bottom half, then completes the waiting
 * request.
 */
static irqreturn_t ddma_isr(int irq, void *dev_id)
{
	struct mrst_nand_info *dev = dev_id;
	u32 int_mask, ints0, ints1, ints2, ints3, ints_offset;
	u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
		INTR_STATUS2, INTR_STATUS3};

	/* Events this driver reacts to */
	int_mask = INTR_STATUS0__DMA_CMD_COMP |
		INTR_STATUS0__ECC_TRANSACTION_DONE |
		INTR_STATUS0__ECC_ERR |
		INTR_STATUS0__PROGRAM_FAIL |
		INTR_STATUS0__ERASE_FAIL;

	ints0 = ioread32(FlashReg + INTR_STATUS0);
	ints1 = ioread32(FlashReg + INTR_STATUS1);
	ints2 = ioread32(FlashReg + INTR_STATUS2);
	ints3 = ioread32(FlashReg + INTR_STATUS3);

	ints_offset = intr[dev->flash_bank];

	nand_dbg_print(NAND_DBG_DEBUG,
			"INTR0: 0x%x, INTR1: 0x%x, INTR2: 0x%x, INTR3: 0x%x, "
			"DMA_INTR: 0x%x, "
			"dev->state: 0x%x, dev->flash_bank: %d\n",
			ints0, ints1, ints2, ints3,
			ioread32(FlashReg + DMA_INTR),
			dev->state, dev->flash_bank);

	if (!(ioread32(FlashReg + ints_offset) & int_mask)) {
		/* Not ours: acknowledge everything and decline */
		iowrite32(ints0, FlashReg + INTR_STATUS0);
		iowrite32(ints1, FlashReg + INTR_STATUS1);
		iowrite32(ints2, FlashReg + INTR_STATUS2);
		iowrite32(ints3, FlashReg + INTR_STATUS3);
		nand_dbg_print(NAND_DBG_WARN,
			"ddma_isr: Invalid interrupt for NAND controller. "
			"Ignore it\n");
		return IRQ_NONE;
	}

	switch (dev->state) {
	case INT_READ_PAGE_MAIN:
	case INT_PIPELINE_READ_AHEAD:
		/* Disable controller interrupts */
		iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
		handle_nand_int_read(dev);
		break;
	case INT_WRITE_PAGE_MAIN:
	case INT_PIPELINE_WRITE_AHEAD:
		iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
		handle_nand_int_write(dev);
		break;
	default:
		printk(KERN_ERR "ddma_isr - Illegal state: 0x%x\n",
			dev->state);
		return IRQ_NONE;
	}

	dev->state = INT_IDLE_STATE;
	complete(&dev->complete);
	return IRQ_HANDLED;
}
2380#endif
2381
2382static const struct pci_device_id nand_pci_ids[] = {
2383 {
2384 .vendor = 0x8086,
2385 .device = 0x0809,
2386 .subvendor = PCI_ANY_ID,
2387 .subdevice = PCI_ANY_ID,
2388 },
2389 { /* end: all zeroes */ }
2390};
2391
2392static int nand_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
2393{
2394 int ret = -ENODEV;
2395 unsigned long csr_base;
2396 unsigned long csr_len;
2397 struct mrst_nand_info *pndev = &info;
2398
2399 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
2400 __FILE__, __LINE__, __func__);
2401
2402 ret = pci_enable_device(dev);
2403 if (ret) {
2404 printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
2405 return ret;
2406 }
2407
2408 pci_set_master(dev);
2409 pndev->dev = dev;
2410
2411 csr_base = pci_resource_start(dev, 0);
2412 if (!csr_base) {
2413 printk(KERN_ERR "Spectra: pci_resource_start failed!\n");
2414 return -ENODEV;
2415 }
2416
2417 csr_len = pci_resource_len(dev, 0);
2418 if (!csr_len) {
2419 printk(KERN_ERR "Spectra: pci_resource_len failed!\n");
2420 return -ENODEV;
2421 }
2422
2423 ret = pci_request_regions(dev, SPECTRA_NAND_NAME);
2424 if (ret) {
2425 printk(KERN_ERR "Spectra: Unable to request "
2426 "memory region\n");
2427 goto failed_req_csr;
2428 }
2429
2430 pndev->ioaddr = ioremap_nocache(csr_base, csr_len);
2431 if (!pndev->ioaddr) {
2432 printk(KERN_ERR "Spectra: Unable to remap memory region\n");
2433 ret = -ENOMEM;
2434 goto failed_remap_csr;
2435 }
2436 nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08lx -> 0x%p (0x%lx)\n",
2437 csr_base, pndev->ioaddr, csr_len);
2438
2439 init_completion(&pndev->complete);
2440 nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq);
2441
2442#if CMD_DMA
2443 if (request_irq(dev->irq, cdma_isr, IRQF_SHARED,
2444 SPECTRA_NAND_NAME, &info)) {
2445 printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
2446 ret = -ENODEV;
2447 iounmap(pndev->ioaddr);
2448 goto failed_remap_csr;
2449 }
2450#else
2451 if (request_irq(dev->irq, ddma_isr, IRQF_SHARED,
2452 SPECTRA_NAND_NAME, &info)) {
2453 printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
2454 ret = -ENODEV;
2455 iounmap(pndev->ioaddr);
2456 goto failed_remap_csr;
2457 }
2458#endif
2459
2460 pci_set_drvdata(dev, pndev);
2461
2462 return 0;
2463
2464failed_remap_csr:
2465 pci_release_regions(dev);
2466failed_req_csr:
2467
2468 return ret;
2469}
2470
2471static void nand_pci_remove(struct pci_dev *dev)
2472{
2473 struct mrst_nand_info *pndev = pci_get_drvdata(dev);
2474
2475 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
2476 __FILE__, __LINE__, __func__);
2477
2478#if CMD_DMA
2479 free_irq(dev->irq, pndev);
2480#endif
2481 iounmap(pndev->ioaddr);
2482 pci_release_regions(dev);
2483 pci_disable_device(dev);
2484}
2485
2486MODULE_DEVICE_TABLE(pci, nand_pci_ids);
2487
/* PCI driver glue: registered in NAND_Flash_Init(), removed in
 * nand_release_spectra(). */
static struct pci_driver nand_pci_driver = {
	.name = SPECTRA_NAND_NAME,
	.id_table = nand_pci_ids,
	.probe = nand_pci_probe,
	.remove = nand_pci_remove,
};
2494
2495int NAND_Flash_Init(void)
2496{
2497 int retval;
2498 u32 int_mask;
2499
2500 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2501 __FILE__, __LINE__, __func__);
2502
2503 FlashReg = ioremap_nocache(GLOB_HWCTL_REG_BASE,
2504 GLOB_HWCTL_REG_SIZE);
2505 if (!FlashReg) {
2506 printk(KERN_ERR "Spectra: ioremap_nocache failed!");
2507 return -ENOMEM;
2508 }
2509 nand_dbg_print(NAND_DBG_WARN,
2510 "Spectra: Remapped reg base address: "
2511 "0x%p, len: %d\n",
2512 FlashReg, GLOB_HWCTL_REG_SIZE);
2513
2514 FlashMem = ioremap_nocache(GLOB_HWCTL_MEM_BASE,
2515 GLOB_HWCTL_MEM_SIZE);
2516 if (!FlashMem) {
2517 printk(KERN_ERR "Spectra: ioremap_nocache failed!");
2518 iounmap(FlashReg);
2519 return -ENOMEM;
2520 }
2521 nand_dbg_print(NAND_DBG_WARN,
2522 "Spectra: Remapped flash base address: "
2523 "0x%p, len: %d\n",
2524 (void *)FlashMem, GLOB_HWCTL_MEM_SIZE);
2525
2526 nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
2527 "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
2528 "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
2529 "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
2530 ioread32(FlashReg + ACC_CLKS),
2531 ioread32(FlashReg + RE_2_WE),
2532 ioread32(FlashReg + WE_2_RE),
2533 ioread32(FlashReg + ADDR_2_DATA),
2534 ioread32(FlashReg + RDWR_EN_LO_CNT),
2535 ioread32(FlashReg + RDWR_EN_HI_CNT),
2536 ioread32(FlashReg + CS_SETUP_CNT));
2537
2538 NAND_Flash_Reset();
2539
2540 iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
2541
2542#if CMD_DMA
2543 info.pcmds_num = 0;
2544 info.flash_bank = 0;
2545 info.cdma_num = 0;
2546 int_mask = (DMA_INTR__DESC_COMP_CHANNEL0 |
2547 DMA_INTR__DESC_COMP_CHANNEL1 |
2548 DMA_INTR__DESC_COMP_CHANNEL2 |
2549 DMA_INTR__DESC_COMP_CHANNEL3 |
2550 DMA_INTR__MEMCOPY_DESC_COMP);
2551 iowrite32(int_mask, FlashReg + DMA_INTR_EN);
2552 iowrite32(0xFFFF, FlashReg + DMA_INTR);
2553
2554 int_mask = (INTR_STATUS0__ECC_ERR |
2555 INTR_STATUS0__PROGRAM_FAIL |
2556 INTR_STATUS0__ERASE_FAIL);
2557#else
2558 int_mask = INTR_STATUS0__DMA_CMD_COMP |
2559 INTR_STATUS0__ECC_TRANSACTION_DONE |
2560 INTR_STATUS0__ECC_ERR |
2561 INTR_STATUS0__PROGRAM_FAIL |
2562 INTR_STATUS0__ERASE_FAIL;
2563#endif
2564 iowrite32(int_mask, FlashReg + INTR_EN0);
2565 iowrite32(int_mask, FlashReg + INTR_EN1);
2566 iowrite32(int_mask, FlashReg + INTR_EN2);
2567 iowrite32(int_mask, FlashReg + INTR_EN3);
2568
2569 /* Clear all status bits */
2570 iowrite32(0xFFFF, FlashReg + INTR_STATUS0);
2571 iowrite32(0xFFFF, FlashReg + INTR_STATUS1);
2572 iowrite32(0xFFFF, FlashReg + INTR_STATUS2);
2573 iowrite32(0xFFFF, FlashReg + INTR_STATUS3);
2574
2575 iowrite32(0x0F, FlashReg + RB_PIN_ENABLED);
2576 iowrite32(CHIP_EN_DONT_CARE__FLAG, FlashReg + CHIP_ENABLE_DONT_CARE);
2577
2578 /* Should set value for these registers when init */
2579 iowrite32(0, FlashReg + TWO_ROW_ADDR_CYCLES);
2580 iowrite32(1, FlashReg + ECC_ENABLE);
2581 enable_ecc = 1;
2582
2583 retval = pci_register_driver(&nand_pci_driver);
2584 if (retval)
2585 return -ENOMEM;
2586
2587 return PASS;
2588}
2589
2590/* Free memory */
2591int nand_release_spectra(void)
2592{
2593 pci_unregister_driver(&nand_pci_driver);
2594 iounmap(FlashMem);
2595 iounmap(FlashReg);
2596
2597 return 0;
2598}
2599
2600
2601
diff --git a/drivers/staging/spectra/lld_nand.h b/drivers/staging/spectra/lld_nand.h
new file mode 100644
index 00000000000..d08388287da
--- /dev/null
+++ b/drivers/staging/spectra/lld_nand.h
@@ -0,0 +1,131 @@
1/*
2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
20#ifndef _LLD_NAND_
21#define _LLD_NAND_
22
23#ifdef ELDORA
24#include "defs.h"
25#else
26#include "flash.h"
27#include "ffsport.h"
28#endif
29
30#define MODE_00 0x00000000
31#define MODE_01 0x04000000
32#define MODE_10 0x08000000
33#define MODE_11 0x0C000000
34
35
36#define DATA_TRANSFER_MODE 0
37#define PROTECTION_PER_BLOCK 1
38#define LOAD_WAIT_COUNT 2
39#define PROGRAM_WAIT_COUNT 3
40#define ERASE_WAIT_COUNT 4
41#define INT_MONITOR_CYCLE_COUNT 5
42#define READ_BUSY_PIN_ENABLED 6
43#define MULTIPLANE_OPERATION_SUPPORT 7
44#define PRE_FETCH_MODE 8
45#define CE_DONT_CARE_SUPPORT 9
46#define COPYBACK_SUPPORT 10
47#define CACHE_WRITE_SUPPORT 11
48#define CACHE_READ_SUPPORT 12
49#define NUM_PAGES_IN_BLOCK 13
50#define ECC_ENABLE_SELECT 14
51#define WRITE_ENABLE_2_READ_ENABLE 15
52#define ADDRESS_2_DATA 16
53#define READ_ENABLE_2_WRITE_ENABLE 17
54#define TWO_ROW_ADDRESS_CYCLES 18
55#define MULTIPLANE_ADDRESS_RESTRICT 19
56#define ACC_CLOCKS 20
57#define READ_WRITE_ENABLE_LOW_COUNT 21
58#define READ_WRITE_ENABLE_HIGH_COUNT 22
59
60#define ECC_SECTOR_SIZE 512
61#define LLD_MAX_FLASH_BANKS 4
62
/*
 * Per-controller driver state, shared between the PCI probe path and the
 * interrupt handlers. A single static instance ("info") is used.
 */
struct mrst_nand_info {
	struct pci_dev *dev;	/* PCI device set in nand_pci_probe() */
	u32 state;		/* in-flight op: INT_* state checked by ddma_isr() */
	u32 flash_bank;		/* active bank 0..3; selects INTR_STATUSn in the ISR */
	u8 *read_data;		/* NOTE(review): presumably destination buffer for reads — confirm */
	u8 *write_data;		/* NOTE(review): presumably source buffer for writes — confirm */
	u32 block;		/* target block of the current operation */
	u16 page;		/* target page of the current operation */
	u32 use_dma;		/* NOTE(review): assumed nonzero = DMA transfer — confirm */
	void __iomem *ioaddr;	/* Mapped io reg base address */
	int ret;		/* result code of the last async operation */
	u32 pcmds_num;		/* number of pending CDMA commands queued */
	struct pending_cmd *pcmds;
	int cdma_num;		/* CDMA descriptor number in this chan */
	u8 *cdma_desc_buf;	/* CDMA descriptor table */
	u8 *memcp_desc_buf;	/* Memory copy descriptor table */
	dma_addr_t cdma_desc;	/* Mapped CDMA descriptor table */
	dma_addr_t memcp_desc;	/* Mapped memory copy descriptor table */
	struct completion complete;	/* signalled by the ISR when an op finishes */
};
83
84int NAND_Flash_Init(void);
85int nand_release_spectra(void);
86u16 NAND_Flash_Reset(void);
87u16 NAND_Read_Device_ID(void);
88u16 NAND_Erase_Block(u32 flash_add);
89u16 NAND_Write_Page_Main(u8 *write_data, u32 block, u16 page,
90 u16 page_count);
91u16 NAND_Read_Page_Main(u8 *read_data, u32 block, u16 page,
92 u16 page_count);
93u16 NAND_UnlockArrayAll(void);
94u16 NAND_Write_Page_Main_Spare(u8 *write_data, u32 block,
95 u16 page, u16 page_count);
96u16 NAND_Write_Page_Spare(u8 *read_data, u32 block, u16 page,
97 u16 page_count);
98u16 NAND_Read_Page_Main_Spare(u8 *read_data, u32 block, u16 page,
99 u16 page_count);
100u16 NAND_Read_Page_Spare(u8 *read_data, u32 block, u16 page,
101 u16 page_count);
102void NAND_LLD_Enable_Disable_Interrupts(u16 INT_ENABLE);
103u16 NAND_Get_Bad_Block(u32 block);
104u16 NAND_Pipeline_Read_Ahead(u8 *read_data, u32 block, u16 page,
105 u16 page_count);
106u16 NAND_Pipeline_Write_Ahead(u8 *write_data, u32 block,
107 u16 page, u16 page_count);
108u16 NAND_Multiplane_Read(u8 *read_data, u32 block, u16 page,
109 u16 page_count);
110u16 NAND_Multiplane_Write(u8 *write_data, u32 block, u16 page,
111 u16 page_count);
112void NAND_ECC_Ctrl(int enable);
113u16 NAND_Read_Page_Main_Polling(u8 *read_data,
114 u32 block, u16 page, u16 page_count);
115u16 NAND_Pipeline_Read_Ahead_Polling(u8 *read_data,
116 u32 block, u16 page, u16 page_count);
117void Conv_Spare_Data_Log2Phy_Format(u8 *data);
118void Conv_Spare_Data_Phy2Log_Format(u8 *data);
119void Conv_Main_Spare_Data_Log2Phy_Format(u8 *data, u16 page_count);
120void Conv_Main_Spare_Data_Phy2Log_Format(u8 *data, u16 page_count);
121
122extern void __iomem *FlashReg;
123extern void __iomem *FlashMem;
124
125extern int totalUsedBanks;
126extern u32 GLOB_valid_banks[LLD_MAX_FLASH_BANKS];
127
128#endif /*_LLD_NAND_*/
129
130
131
diff --git a/drivers/staging/spectra/nand_regs.h b/drivers/staging/spectra/nand_regs.h
new file mode 100644
index 00000000000..e192e4ae8c1
--- /dev/null
+++ b/drivers/staging/spectra/nand_regs.h
@@ -0,0 +1,619 @@
1/*
2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
20#define DEVICE_RESET 0x0
21#define DEVICE_RESET__BANK0 0x0001
22#define DEVICE_RESET__BANK1 0x0002
23#define DEVICE_RESET__BANK2 0x0004
24#define DEVICE_RESET__BANK3 0x0008
25
26#define TRANSFER_SPARE_REG 0x10
27#define TRANSFER_SPARE_REG__FLAG 0x0001
28
29#define LOAD_WAIT_CNT 0x20
30#define LOAD_WAIT_CNT__VALUE 0xffff
31
32#define PROGRAM_WAIT_CNT 0x30
33#define PROGRAM_WAIT_CNT__VALUE 0xffff
34
35#define ERASE_WAIT_CNT 0x40
36#define ERASE_WAIT_CNT__VALUE 0xffff
37
38#define INT_MON_CYCCNT 0x50
39#define INT_MON_CYCCNT__VALUE 0xffff
40
41#define RB_PIN_ENABLED 0x60
42#define RB_PIN_ENABLED__BANK0 0x0001
43#define RB_PIN_ENABLED__BANK1 0x0002
44#define RB_PIN_ENABLED__BANK2 0x0004
45#define RB_PIN_ENABLED__BANK3 0x0008
46
47#define MULTIPLANE_OPERATION 0x70
48#define MULTIPLANE_OPERATION__FLAG 0x0001
49
50#define MULTIPLANE_READ_ENABLE 0x80
51#define MULTIPLANE_READ_ENABLE__FLAG 0x0001
52
53#define COPYBACK_DISABLE 0x90
54#define COPYBACK_DISABLE__FLAG 0x0001
55
56#define CACHE_WRITE_ENABLE 0xa0
57#define CACHE_WRITE_ENABLE__FLAG 0x0001
58
59#define CACHE_READ_ENABLE 0xb0
60#define CACHE_READ_ENABLE__FLAG 0x0001
61
62#define PREFETCH_MODE 0xc0
63#define PREFETCH_MODE__PREFETCH_EN 0x0001
64#define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0
65
66#define CHIP_ENABLE_DONT_CARE 0xd0
67#define CHIP_EN_DONT_CARE__FLAG 0x01
68
69#define ECC_ENABLE 0xe0
70#define ECC_ENABLE__FLAG 0x0001
71
72#define GLOBAL_INT_ENABLE 0xf0
73#define GLOBAL_INT_EN_FLAG 0x01
74
75#define WE_2_RE 0x100
76#define WE_2_RE__VALUE 0x003f
77
78#define ADDR_2_DATA 0x110
79#define ADDR_2_DATA__VALUE 0x003f
80
81#define RE_2_WE 0x120
82#define RE_2_WE__VALUE 0x003f
83
84#define ACC_CLKS 0x130
85#define ACC_CLKS__VALUE 0x000f
86
87#define NUMBER_OF_PLANES 0x140
88#define NUMBER_OF_PLANES__VALUE 0x0007
89
90#define PAGES_PER_BLOCK 0x150
91#define PAGES_PER_BLOCK__VALUE 0xffff
92
93#define DEVICE_WIDTH 0x160
94#define DEVICE_WIDTH__VALUE 0x0003
95
96#define DEVICE_MAIN_AREA_SIZE 0x170
97#define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff
98
99#define DEVICE_SPARE_AREA_SIZE 0x180
100#define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff
101
102#define TWO_ROW_ADDR_CYCLES 0x190
103#define TWO_ROW_ADDR_CYCLES__FLAG 0x0001
104
105#define MULTIPLANE_ADDR_RESTRICT 0x1a0
106#define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001
107
108#define ECC_CORRECTION 0x1b0
109#define ECC_CORRECTION__VALUE 0x001f
110
111#define READ_MODE 0x1c0
112#define READ_MODE__VALUE 0x000f
113
114#define WRITE_MODE 0x1d0
115#define WRITE_MODE__VALUE 0x000f
116
117#define COPYBACK_MODE 0x1e0
118#define COPYBACK_MODE__VALUE 0x000f
119
120#define RDWR_EN_LO_CNT 0x1f0
121#define RDWR_EN_LO_CNT__VALUE 0x001f
122
123#define RDWR_EN_HI_CNT 0x200
124#define RDWR_EN_HI_CNT__VALUE 0x001f
125
126#define MAX_RD_DELAY 0x210
127#define MAX_RD_DELAY__VALUE 0x000f
128
129#define CS_SETUP_CNT 0x220
130#define CS_SETUP_CNT__VALUE 0x001f
131
132#define SPARE_AREA_SKIP_BYTES 0x230
133#define SPARE_AREA_SKIP_BYTES__VALUE 0x003f
134
135#define SPARE_AREA_MARKER 0x240
136#define SPARE_AREA_MARKER__VALUE 0xffff
137
138#define DEVICES_CONNECTED 0x250
139#define DEVICES_CONNECTED__VALUE 0x0007
140
141#define DIE_MASK 0x260
142#define DIE_MASK__VALUE 0x00ff
143
144#define FIRST_BLOCK_OF_NEXT_PLANE 0x270
145#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff
146
147#define WRITE_PROTECT 0x280
148#define WRITE_PROTECT__FLAG 0x0001
149
150#define RE_2_RE 0x290
151#define RE_2_RE__VALUE 0x003f
152
153#define MANUFACTURER_ID 0x300
154#define MANUFACTURER_ID__VALUE 0x00ff
155
156#define DEVICE_ID 0x310
157#define DEVICE_ID__VALUE 0x00ff
158
159#define DEVICE_PARAM_0 0x320
160#define DEVICE_PARAM_0__VALUE 0x00ff
161
162#define DEVICE_PARAM_1 0x330
163#define DEVICE_PARAM_1__VALUE 0x00ff
164
165#define DEVICE_PARAM_2 0x340
166#define DEVICE_PARAM_2__VALUE 0x00ff
167
168#define LOGICAL_PAGE_DATA_SIZE 0x350
169#define LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff
170
171#define LOGICAL_PAGE_SPARE_SIZE 0x360
172#define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff
173
174#define REVISION 0x370
175#define REVISION__VALUE 0xffff
176
177#define ONFI_DEVICE_FEATURES 0x380
178#define ONFI_DEVICE_FEATURES__VALUE 0x003f
179
180#define ONFI_OPTIONAL_COMMANDS 0x390
181#define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f
182
183#define ONFI_TIMING_MODE 0x3a0
184#define ONFI_TIMING_MODE__VALUE 0x003f
185
186#define ONFI_PGM_CACHE_TIMING_MODE 0x3b0
187#define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f
188
189#define ONFI_DEVICE_NO_OF_LUNS 0x3c0
190#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff
191#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100
192
193#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0
194#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff
195
196#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0
197#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff
198
199#define FEATURES 0x3f0
200#define FEATURES__N_BANKS 0x0003
201#define FEATURES__ECC_MAX_ERR 0x003c
202#define FEATURES__DMA 0x0040
203#define FEATURES__CMD_DMA 0x0080
204#define FEATURES__PARTITION 0x0100
205#define FEATURES__XDMA_SIDEBAND 0x0200
206#define FEATURES__GPREG 0x0400
207#define FEATURES__INDEX_ADDR 0x0800
208
209#define TRANSFER_MODE 0x400
210#define TRANSFER_MODE__VALUE 0x0003
211
212#define INTR_STATUS0 0x410
213#define INTR_STATUS0__ECC_TRANSACTION_DONE 0x0001
214#define INTR_STATUS0__ECC_ERR 0x0002
215#define INTR_STATUS0__DMA_CMD_COMP 0x0004
216#define INTR_STATUS0__TIME_OUT 0x0008
217#define INTR_STATUS0__PROGRAM_FAIL 0x0010
218#define INTR_STATUS0__ERASE_FAIL 0x0020
219#define INTR_STATUS0__LOAD_COMP 0x0040
220#define INTR_STATUS0__PROGRAM_COMP 0x0080
221#define INTR_STATUS0__ERASE_COMP 0x0100
222#define INTR_STATUS0__PIPE_CPYBCK_CMD_COMP 0x0200
223#define INTR_STATUS0__LOCKED_BLK 0x0400
224#define INTR_STATUS0__UNSUP_CMD 0x0800
225#define INTR_STATUS0__INT_ACT 0x1000
226#define INTR_STATUS0__RST_COMP 0x2000
227#define INTR_STATUS0__PIPE_CMD_ERR 0x4000
228#define INTR_STATUS0__PAGE_XFER_INC 0x8000
229
230#define INTR_EN0 0x420
231#define INTR_EN0__ECC_TRANSACTION_DONE 0x0001
232#define INTR_EN0__ECC_ERR 0x0002
233#define INTR_EN0__DMA_CMD_COMP 0x0004
234#define INTR_EN0__TIME_OUT 0x0008
235#define INTR_EN0__PROGRAM_FAIL 0x0010
236#define INTR_EN0__ERASE_FAIL 0x0020
237#define INTR_EN0__LOAD_COMP 0x0040
238#define INTR_EN0__PROGRAM_COMP 0x0080
239#define INTR_EN0__ERASE_COMP 0x0100
240#define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200
241#define INTR_EN0__LOCKED_BLK 0x0400
242#define INTR_EN0__UNSUP_CMD 0x0800
243#define INTR_EN0__INT_ACT 0x1000
244#define INTR_EN0__RST_COMP 0x2000
245#define INTR_EN0__PIPE_CMD_ERR 0x4000
246#define INTR_EN0__PAGE_XFER_INC 0x8000
247
248#define PAGE_CNT0 0x430
249#define PAGE_CNT0__VALUE 0x00ff
250
251#define ERR_PAGE_ADDR0 0x440
252#define ERR_PAGE_ADDR0__VALUE 0xffff
253
254#define ERR_BLOCK_ADDR0 0x450
255#define ERR_BLOCK_ADDR0__VALUE 0xffff
256
257#define INTR_STATUS1 0x460
258#define INTR_STATUS1__ECC_TRANSACTION_DONE 0x0001
259#define INTR_STATUS1__ECC_ERR 0x0002
260#define INTR_STATUS1__DMA_CMD_COMP 0x0004
261#define INTR_STATUS1__TIME_OUT 0x0008
262#define INTR_STATUS1__PROGRAM_FAIL 0x0010
263#define INTR_STATUS1__ERASE_FAIL 0x0020
264#define INTR_STATUS1__LOAD_COMP 0x0040
265#define INTR_STATUS1__PROGRAM_COMP 0x0080
266#define INTR_STATUS1__ERASE_COMP 0x0100
267#define INTR_STATUS1__PIPE_CPYBCK_CMD_COMP 0x0200
268#define INTR_STATUS1__LOCKED_BLK 0x0400
269#define INTR_STATUS1__UNSUP_CMD 0x0800
270#define INTR_STATUS1__INT_ACT 0x1000
271#define INTR_STATUS1__RST_COMP 0x2000
272#define INTR_STATUS1__PIPE_CMD_ERR 0x4000
273#define INTR_STATUS1__PAGE_XFER_INC 0x8000
274
275#define INTR_EN1 0x470
276#define INTR_EN1__ECC_TRANSACTION_DONE 0x0001
277#define INTR_EN1__ECC_ERR 0x0002
278#define INTR_EN1__DMA_CMD_COMP 0x0004
279#define INTR_EN1__TIME_OUT 0x0008
280#define INTR_EN1__PROGRAM_FAIL 0x0010
281#define INTR_EN1__ERASE_FAIL 0x0020
282#define INTR_EN1__LOAD_COMP 0x0040
283#define INTR_EN1__PROGRAM_COMP 0x0080
284#define INTR_EN1__ERASE_COMP 0x0100
285#define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200
286#define INTR_EN1__LOCKED_BLK 0x0400
287#define INTR_EN1__UNSUP_CMD 0x0800
288#define INTR_EN1__INT_ACT 0x1000
289#define INTR_EN1__RST_COMP 0x2000
290#define INTR_EN1__PIPE_CMD_ERR 0x4000
291#define INTR_EN1__PAGE_XFER_INC 0x8000
292
293#define PAGE_CNT1 0x480
294#define PAGE_CNT1__VALUE 0x00ff
295
296#define ERR_PAGE_ADDR1 0x490
297#define ERR_PAGE_ADDR1__VALUE 0xffff
298
299#define ERR_BLOCK_ADDR1 0x4a0
300#define ERR_BLOCK_ADDR1__VALUE 0xffff
301
302#define INTR_STATUS2 0x4b0
303#define INTR_STATUS2__ECC_TRANSACTION_DONE 0x0001
304#define INTR_STATUS2__ECC_ERR 0x0002
305#define INTR_STATUS2__DMA_CMD_COMP 0x0004
306#define INTR_STATUS2__TIME_OUT 0x0008
307#define INTR_STATUS2__PROGRAM_FAIL 0x0010
308#define INTR_STATUS2__ERASE_FAIL 0x0020
309#define INTR_STATUS2__LOAD_COMP 0x0040
310#define INTR_STATUS2__PROGRAM_COMP 0x0080
311#define INTR_STATUS2__ERASE_COMP 0x0100
312#define INTR_STATUS2__PIPE_CPYBCK_CMD_COMP 0x0200
313#define INTR_STATUS2__LOCKED_BLK 0x0400
314#define INTR_STATUS2__UNSUP_CMD 0x0800
315#define INTR_STATUS2__INT_ACT 0x1000
316#define INTR_STATUS2__RST_COMP 0x2000
317#define INTR_STATUS2__PIPE_CMD_ERR 0x4000
318#define INTR_STATUS2__PAGE_XFER_INC 0x8000
319
320#define INTR_EN2 0x4c0
321#define INTR_EN2__ECC_TRANSACTION_DONE 0x0001
322#define INTR_EN2__ECC_ERR 0x0002
323#define INTR_EN2__DMA_CMD_COMP 0x0004
324#define INTR_EN2__TIME_OUT 0x0008
325#define INTR_EN2__PROGRAM_FAIL 0x0010
326#define INTR_EN2__ERASE_FAIL 0x0020
327#define INTR_EN2__LOAD_COMP 0x0040
328#define INTR_EN2__PROGRAM_COMP 0x0080
329#define INTR_EN2__ERASE_COMP 0x0100
330#define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200
331#define INTR_EN2__LOCKED_BLK 0x0400
332#define INTR_EN2__UNSUP_CMD 0x0800
333#define INTR_EN2__INT_ACT 0x1000
334#define INTR_EN2__RST_COMP 0x2000
335#define INTR_EN2__PIPE_CMD_ERR 0x4000
336#define INTR_EN2__PAGE_XFER_INC 0x8000
337
338#define PAGE_CNT2 0x4d0
339#define PAGE_CNT2__VALUE 0x00ff
340
341#define ERR_PAGE_ADDR2 0x4e0
342#define ERR_PAGE_ADDR2__VALUE 0xffff
343
344#define ERR_BLOCK_ADDR2 0x4f0
345#define ERR_BLOCK_ADDR2__VALUE 0xffff
346
347#define INTR_STATUS3 0x500
348#define INTR_STATUS3__ECC_TRANSACTION_DONE 0x0001
349#define INTR_STATUS3__ECC_ERR 0x0002
350#define INTR_STATUS3__DMA_CMD_COMP 0x0004
351#define INTR_STATUS3__TIME_OUT 0x0008
352#define INTR_STATUS3__PROGRAM_FAIL 0x0010
353#define INTR_STATUS3__ERASE_FAIL 0x0020
354#define INTR_STATUS3__LOAD_COMP 0x0040
355#define INTR_STATUS3__PROGRAM_COMP 0x0080
356#define INTR_STATUS3__ERASE_COMP 0x0100
357#define INTR_STATUS3__PIPE_CPYBCK_CMD_COMP 0x0200
358#define INTR_STATUS3__LOCKED_BLK 0x0400
359#define INTR_STATUS3__UNSUP_CMD 0x0800
360#define INTR_STATUS3__INT_ACT 0x1000
361#define INTR_STATUS3__RST_COMP 0x2000
362#define INTR_STATUS3__PIPE_CMD_ERR 0x4000
363#define INTR_STATUS3__PAGE_XFER_INC 0x8000
364
365#define INTR_EN3 0x510
366#define INTR_EN3__ECC_TRANSACTION_DONE 0x0001
367#define INTR_EN3__ECC_ERR 0x0002
368#define INTR_EN3__DMA_CMD_COMP 0x0004
369#define INTR_EN3__TIME_OUT 0x0008
370#define INTR_EN3__PROGRAM_FAIL 0x0010
371#define INTR_EN3__ERASE_FAIL 0x0020
372#define INTR_EN3__LOAD_COMP 0x0040
373#define INTR_EN3__PROGRAM_COMP 0x0080
374#define INTR_EN3__ERASE_COMP 0x0100
375#define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200
376#define INTR_EN3__LOCKED_BLK 0x0400
377#define INTR_EN3__UNSUP_CMD 0x0800
378#define INTR_EN3__INT_ACT 0x1000
379#define INTR_EN3__RST_COMP 0x2000
380#define INTR_EN3__PIPE_CMD_ERR 0x4000
381#define INTR_EN3__PAGE_XFER_INC 0x8000
382
383#define PAGE_CNT3 0x520
384#define PAGE_CNT3__VALUE 0x00ff
385
386#define ERR_PAGE_ADDR3 0x530
387#define ERR_PAGE_ADDR3__VALUE 0xffff
388
389#define ERR_BLOCK_ADDR3 0x540
390#define ERR_BLOCK_ADDR3__VALUE 0xffff
391
392#define DATA_INTR 0x550
393#define DATA_INTR__WRITE_SPACE_AV 0x0001
394#define DATA_INTR__READ_DATA_AV 0x0002
395
396#define DATA_INTR_EN 0x560
397#define DATA_INTR_EN__WRITE_SPACE_AV 0x0001
398#define DATA_INTR_EN__READ_DATA_AV 0x0002
399
400#define GPREG_0 0x570
401#define GPREG_0__VALUE 0xffff
402
403#define GPREG_1 0x580
404#define GPREG_1__VALUE 0xffff
405
406#define GPREG_2 0x590
407#define GPREG_2__VALUE 0xffff
408
409#define GPREG_3 0x5a0
410#define GPREG_3__VALUE 0xffff
411
412#define ECC_THRESHOLD 0x600
413#define ECC_THRESHOLD__VALUE 0x03ff
414
415#define ECC_ERROR_BLOCK_ADDRESS 0x610
416#define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff
417
418#define ECC_ERROR_PAGE_ADDRESS 0x620
419#define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff
420#define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000
421
422#define ECC_ERROR_ADDRESS 0x630
423#define ECC_ERROR_ADDRESS__OFFSET 0x0fff
424#define ECC_ERROR_ADDRESS__SECTOR_NR 0xf000
425
426#define ERR_CORRECTION_INFO 0x640
427#define ERR_CORRECTION_INFO__BYTEMASK 0x00ff
428#define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00
429#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000
430#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000
431
432#define DMA_ENABLE 0x700
433#define DMA_ENABLE__FLAG 0x0001
434
435#define IGNORE_ECC_DONE 0x710
436#define IGNORE_ECC_DONE__FLAG 0x0001
437
438#define DMA_INTR 0x720
439#define DMA_INTR__TARGET_ERROR 0x0001
440#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002
441#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004
442#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008
443#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010
444#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020
445
446#define DMA_INTR_EN 0x730
447#define DMA_INTR_EN__TARGET_ERROR 0x0001
448#define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002
449#define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004
450#define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008
451#define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010
452#define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020
453
454#define TARGET_ERR_ADDR_LO 0x740
455#define TARGET_ERR_ADDR_LO__VALUE 0xffff
456
457#define TARGET_ERR_ADDR_HI 0x750
458#define TARGET_ERR_ADDR_HI__VALUE 0xffff
459
460#define CHNL_ACTIVE 0x760
461#define CHNL_ACTIVE__CHANNEL0 0x0001
462#define CHNL_ACTIVE__CHANNEL1 0x0002
463#define CHNL_ACTIVE__CHANNEL2 0x0004
464#define CHNL_ACTIVE__CHANNEL3 0x0008
465
466#define ACTIVE_SRC_ID 0x800
467#define ACTIVE_SRC_ID__VALUE 0x00ff
468
469#define PTN_INTR 0x810
470#define PTN_INTR__CONFIG_ERROR 0x0001
471#define PTN_INTR__ACCESS_ERROR_BANK0 0x0002
472#define PTN_INTR__ACCESS_ERROR_BANK1 0x0004
473#define PTN_INTR__ACCESS_ERROR_BANK2 0x0008
474#define PTN_INTR__ACCESS_ERROR_BANK3 0x0010
475#define PTN_INTR__REG_ACCESS_ERROR 0x0020
476
477#define PTN_INTR_EN 0x820
478#define PTN_INTR_EN__CONFIG_ERROR 0x0001
479#define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002
480#define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004
481#define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008
482#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010
483#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020
484
485#define PERM_SRC_ID_0 0x830
486#define PERM_SRC_ID_0__SRCID 0x00ff
487#define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800
488#define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000
489#define PERM_SRC_ID_0__READ_ACTIVE 0x4000
490#define PERM_SRC_ID_0__PARTITION_VALID 0x8000
491
492#define MIN_BLK_ADDR_0 0x840
493#define MIN_BLK_ADDR_0__VALUE 0xffff
494
495#define MAX_BLK_ADDR_0 0x850
496#define MAX_BLK_ADDR_0__VALUE 0xffff
497
498#define MIN_MAX_BANK_0 0x860
499#define MIN_MAX_BANK_0__MIN_VALUE 0x0003
500#define MIN_MAX_BANK_0__MAX_VALUE 0x000c
501
502#define PERM_SRC_ID_1 0x870
503#define PERM_SRC_ID_1__SRCID 0x00ff
504#define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800
505#define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000
506#define PERM_SRC_ID_1__READ_ACTIVE 0x4000
507#define PERM_SRC_ID_1__PARTITION_VALID 0x8000
508
509#define MIN_BLK_ADDR_1 0x880
510#define MIN_BLK_ADDR_1__VALUE 0xffff
511
512#define MAX_BLK_ADDR_1 0x890
513#define MAX_BLK_ADDR_1__VALUE 0xffff
514
515#define MIN_MAX_BANK_1 0x8a0
516#define MIN_MAX_BANK_1__MIN_VALUE 0x0003
517#define MIN_MAX_BANK_1__MAX_VALUE 0x000c
518
519#define PERM_SRC_ID_2 0x8b0
520#define PERM_SRC_ID_2__SRCID 0x00ff
521#define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800
522#define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000
523#define PERM_SRC_ID_2__READ_ACTIVE 0x4000
524#define PERM_SRC_ID_2__PARTITION_VALID 0x8000
525
526#define MIN_BLK_ADDR_2 0x8c0
527#define MIN_BLK_ADDR_2__VALUE 0xffff
528
529#define MAX_BLK_ADDR_2 0x8d0
530#define MAX_BLK_ADDR_2__VALUE 0xffff
531
532#define MIN_MAX_BANK_2 0x8e0
533#define MIN_MAX_BANK_2__MIN_VALUE 0x0003
534#define MIN_MAX_BANK_2__MAX_VALUE 0x000c
535
536#define PERM_SRC_ID_3 0x8f0
537#define PERM_SRC_ID_3__SRCID 0x00ff
538#define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800
539#define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000
540#define PERM_SRC_ID_3__READ_ACTIVE 0x4000
541#define PERM_SRC_ID_3__PARTITION_VALID 0x8000
542
543#define MIN_BLK_ADDR_3 0x900
544#define MIN_BLK_ADDR_3__VALUE 0xffff
545
546#define MAX_BLK_ADDR_3 0x910
547#define MAX_BLK_ADDR_3__VALUE 0xffff
548
549#define MIN_MAX_BANK_3 0x920
550#define MIN_MAX_BANK_3__MIN_VALUE 0x0003
551#define MIN_MAX_BANK_3__MAX_VALUE 0x000c
552
553#define PERM_SRC_ID_4 0x930
554#define PERM_SRC_ID_4__SRCID 0x00ff
555#define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800
556#define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000
557#define PERM_SRC_ID_4__READ_ACTIVE 0x4000
558#define PERM_SRC_ID_4__PARTITION_VALID 0x8000
559
560#define MIN_BLK_ADDR_4 0x940
561#define MIN_BLK_ADDR_4__VALUE 0xffff
562
563#define MAX_BLK_ADDR_4 0x950
564#define MAX_BLK_ADDR_4__VALUE 0xffff
565
566#define MIN_MAX_BANK_4 0x960
567#define MIN_MAX_BANK_4__MIN_VALUE 0x0003
568#define MIN_MAX_BANK_4__MAX_VALUE 0x000c
569
570#define PERM_SRC_ID_5 0x970
571#define PERM_SRC_ID_5__SRCID 0x00ff
572#define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800
573#define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000
574#define PERM_SRC_ID_5__READ_ACTIVE 0x4000
575#define PERM_SRC_ID_5__PARTITION_VALID 0x8000
576
577#define MIN_BLK_ADDR_5 0x980
578#define MIN_BLK_ADDR_5__VALUE 0xffff
579
580#define MAX_BLK_ADDR_5 0x990
581#define MAX_BLK_ADDR_5__VALUE 0xffff
582
583#define MIN_MAX_BANK_5 0x9a0
584#define MIN_MAX_BANK_5__MIN_VALUE 0x0003
585#define MIN_MAX_BANK_5__MAX_VALUE 0x000c
586
587#define PERM_SRC_ID_6 0x9b0
588#define PERM_SRC_ID_6__SRCID 0x00ff
589#define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800
590#define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000
591#define PERM_SRC_ID_6__READ_ACTIVE 0x4000
592#define PERM_SRC_ID_6__PARTITION_VALID 0x8000
593
594#define MIN_BLK_ADDR_6 0x9c0
595#define MIN_BLK_ADDR_6__VALUE 0xffff
596
597#define MAX_BLK_ADDR_6 0x9d0
598#define MAX_BLK_ADDR_6__VALUE 0xffff
599
600#define MIN_MAX_BANK_6 0x9e0
601#define MIN_MAX_BANK_6__MIN_VALUE 0x0003
602#define MIN_MAX_BANK_6__MAX_VALUE 0x000c
603
604#define PERM_SRC_ID_7 0x9f0
605#define PERM_SRC_ID_7__SRCID 0x00ff
606#define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800
607#define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000
608#define PERM_SRC_ID_7__READ_ACTIVE 0x4000
609#define PERM_SRC_ID_7__PARTITION_VALID 0x8000
610
611#define MIN_BLK_ADDR_7 0xa00
612#define MIN_BLK_ADDR_7__VALUE 0xffff
613
614#define MAX_BLK_ADDR_7 0xa10
615#define MAX_BLK_ADDR_7__VALUE 0xffff
616
617#define MIN_MAX_BANK_7 0xa20
618#define MIN_MAX_BANK_7__MIN_VALUE 0x0003
619#define MIN_MAX_BANK_7__MAX_VALUE 0x000c
diff --git a/drivers/staging/spectra/spectraswconfig.h b/drivers/staging/spectra/spectraswconfig.h
new file mode 100644
index 00000000000..557c091953d
--- /dev/null
+++ b/drivers/staging/spectra/spectraswconfig.h
@@ -0,0 +1,82 @@
/*
 * NAND Flash Controller Device Driver
 * Copyright (c) 2009, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */

#ifndef _SPECTRASWCONFIG_
#define _SPECTRASWCONFIG_

/* NAND driver version */
#define GLOB_VERSION		"driver version 20100311"


/***** Common Parameters *****/
#define	RETRY_TIMES                   3

#define	READ_BADBLOCK_INFO            1
#define	READBACK_VERIFY               0
#define	AUTO_FORMAT_FLASH             0

/***** Cache Parameters *****/
#define	CACHE_ITEM_NUM            128
#define	BLK_NUM_FOR_L2_CACHE        16

/***** Block Table Parameters *****/
#define	BLOCK_TABLE_INDEX             0

/***** Wear Leveling Parameters *****/
#define	WEAR_LEVELING_GATE         0x10
#define	WEAR_LEVELING_BLOCK_NUM      10

#define	DEBUG_BNDRY             0

/***** Product Feature Support *****/
/*
 * NOTE(review): the original code wrote
 *     #define FLASH_EMU defined(CONFIG_MRST_NAND_EMU)
 * Producing the `defined' operator through macro expansion in an #if is
 * undefined behavior (C99 6.10.1p4), and such a macro is unusable in
 * ordinary C expressions like `if (FLASH_EMU)'.  Map each Kconfig option
 * to a plain 0/1 value instead; this is backward-compatible for all
 * `#if FLASH_xxx' users and additionally valid in runtime code.
 */
#ifdef CONFIG_MRST_NAND_EMU
#define FLASH_EMU	1
#else
#define FLASH_EMU	0
#endif

#ifdef CONFIG_MRST_NAND_HW
#define FLASH_NAND	1
#else
#define FLASH_NAND	0
#endif

#ifdef CONFIG_MRST_NAND_MTD
#define FLASH_MTD	1
#else
#define FLASH_MTD	0
#endif

#ifdef CONFIG_MRST_NAND_HW_DMA
#define CMD_DMA		1
#else
#define CMD_DMA		0
#endif

#define SPECTRA_PARTITION_ID    0

/* Enable this macro if the number of flash blocks is larger than 16K. */
#define SUPPORT_LARGE_BLOCKNUM  1

/**** Block Table and Reserved Block Parameters *****/
#define SPECTRA_START_BLOCK     3
/* Raised from the original value of 30. */
#define NUM_FREE_BLOCKS_GATE    60

/**** Hardware Parameters ****/
#define GLOB_HWCTL_REG_BASE     0xFFA40000
#define GLOB_HWCTL_REG_SIZE     4096

#define GLOB_HWCTL_MEM_BASE     0xFFA48000
#define GLOB_HWCTL_MEM_SIZE     4096

/* KBV - Updated to LNW scratch register address */
#define SCRATCH_REG_ADDR        0xFF108018
#define SCRATCH_REG_SIZE        64

#define GLOB_HWCTL_DEFAULT_BLKS 2048

#define SUPPORT_15BITECC        1
#define SUPPORT_8BITECC         1

#define ONFI_BLOOM_TIME         0
#define MODE5_WORKAROUND        1

#endif /*_SPECTRASWCONFIG_*/