Diffstat (limited to 'include/linux/mtd')
-rw-r--r--  include/linux/mtd/cfi.h          16
-rw-r--r--  include/linux/mtd/cfi_endian.h   76
-rw-r--r--  include/linux/mtd/gpmi-nand.h    68
-rw-r--r--  include/linux/mtd/map.h           3
-rw-r--r--  include/linux/mtd/mtd.h         332
-rw-r--r--  include/linux/mtd/nand.h          1
-rw-r--r--  include/linux/mtd/physmap.h       1
7 files changed, 362 insertions, 135 deletions
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index d24925492972..d5d2ec6494bb 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -354,10 +354,10 @@ static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cf
                 onecmd = cmd;
                 break;
         case 2:
-                onecmd = cpu_to_cfi16(cmd);
+                onecmd = cpu_to_cfi16(map, cmd);
                 break;
         case 4:
-                onecmd = cpu_to_cfi32(cmd);
+                onecmd = cpu_to_cfi32(map, cmd);
                 break;
         }
 
@@ -437,10 +437,10 @@ static inline unsigned long cfi_merge_status(map_word val, struct map_info *map,
         case 1:
                 break;
         case 2:
-                res = cfi16_to_cpu(res);
+                res = cfi16_to_cpu(map, res);
                 break;
         case 4:
-                res = cfi32_to_cpu(res);
+                res = cfi32_to_cpu(map, res);
                 break;
         default: BUG();
         }
@@ -480,12 +480,12 @@ static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
         if (map_bankwidth_is_1(map)) {
                 return val.x[0];
         } else if (map_bankwidth_is_2(map)) {
-                return cfi16_to_cpu(val.x[0]);
+                return cfi16_to_cpu(map, val.x[0]);
         } else {
                 /* No point in a 64-bit byteswap since that would just be
                    swapping the responses from different chips, and we are
                    only interested in one chip (a representative sample) */
-                return cfi32_to_cpu(val.x[0]);
+                return cfi32_to_cpu(map, val.x[0]);
         }
 }
 
@@ -496,12 +496,12 @@ static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
         if (map_bankwidth_is_1(map)) {
                 return val.x[0] & 0xff;
         } else if (map_bankwidth_is_2(map)) {
-                return cfi16_to_cpu(val.x[0]);
+                return cfi16_to_cpu(map, val.x[0]);
         } else {
                 /* No point in a 64-bit byteswap since that would just be
                    swapping the responses from different chips, and we are
                    only interested in one chip (a representative sample) */
-                return cfi32_to_cpu(val.x[0]);
+                return cfi32_to_cpu(map, val.x[0]);
         }
 }
 
diff --git a/include/linux/mtd/cfi_endian.h b/include/linux/mtd/cfi_endian.h
index 51cc3f5917a8..b97a625071f8 100644
--- a/include/linux/mtd/cfi_endian.h
+++ b/include/linux/mtd/cfi_endian.h
@@ -19,53 +19,35 @@
 
 #include <asm/byteorder.h>
 
-#ifndef CONFIG_MTD_CFI_ADV_OPTIONS
-
-#define CFI_HOST_ENDIAN
-
-#else
-
-#ifdef CONFIG_MTD_CFI_NOSWAP
-#define CFI_HOST_ENDIAN
-#endif
-
-#ifdef CONFIG_MTD_CFI_LE_BYTE_SWAP
-#define CFI_LITTLE_ENDIAN
-#endif
-
-#ifdef CONFIG_MTD_CFI_BE_BYTE_SWAP
-#define CFI_BIG_ENDIAN
-#endif
-
-#endif
-
-#if defined(CFI_LITTLE_ENDIAN)
-#define cpu_to_cfi8(x) (x)
-#define cfi8_to_cpu(x) (x)
-#define cpu_to_cfi16(x) cpu_to_le16(x)
-#define cpu_to_cfi32(x) cpu_to_le32(x)
-#define cpu_to_cfi64(x) cpu_to_le64(x)
-#define cfi16_to_cpu(x) le16_to_cpu(x)
-#define cfi32_to_cpu(x) le32_to_cpu(x)
-#define cfi64_to_cpu(x) le64_to_cpu(x)
-#elif defined (CFI_BIG_ENDIAN)
-#define cpu_to_cfi8(x) (x)
-#define cfi8_to_cpu(x) (x)
-#define cpu_to_cfi16(x) cpu_to_be16(x)
-#define cpu_to_cfi32(x) cpu_to_be32(x)
-#define cpu_to_cfi64(x) cpu_to_be64(x)
-#define cfi16_to_cpu(x) be16_to_cpu(x)
-#define cfi32_to_cpu(x) be32_to_cpu(x)
-#define cfi64_to_cpu(x) be64_to_cpu(x)
-#elif defined (CFI_HOST_ENDIAN)
-#define cpu_to_cfi8(x) (x)
-#define cfi8_to_cpu(x) (x)
-#define cpu_to_cfi16(x) (x)
-#define cpu_to_cfi32(x) (x)
-#define cpu_to_cfi64(x) (x)
-#define cfi16_to_cpu(x) (x)
-#define cfi32_to_cpu(x) (x)
-#define cfi64_to_cpu(x) (x)
+#define CFI_HOST_ENDIAN 1
+#define CFI_LITTLE_ENDIAN 2
+#define CFI_BIG_ENDIAN 3
+
+#if !defined(CONFIG_MTD_CFI_ADV_OPTIONS) || defined(CONFIG_MTD_CFI_NOSWAP)
+#define CFI_DEFAULT_ENDIAN CFI_HOST_ENDIAN
+#elif defined(CONFIG_MTD_CFI_LE_BYTE_SWAP)
+#define CFI_DEFAULT_ENDIAN CFI_LITTLE_ENDIAN
+#elif defined(CONFIG_MTD_CFI_BE_BYTE_SWAP)
+#define CFI_DEFAULT_ENDIAN CFI_BIG_ENDIAN
 #else
 #error No CFI endianness defined
 #endif
+
+#define cfi_default(s) ((s)?:CFI_DEFAULT_ENDIAN)
+#define cfi_be(s) (cfi_default(s) == CFI_BIG_ENDIAN)
+#define cfi_le(s) (cfi_default(s) == CFI_LITTLE_ENDIAN)
+#define cfi_host(s) (cfi_default(s) == CFI_HOST_ENDIAN)
+
+#define cpu_to_cfi8(map, x) (x)
+#define cfi8_to_cpu(map, x) (x)
+#define cpu_to_cfi16(map, x) _cpu_to_cfi(16, (map)->swap, (x))
+#define cpu_to_cfi32(map, x) _cpu_to_cfi(32, (map)->swap, (x))
+#define cpu_to_cfi64(map, x) _cpu_to_cfi(64, (map)->swap, (x))
+#define cfi16_to_cpu(map, x) _cfi_to_cpu(16, (map)->swap, (x))
+#define cfi32_to_cpu(map, x) _cfi_to_cpu(32, (map)->swap, (x))
+#define cfi64_to_cpu(map, x) _cfi_to_cpu(64, (map)->swap, (x))
+
+#define _cpu_to_cfi(w, s, x) (cfi_host(s)?(x):_swap_to_cfi(w, s, x))
+#define _cfi_to_cpu(w, s, x) (cfi_host(s)?(x):_swap_to_cpu(w, s, x))
+#define _swap_to_cfi(w, s, x) (cfi_be(s)?cpu_to_be##w(x):cpu_to_le##w(x))
+#define _swap_to_cpu(w, s, x) (cfi_be(s)?be##w##_to_cpu(x):le##w##_to_cpu(x))
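
Note on the reworked header above: the conversion macros now take the map, and a zero `swap` field falls back through cfi_default() to the Kconfig-selected CFI_DEFAULT_ENDIAN. A minimal sketch of a consumer (the helper name is illustrative, not part of the patch), assuming only what the macros above define:

#include <linux/mtd/map.h>
#include <linux/mtd/cfi_endian.h>

/* Illustrative helper: convert one 16-bit CFI response for this mapping.
 * With map->swap == 0 the Kconfig default applies; with
 * map->swap == CFI_BIG_ENDIAN the value is treated as big-endian. */
static inline u16 example_cfi16_query(struct map_info *map, u16 raw)
{
        return cfi16_to_cpu(map, raw);
}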
diff --git a/include/linux/mtd/gpmi-nand.h b/include/linux/mtd/gpmi-nand.h
new file mode 100644
index 000000000000..69b6dbf46b5e
--- /dev/null
+++ b/include/linux/mtd/gpmi-nand.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef __MACH_MXS_GPMI_NAND_H__
+#define __MACH_MXS_GPMI_NAND_H__
+
+/* The size of the resources is fixed. */
+#define GPMI_NAND_RES_SIZE	6
+
+/* Resource names for the GPMI NAND driver. */
+#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "GPMI NAND GPMI Registers"
+#define GPMI_NAND_GPMI_INTERRUPT_RES_NAME  "GPMI NAND GPMI Interrupt"
+#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "GPMI NAND BCH Registers"
+#define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "GPMI NAND BCH Interrupt"
+#define GPMI_NAND_DMA_CHANNELS_RES_NAME    "GPMI NAND DMA Channels"
+#define GPMI_NAND_DMA_INTERRUPT_RES_NAME   "GPMI NAND DMA Interrupt"
+
+/**
+ * struct gpmi_nand_platform_data - GPMI NAND driver platform data.
+ *
+ * This structure communicates platform-specific information to the GPMI NAND
+ * driver that can't be expressed as resources.
+ *
+ * @platform_init:           A pointer to a function the driver will call to
+ *                           initialize the platform (e.g., set up the pin mux).
+ * @min_prop_delay_in_ns:    Minimum propagation delay of GPMI signals to and
+ *                           from the NAND Flash device, in nanoseconds.
+ * @max_prop_delay_in_ns:    Maximum propagation delay of GPMI signals to and
+ *                           from the NAND Flash device, in nanoseconds.
+ * @max_chip_count:          The maximum number of chips for which the driver
+ *                           should configure the hardware. This value most
+ *                           likely reflects the number of pins that are
+ *                           connected to a NAND Flash device. If this is
+ *                           greater than the SoC hardware can support, the
+ *                           driver will print a message and fail to initialize.
+ * @partitions:              An optional pointer to an array of partition
+ *                           descriptions.
+ * @partition_count:         The number of elements in the partitions array.
+ */
+struct gpmi_nand_platform_data {
+	/* SoC hardware information. */
+	int		(*platform_init)(void);
+
+	/* NAND Flash information. */
+	unsigned int	min_prop_delay_in_ns;
+	unsigned int	max_prop_delay_in_ns;
+	unsigned int	max_chip_count;
+
+	/* Medium information. */
+	struct		mtd_partition *partitions;
+	unsigned	partition_count;
+};
+#endif
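
For orientation only, a board-file sketch of how this platform data might be filled in; the function and partition names and the delay values are illustrative, not taken from any real board:

#include <linux/kernel.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/gpmi-nand.h>

static int example_gpmi_platform_init(void)
{
        /* Hypothetical hook: set up the GPMI pin mux here. */
        return 0;
}

static struct mtd_partition example_nand_partitions[] = {
        { .name = "boot",   .offset = 0,                  .size = 16 * 1024 * 1024 },
        { .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
};

static struct gpmi_nand_platform_data example_gpmi_pdata = {
        .platform_init        = example_gpmi_platform_init,
        .min_prop_delay_in_ns = 5,
        .max_prop_delay_in_ns = 9,
        .max_chip_count       = 1,
        .partitions           = example_nand_partitions,
        .partition_count      = ARRAY_SIZE(example_nand_partitions),
};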
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index a9e6ba46865e..94e924e2ecd5 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -26,7 +26,7 @@
 #include <linux/list.h>
 #include <linux/string.h>
 #include <linux/bug.h>
-
+#include <linux/kernel.h>
 
 #include <asm/unaligned.h>
 #include <asm/system.h>
@@ -214,6 +214,7 @@ struct map_info {
         void __iomem *virt;
         void *cached;
 
+        int swap; /* this mapping's byte-swapping requirement */
         int bankwidth; /* in octets. This isn't necessarily the width
                        of actual bus cycles -- it's the repeat interval
                        in bytes, before you are talking to the first chip again.
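
The new `swap` field is what the reworked cfi_endian.h macros consult, so a map driver that needs a fixed byte order can request it per mapping instead of relying on the global Kconfig switch. A small sketch with illustrative values:

#include <linux/mtd/map.h>
#include <linux/mtd/cfi_endian.h>

static struct map_info example_nor_map = {
        .name      = "example-nor",
        .bankwidth = 2,
        .swap      = CFI_BIG_ENDIAN,   /* leave as 0 to keep the built-in default */
};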
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 9f5b312af783..d43dc25af82e 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -171,87 +171,60 @@ struct mtd_info {
         struct mtd_erase_region_info *eraseregions;
 
         /*
-         * Erase is an asynchronous operation.  Device drivers are supposed
-         * to call instr->callback() whenever the operation completes, even
-         * if it completes with a failure.
-         * Callers are supposed to pass a callback function and wait for it
-         * to be called before writing to the block.
+         * Do not call via these pointers, use corresponding mtd_*()
+         * wrappers instead.
          */
         int (*erase) (struct mtd_info *mtd, struct erase_info *instr);
-
-        /* This stuff for eXecute-In-Place */
-        /* phys is optional and may be set to NULL */
         int (*point) (struct mtd_info *mtd, loff_t from, size_t len,
                       size_t *retlen, void **virt, resource_size_t *phys);
-
-        /* We probably shouldn't allow XIP if the unpoint isn't a NULL */
         void (*unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
-
-        /* Allow NOMMU mmap() to directly map the device (if not NULL)
-         * - return the address to which the offset maps
-         * - return -ENOSYS to indicate refusal to do the mapping
-         */
         unsigned long (*get_unmapped_area) (struct mtd_info *mtd,
                                             unsigned long len,
                                             unsigned long offset,
                                             unsigned long flags);
-
-        /* Backing device capabilities for this device
-         * - provides mmap capabilities
-         */
-        struct backing_dev_info *backing_dev_info;
-
-
-        int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
-        int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
-
-        /* In blackbox flight recorder like scenarios we want to make successful
-           writes in interrupt context. panic_write() is only intended to be
-           called when its known the kernel is about to panic and we need the
-           write to succeed. Since the kernel is not going to be running for much
-           longer, this function can break locks and delay to ensure the write
-           succeeds (but not sleep). */
-
-        int (*panic_write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
-
+        int (*read) (struct mtd_info *mtd, loff_t from, size_t len,
+                     size_t *retlen, u_char *buf);
+        int (*write) (struct mtd_info *mtd, loff_t to, size_t len,
+                      size_t *retlen, const u_char *buf);
+        int (*panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
+                            size_t *retlen, const u_char *buf);
         int (*read_oob) (struct mtd_info *mtd, loff_t from,
                          struct mtd_oob_ops *ops);
         int (*write_oob) (struct mtd_info *mtd, loff_t to,
                           struct mtd_oob_ops *ops);
-
-        /*
-         * Methods to access the protection register area, present in some
-         * flash devices. The user data is one time programmable but the
-         * factory data is read only.
-         */
-        int (*get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf, size_t len);
-        int (*read_fact_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
-        int (*get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf, size_t len);
-        int (*read_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
-        int (*write_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
-        int (*lock_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len);
-
-        /* kvec-based read/write methods.
-           NB: The 'count' parameter is the number of _vectors_, each of
-               which contains an (ofs, len) tuple.
-        */
-        int (*writev) (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen);
-
-        /* Sync */
+        int (*get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
+                                   size_t len);
+        int (*read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
+                                   size_t len, size_t *retlen, u_char *buf);
+        int (*get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
+                                   size_t len);
+        int (*read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
+                                   size_t len, size_t *retlen, u_char *buf);
+        int (*write_user_prot_reg) (struct mtd_info *mtd, loff_t to, size_t len,
+                                    size_t *retlen, u_char *buf);
+        int (*lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
+                                   size_t len);
+        int (*writev) (struct mtd_info *mtd, const struct kvec *vecs,
+                       unsigned long count, loff_t to, size_t *retlen);
         void (*sync) (struct mtd_info *mtd);
-
-        /* Chip-supported device locking */
         int (*lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
         int (*unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
         int (*is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
-
-        /* Power Management functions */
+        int (*block_isbad) (struct mtd_info *mtd, loff_t ofs);
+        int (*block_markbad) (struct mtd_info *mtd, loff_t ofs);
         int (*suspend) (struct mtd_info *mtd);
         void (*resume) (struct mtd_info *mtd);
+        /*
+         * If the driver is something smart, like UBI, it may need to maintain
+         * its own reference counting. The below functions are only for driver.
+         */
+        int (*get_device) (struct mtd_info *mtd);
+        void (*put_device) (struct mtd_info *mtd);
 
-        /* Bad block management functions */
-        int (*block_isbad) (struct mtd_info *mtd, loff_t ofs);
-        int (*block_markbad) (struct mtd_info *mtd, loff_t ofs);
+        /* Backing device capabilities for this device
+         * - provides mmap capabilities
+         */
+        struct backing_dev_info *backing_dev_info;
 
         struct notifier_block reboot_notifier;  /* default mode before reboot */
 
@@ -265,18 +238,216 @@ struct mtd_info {
         struct module *owner;
         struct device dev;
         int usecount;
-
-        /* If the driver is something smart, like UBI, it may need to maintain
-         * its own reference counting. The below functions are only for driver.
-         * The driver may register its callbacks. These callbacks are not
-         * supposed to be called by MTD users */
-        int (*get_device) (struct mtd_info *mtd);
-        void (*put_device) (struct mtd_info *mtd);
 };
 
-static inline struct mtd_info *dev_to_mtd(struct device *dev)
-{
-        return dev ? dev_get_drvdata(dev) : NULL;
-}
+/*
+ * Erase is an asynchronous operation.  Device drivers are supposed
+ * to call instr->callback() whenever the operation completes, even
+ * if it completes with a failure.
+ * Callers are supposed to pass a callback function and wait for it
+ * to be called before writing to the block.
+ */
+static inline int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+        return mtd->erase(mtd, instr);
+}
+
+/*
+ * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
+ */
+static inline int mtd_point(struct mtd_info *mtd, loff_t from, size_t len,
+                            size_t *retlen, void **virt, resource_size_t *phys)
+{
+        *retlen = 0;
+        if (!mtd->point)
+                return -EOPNOTSUPP;
+        return mtd->point(mtd, from, len, retlen, virt, phys);
+}
+
+/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
+static inline void mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+        return mtd->unpoint(mtd, from, len);
+}
+
+/*
+ * Allow NOMMU mmap() to directly map the device (if not NULL)
+ * - return the address to which the offset maps
+ * - return -ENOSYS to indicate refusal to do the mapping
+ */
+static inline unsigned long mtd_get_unmapped_area(struct mtd_info *mtd,
+                                                  unsigned long len,
+                                                  unsigned long offset,
+                                                  unsigned long flags)
+{
+        if (!mtd->get_unmapped_area)
+                return -EOPNOTSUPP;
+        return mtd->get_unmapped_area(mtd, len, offset, flags);
+}
+
+static inline int mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+                           size_t *retlen, u_char *buf)
+{
+        return mtd->read(mtd, from, len, retlen, buf);
+}
+
+static inline int mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+                            size_t *retlen, const u_char *buf)
+{
+        *retlen = 0;
+        if (!mtd->write)
+                return -EROFS;
+        return mtd->write(mtd, to, len, retlen, buf);
+}
+
+/*
+ * In blackbox flight recorder like scenarios we want to make successful writes
+ * in interrupt context. panic_write() is only intended to be called when its
+ * known the kernel is about to panic and we need the write to succeed. Since
+ * the kernel is not going to be running for much longer, this function can
+ * break locks and delay to ensure the write succeeds (but not sleep).
+ */
+static inline int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+                                  size_t *retlen, const u_char *buf)
+{
+        *retlen = 0;
+        if (!mtd->panic_write)
+                return -EOPNOTSUPP;
+        return mtd->panic_write(mtd, to, len, retlen, buf);
+}
+
+static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from,
+                               struct mtd_oob_ops *ops)
+{
+        ops->retlen = ops->oobretlen = 0;
+        if (!mtd->read_oob)
+                return -EOPNOTSUPP;
+        return mtd->read_oob(mtd, from, ops);
+}
+
+static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to,
+                                struct mtd_oob_ops *ops)
+{
+        ops->retlen = ops->oobretlen = 0;
+        if (!mtd->write_oob)
+                return -EOPNOTSUPP;
+        return mtd->write_oob(mtd, to, ops);
+}
+
+/*
+ * Method to access the protection register area, present in some flash
+ * devices. The user data is one time programmable but the factory data is read
+ * only.
+ */
+static inline int mtd_get_fact_prot_info(struct mtd_info *mtd,
+                                         struct otp_info *buf, size_t len)
+{
+        if (!mtd->get_fact_prot_info)
+                return -EOPNOTSUPP;
+        return mtd->get_fact_prot_info(mtd, buf, len);
+}
+
+static inline int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
+                                         size_t len, size_t *retlen,
+                                         u_char *buf)
+{
+        *retlen = 0;
+        if (!mtd->read_fact_prot_reg)
+                return -EOPNOTSUPP;
+        return mtd->read_fact_prot_reg(mtd, from, len, retlen, buf);
+}
+
+static inline int mtd_get_user_prot_info(struct mtd_info *mtd,
+                                         struct otp_info *buf,
+                                         size_t len)
+{
+        if (!mtd->get_user_prot_info)
+                return -EOPNOTSUPP;
+        return mtd->get_user_prot_info(mtd, buf, len);
+}
+
+static inline int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
+                                         size_t len, size_t *retlen,
+                                         u_char *buf)
+{
+        *retlen = 0;
+        if (!mtd->read_user_prot_reg)
+                return -EOPNOTSUPP;
+        return mtd->read_user_prot_reg(mtd, from, len, retlen, buf);
+}
+
+static inline int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to,
+                                          size_t len, size_t *retlen,
+                                          u_char *buf)
+{
+        *retlen = 0;
+        if (!mtd->write_user_prot_reg)
+                return -EOPNOTSUPP;
+        return mtd->write_user_prot_reg(mtd, to, len, retlen, buf);
+}
+
+static inline int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
+                                         size_t len)
+{
+        if (!mtd->lock_user_prot_reg)
+                return -EOPNOTSUPP;
+        return mtd->lock_user_prot_reg(mtd, from, len);
+}
+
+int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+               unsigned long count, loff_t to, size_t *retlen);
+
+static inline void mtd_sync(struct mtd_info *mtd)
+{
+        if (mtd->sync)
+                mtd->sync(mtd);
+}
+
+/* Chip-supported device locking */
+static inline int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+        if (!mtd->lock)
+                return -EOPNOTSUPP;
+        return mtd->lock(mtd, ofs, len);
+}
+
+static inline int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+        if (!mtd->unlock)
+                return -EOPNOTSUPP;
+        return mtd->unlock(mtd, ofs, len);
+}
+
+static inline int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+        if (!mtd->is_locked)
+                return -EOPNOTSUPP;
+        return mtd->is_locked(mtd, ofs, len);
+}
+
+static inline int mtd_suspend(struct mtd_info *mtd)
+{
+        return mtd->suspend ? mtd->suspend(mtd) : 0;
+}
+
+static inline void mtd_resume(struct mtd_info *mtd)
+{
+        if (mtd->resume)
+                mtd->resume(mtd);
+}
+
+static inline int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+        if (!mtd->block_isbad)
+                return 0;
+        return mtd->block_isbad(mtd, ofs);
+}
+
+static inline int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+        if (!mtd->block_markbad)
+                return -EOPNOTSUPP;
+        return mtd->block_markbad(mtd, ofs);
+}
 
 static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
@@ -309,6 +480,16 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
         return do_div(sz, mtd->writesize);
 }
 
+static inline int mtd_has_oob(const struct mtd_info *mtd)
+{
+        return mtd->read_oob && mtd->write_oob;
+}
+
+static inline int mtd_can_have_bb(const struct mtd_info *mtd)
+{
+        return !!mtd->block_isbad;
+}
+
 /* Kernel-side ioctl definitions */
 
 struct mtd_partition;
@@ -338,13 +519,6 @@ struct mtd_notifier {
 
 extern void register_mtd_user (struct mtd_notifier *new);
 extern int unregister_mtd_user (struct mtd_notifier *old);
-
-int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
-                       unsigned long count, loff_t to, size_t *retlen);
-
-int default_mtd_readv(struct mtd_info *mtd, struct kvec *vecs,
-                      unsigned long count, loff_t from, size_t *retlen);
-
 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);
 
 void mtd_erase_callback(struct erase_info *instr);
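
The net effect for MTD users is mechanical: code that used to dereference the method pointers switches to the new mtd_*() wrappers, which also centralize the NULL checks and the -EOPNOTSUPP/-EROFS fallbacks. A sketch of a converted caller (the helper name is illustrative):

#include <linux/mtd/mtd.h>

static int example_read_block(struct mtd_info *mtd, loff_t ofs,
                              size_t len, u_char *buf)
{
        size_t retlen;
        int ret;

        /* Previously: ret = mtd->read(mtd, ofs, len, &retlen, buf); */
        ret = mtd_read(mtd, ofs, len, &retlen, buf);
        if (!ret && retlen != len)
                ret = -EIO;
        return ret;
}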
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 904131bab501..63b5a8b6dfbd 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -555,6 +555,7 @@ struct nand_chip {
 #define NAND_MFR_HYNIX          0xad
 #define NAND_MFR_MICRON         0x2c
 #define NAND_MFR_AMD            0x01
+#define NAND_MFR_MACRONIX       0xc2
 
 /**
  * struct nand_flash_dev - NAND Flash Device ID Structure
diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h
index 04e018160e2b..d2887e76b7f6 100644
--- a/include/linux/mtd/physmap.h
+++ b/include/linux/mtd/physmap.h
@@ -30,6 +30,7 @@ struct physmap_flash_data {
         unsigned int            pfow_base;
         char                    *probe_type;
         struct mtd_partition    *parts;
+        const char              **part_probe_types;
 };
 
 #endif /* __LINUX_MTD_PHYSMAP__ */
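
As an illustration (the variable names and parser table are hypothetical, not from this patch), a board using physmap could now pass its preferred partition parsers in the platform data:

#include <linux/mtd/physmap.h>
#include <linux/mtd/partitions.h>

static const char *example_part_probes[] = { "cmdlinepart", "RedBoot", NULL };

static struct physmap_flash_data example_flash_data = {
        .width            = 2,
        .part_probe_types = example_part_probes,
};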