diff options
author | Artem Bityutskiy <artem.bityutskiy@linux.intel.com> | 2012-02-03 07:34:14 -0500 |
---|---|---|
committer | David Woodhouse <David.Woodhouse@intel.com> | 2012-03-26 19:28:05 -0400 |
commit | 8273a0c911d8e068297ef70aa7241ee78db4c712 (patch) | |
tree | 3aa97da098ac7417bc3ef8bc9b1a3e5b531ed268 /include/linux/mtd | |
parent | 5e4e6e3fdf48c1b012e2b6e80ed1d7e99d4fa6d1 (diff) |
mtd: add offset and length checks to the API functions
Add verification of the offset and length to the MTD API functions, and verify that
the requested offset and length are within the MTD device size.
The modified API functions are:
'mtd_erase()'
'mtd_point()'
'mtd_unpoint()'
'mtd_get_unmapped_area()'
'mtd_read()'
'mtd_write()'
'mtd_panic_write()'
'mtd_lock()'
'mtd_unlock()'
'mtd_is_locked()'
'mtd_block_isbad()'
'mtd_block_markbad()'
This patch also uninlines these functions and exports them from mtdcore.c because they
are not performance-critical and do not have to be inlined.
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'include/linux/mtd')
-rw-r--r-- | include/linux/mtd/mtd.h | 129 |
1 file changed, 17 insertions, 112 deletions
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 8c243117c087..317a80c4d54c 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h | |||
@@ -240,83 +240,18 @@ struct mtd_info { | |||
240 | int usecount; | 240 | int usecount; |
241 | }; | 241 | }; |
242 | 242 | ||
243 | /* | 243 | int mtd_erase(struct mtd_info *mtd, struct erase_info *instr); |
244 | * Erase is an asynchronous operation. Device drivers are supposed | 244 | int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, |
245 | * to call instr->callback() whenever the operation completes, even | 245 | void **virt, resource_size_t *phys); |
246 | * if it completes with a failure. | 246 | int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len); |
247 | * Callers are supposed to pass a callback function and wait for it | 247 | unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len, |
248 | * to be called before writing to the block. | 248 | unsigned long offset, unsigned long flags); |
249 | */ | 249 | int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, |
250 | static inline int mtd_erase(struct mtd_info *mtd, struct erase_info *instr) | 250 | u_char *buf); |
251 | { | 251 | int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, |
252 | return mtd->_erase(mtd, instr); | 252 | const u_char *buf); |
253 | } | 253 | int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, |
254 | 254 | const u_char *buf); | |
255 | /* | ||
256 | * This stuff for eXecute-In-Place. phys is optional and may be set to NULL. | ||
257 | */ | ||
258 | static inline int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, | ||
259 | size_t *retlen, void **virt, resource_size_t *phys) | ||
260 | { | ||
261 | *retlen = 0; | ||
262 | if (!mtd->_point) | ||
263 | return -EOPNOTSUPP; | ||
264 | return mtd->_point(mtd, from, len, retlen, virt, phys); | ||
265 | } | ||
266 | |||
267 | /* We probably shouldn't allow XIP if the unpoint isn't a NULL */ | ||
268 | static inline int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len) | ||
269 | { | ||
270 | if (!mtd->_point) | ||
271 | return -EOPNOTSUPP; | ||
272 | return mtd->_unpoint(mtd, from, len); | ||
273 | } | ||
274 | |||
275 | /* | ||
276 | * Allow NOMMU mmap() to directly map the device (if not NULL) | ||
277 | * - return the address to which the offset maps | ||
278 | * - return -ENOSYS to indicate refusal to do the mapping | ||
279 | */ | ||
280 | static inline unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, | ||
281 | unsigned long len, | ||
282 | unsigned long offset, | ||
283 | unsigned long flags) | ||
284 | { | ||
285 | if (!mtd->_get_unmapped_area) | ||
286 | return -EOPNOTSUPP; | ||
287 | return mtd->_get_unmapped_area(mtd, len, offset, flags); | ||
288 | } | ||
289 | |||
290 | static inline int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, | ||
291 | size_t *retlen, u_char *buf) | ||
292 | { | ||
293 | return mtd->_read(mtd, from, len, retlen, buf); | ||
294 | } | ||
295 | |||
296 | static inline int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, | ||
297 | size_t *retlen, const u_char *buf) | ||
298 | { | ||
299 | *retlen = 0; | ||
300 | if (!mtd->_write) | ||
301 | return -EROFS; | ||
302 | return mtd->_write(mtd, to, len, retlen, buf); | ||
303 | } | ||
304 | |||
305 | /* | ||
306 | * In blackbox flight recorder like scenarios we want to make successful writes | ||
307 | * in interrupt context. panic_write() is only intended to be called when its | ||
308 | * known the kernel is about to panic and we need the write to succeed. Since | ||
309 | * the kernel is not going to be running for much longer, this function can | ||
310 | * break locks and delay to ensure the write succeeds (but not sleep). | ||
311 | */ | ||
312 | static inline int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, | ||
313 | size_t *retlen, const u_char *buf) | ||
314 | { | ||
315 | *retlen = 0; | ||
316 | if (!mtd->_panic_write) | ||
317 | return -EOPNOTSUPP; | ||
318 | return mtd->_panic_write(mtd, to, len, retlen, buf); | ||
319 | } | ||
320 | 255 | ||
321 | static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from, | 256 | static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from, |
322 | struct mtd_oob_ops *ops) | 257 | struct mtd_oob_ops *ops) |
@@ -405,27 +340,11 @@ static inline void mtd_sync(struct mtd_info *mtd) | |||
405 | mtd->_sync(mtd); | 340 | mtd->_sync(mtd); |
406 | } | 341 | } |
407 | 342 | ||
408 | /* Chip-supported device locking */ | 343 | int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); |
409 | static inline int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | 344 | int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); |
410 | { | 345 | int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len); |
411 | if (!mtd->_lock) | 346 | int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs); |
412 | return -EOPNOTSUPP; | 347 | int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs); |
413 | return mtd->_lock(mtd, ofs, len); | ||
414 | } | ||
415 | |||
416 | static inline int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | ||
417 | { | ||
418 | if (!mtd->_unlock) | ||
419 | return -EOPNOTSUPP; | ||
420 | return mtd->_unlock(mtd, ofs, len); | ||
421 | } | ||
422 | |||
423 | static inline int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) | ||
424 | { | ||
425 | if (!mtd->_is_locked) | ||
426 | return -EOPNOTSUPP; | ||
427 | return mtd->_is_locked(mtd, ofs, len); | ||
428 | } | ||
429 | 348 | ||
430 | static inline int mtd_suspend(struct mtd_info *mtd) | 349 | static inline int mtd_suspend(struct mtd_info *mtd) |
431 | { | 350 | { |
@@ -438,20 +357,6 @@ static inline void mtd_resume(struct mtd_info *mtd) | |||
438 | mtd->_resume(mtd); | 357 | mtd->_resume(mtd); |
439 | } | 358 | } |
440 | 359 | ||
441 | static inline int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs) | ||
442 | { | ||
443 | if (!mtd->_block_isbad) | ||
444 | return 0; | ||
445 | return mtd->_block_isbad(mtd, ofs); | ||
446 | } | ||
447 | |||
448 | static inline int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs) | ||
449 | { | ||
450 | if (!mtd->_block_markbad) | ||
451 | return -EOPNOTSUPP; | ||
452 | return mtd->_block_markbad(mtd, ofs); | ||
453 | } | ||
454 | |||
455 | static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) | 360 | static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) |
456 | { | 361 | { |
457 | if (mtd->erasesize_shift) | 362 | if (mtd->erasesize_shift) |