aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorArtem Bityutskiy <artem.bityutskiy@linux.intel.com>2012-02-03 07:34:14 -0500
committerDavid Woodhouse <David.Woodhouse@intel.com>2012-03-26 19:28:05 -0400
commit8273a0c911d8e068297ef70aa7241ee78db4c712 (patch)
tree3aa97da098ac7417bc3ef8bc9b1a3e5b531ed268
parent5e4e6e3fdf48c1b012e2b6e80ed1d7e99d4fa6d1 (diff)
mtd: add offset and length checks to the API functions
Add verification of the offset and length to MTD API functions and verify that the MTD device offset and length are within the MTD device size. The modified API functions are: 'mtd_erase()' 'mtd_point()' 'mtd_unpoint()' 'mtd_get_unmapped_area()' 'mtd_read()' 'mtd_write()' 'mtd_panic_write()' 'mtd_lock()' 'mtd_unlock()' 'mtd_is_locked()' 'mtd_block_isbad()' 'mtd_block_markbad()' This patch also uninlines these functions and exports them from mtdcore.c because they are not performance-critical and do not have to be inlined. Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com> Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
-rw-r--r--drivers/mtd/mtdcore.c148
-rw-r--r--include/linux/mtd/mtd.h129
2 files changed, 165 insertions, 112 deletions
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 5ea22cf357fb..8d5e103695f9 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -683,6 +683,154 @@ void __put_mtd_device(struct mtd_info *mtd)
683EXPORT_SYMBOL_GPL(__put_mtd_device); 683EXPORT_SYMBOL_GPL(__put_mtd_device);
684 684
685/* 685/*
686 * Erase is an asynchronous operation. Device drivers are supposed
687 * to call instr->callback() whenever the operation completes, even
688 * if it completes with a failure.
689 * Callers are supposed to pass a callback function and wait for it
690 * to be called before writing to the block.
691 */
692int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
693{
694 if (instr->addr > mtd->size || instr->len > mtd->size - instr->addr)
695 return -EINVAL;
696 return mtd->_erase(mtd, instr);
697}
698EXPORT_SYMBOL_GPL(mtd_erase);
699
700/*
701 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
702 */
703int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
704 void **virt, resource_size_t *phys)
705{
706 *retlen = 0;
707 if (!mtd->_point)
708 return -EOPNOTSUPP;
709 if (from < 0 || from > mtd->size || len > mtd->size - from)
710 return -EINVAL;
711 return mtd->_point(mtd, from, len, retlen, virt, phys);
712}
713EXPORT_SYMBOL_GPL(mtd_point);
714
715/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
716int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
717{
718 if (!mtd->_point)
719 return -EOPNOTSUPP;
720 if (from < 0 || from > mtd->size || len > mtd->size - from)
721 return -EINVAL;
722 return mtd->_unpoint(mtd, from, len);
723}
724EXPORT_SYMBOL_GPL(mtd_unpoint);
725
726/*
727 * Allow NOMMU mmap() to directly map the device (if not NULL)
728 * - return the address to which the offset maps
729 * - return -ENOSYS to indicate refusal to do the mapping
730 */
731unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
732 unsigned long offset, unsigned long flags)
733{
734 if (!mtd->_get_unmapped_area)
735 return -EOPNOTSUPP;
736 if (offset > mtd->size || len > mtd->size - offset)
737 return -EINVAL;
738 return mtd->_get_unmapped_area(mtd, len, offset, flags);
739}
740EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
741
742int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
743 u_char *buf)
744{
745 if (from < 0 || from > mtd->size || len > mtd->size - from)
746 return -EINVAL;
747 return mtd->_read(mtd, from, len, retlen, buf);
748}
749EXPORT_SYMBOL_GPL(mtd_read);
750
751int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
752 const u_char *buf)
753{
754 *retlen = 0;
755 if (!mtd->_write)
756 return -EROFS;
757 if (to < 0 || to > mtd->size || len > mtd->size - to)
758 return -EINVAL;
759 return mtd->_write(mtd, to, len, retlen, buf);
760}
761EXPORT_SYMBOL_GPL(mtd_write);
762
763/*
764 * In blackbox flight recorder like scenarios we want to make successful writes
765 * in interrupt context. panic_write() is only intended to be called when its
766 * known the kernel is about to panic and we need the write to succeed. Since
767 * the kernel is not going to be running for much longer, this function can
768 * break locks and delay to ensure the write succeeds (but not sleep).
769 */
770int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
771 const u_char *buf)
772{
773 *retlen = 0;
774 if (!mtd->_panic_write)
775 return -EOPNOTSUPP;
776 if (to < 0 || to > mtd->size || len > mtd->size - to)
777 return -EINVAL;
778 return mtd->_panic_write(mtd, to, len, retlen, buf);
779}
780EXPORT_SYMBOL_GPL(mtd_panic_write);
781
782/* Chip-supported device locking */
783int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
784{
785 if (!mtd->_lock)
786 return -EOPNOTSUPP;
787 if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
788 return -EINVAL;
789 return mtd->_lock(mtd, ofs, len);
790}
791EXPORT_SYMBOL_GPL(mtd_lock);
792
793int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
794{
795 if (!mtd->_unlock)
796 return -EOPNOTSUPP;
797 if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
798 return -EINVAL;
799 return mtd->_unlock(mtd, ofs, len);
800}
801EXPORT_SYMBOL_GPL(mtd_unlock);
802
803int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
804{
805 if (!mtd->_is_locked)
806 return -EOPNOTSUPP;
807 if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
808 return -EINVAL;
809 return mtd->_is_locked(mtd, ofs, len);
810}
811EXPORT_SYMBOL_GPL(mtd_is_locked);
812
813int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
814{
815 if (!mtd->_block_isbad)
816 return 0;
817 if (ofs < 0 || ofs > mtd->size)
818 return -EINVAL;
819 return mtd->_block_isbad(mtd, ofs);
820}
821EXPORT_SYMBOL_GPL(mtd_block_isbad);
822
823int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
824{
825 if (!mtd->_block_markbad)
826 return -EOPNOTSUPP;
827 if (ofs < 0 || ofs > mtd->size)
828 return -EINVAL;
829 return mtd->_block_markbad(mtd, ofs);
830}
831EXPORT_SYMBOL_GPL(mtd_block_markbad);
832
833/*
686 * default_mtd_writev - the default writev method 834 * default_mtd_writev - the default writev method
687 * @mtd: mtd device description object pointer 835 * @mtd: mtd device description object pointer
688 * @vecs: the vectors to write 836 * @vecs: the vectors to write
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 8c243117c087..317a80c4d54c 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -240,83 +240,18 @@ struct mtd_info {
240 int usecount; 240 int usecount;
241}; 241};
242 242
243/* 243int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
244 * Erase is an asynchronous operation. Device drivers are supposed 244int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
245 * to call instr->callback() whenever the operation completes, even 245 void **virt, resource_size_t *phys);
246 * if it completes with a failure. 246int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
247 * Callers are supposed to pass a callback function and wait for it 247unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
248 * to be called before writing to the block. 248 unsigned long offset, unsigned long flags);
249 */ 249int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
250static inline int mtd_erase(struct mtd_info *mtd, struct erase_info *instr) 250 u_char *buf);
251{ 251int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
252 return mtd->_erase(mtd, instr); 252 const u_char *buf);
253} 253int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
254 254 const u_char *buf);
255/*
256 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
257 */
258static inline int mtd_point(struct mtd_info *mtd, loff_t from, size_t len,
259 size_t *retlen, void **virt, resource_size_t *phys)
260{
261 *retlen = 0;
262 if (!mtd->_point)
263 return -EOPNOTSUPP;
264 return mtd->_point(mtd, from, len, retlen, virt, phys);
265}
266
267/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
268static inline int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
269{
270 if (!mtd->_point)
271 return -EOPNOTSUPP;
272 return mtd->_unpoint(mtd, from, len);
273}
274
275/*
276 * Allow NOMMU mmap() to directly map the device (if not NULL)
277 * - return the address to which the offset maps
278 * - return -ENOSYS to indicate refusal to do the mapping
279 */
280static inline unsigned long mtd_get_unmapped_area(struct mtd_info *mtd,
281 unsigned long len,
282 unsigned long offset,
283 unsigned long flags)
284{
285 if (!mtd->_get_unmapped_area)
286 return -EOPNOTSUPP;
287 return mtd->_get_unmapped_area(mtd, len, offset, flags);
288}
289
290static inline int mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
291 size_t *retlen, u_char *buf)
292{
293 return mtd->_read(mtd, from, len, retlen, buf);
294}
295
296static inline int mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
297 size_t *retlen, const u_char *buf)
298{
299 *retlen = 0;
300 if (!mtd->_write)
301 return -EROFS;
302 return mtd->_write(mtd, to, len, retlen, buf);
303}
304
305/*
306 * In blackbox flight recorder like scenarios we want to make successful writes
307 * in interrupt context. panic_write() is only intended to be called when its
308 * known the kernel is about to panic and we need the write to succeed. Since
309 * the kernel is not going to be running for much longer, this function can
310 * break locks and delay to ensure the write succeeds (but not sleep).
311 */
312static inline int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
313 size_t *retlen, const u_char *buf)
314{
315 *retlen = 0;
316 if (!mtd->_panic_write)
317 return -EOPNOTSUPP;
318 return mtd->_panic_write(mtd, to, len, retlen, buf);
319}
320 255
321static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from, 256static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from,
322 struct mtd_oob_ops *ops) 257 struct mtd_oob_ops *ops)
@@ -405,27 +340,11 @@ static inline void mtd_sync(struct mtd_info *mtd)
405 mtd->_sync(mtd); 340 mtd->_sync(mtd);
406} 341}
407 342
408/* Chip-supported device locking */ 343int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
409static inline int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 344int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
410{ 345int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
411 if (!mtd->_lock) 346int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs);
412 return -EOPNOTSUPP; 347int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs);
413 return mtd->_lock(mtd, ofs, len);
414}
415
416static inline int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
417{
418 if (!mtd->_unlock)
419 return -EOPNOTSUPP;
420 return mtd->_unlock(mtd, ofs, len);
421}
422
423static inline int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
424{
425 if (!mtd->_is_locked)
426 return -EOPNOTSUPP;
427 return mtd->_is_locked(mtd, ofs, len);
428}
429 348
430static inline int mtd_suspend(struct mtd_info *mtd) 349static inline int mtd_suspend(struct mtd_info *mtd)
431{ 350{
@@ -438,20 +357,6 @@ static inline void mtd_resume(struct mtd_info *mtd)
438 mtd->_resume(mtd); 357 mtd->_resume(mtd);
439} 358}
440 359
441static inline int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
442{
443 if (!mtd->_block_isbad)
444 return 0;
445 return mtd->_block_isbad(mtd, ofs);
446}
447
448static inline int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
449{
450 if (!mtd->_block_markbad)
451 return -EOPNOTSUPP;
452 return mtd->_block_markbad(mtd, ofs);
453}
454
455static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) 360static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
456{ 361{
457 if (mtd->erasesize_shift) 362 if (mtd->erasesize_shift)