Diffstat (limited to 'drivers/mtd/mtdchar.c')
 drivers/mtd/mtdchar.c | 55 +++++++++++++++++++++++--------------------------------
 1 file changed, 23 insertions(+), 32 deletions(-)
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 4c36ef66a46b..3f92731a5b9e 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -166,10 +166,23 @@ static int mtd_close(struct inode *inode, struct file *file)
 	return 0;
 } /* mtd_close */
 
-/* FIXME: This _really_ needs to die. In 2.5, we should lock the
-   userspace buffer down and use it directly with readv/writev.
-*/
-#define MAX_KMALLOC_SIZE 0x20000
+/* Back in June 2001, dwmw2 wrote:
+ *
+ * FIXME: This _really_ needs to die. In 2.5, we should lock the
+ * userspace buffer down and use it directly with readv/writev.
+ *
+ * The implementation below, using mtd_kmalloc_up_to, mitigates
+ * allocation failures when the system is under low-memory situations
+ * or if memory is highly fragmented at the cost of reducing the
+ * performance of the requested transfer due to a smaller buffer size.
+ *
+ * A more complex but more memory-efficient implementation based on
+ * get_user_pages and iovecs to cover extents of those pages is a
+ * longer-term goal, as intimated by dwmw2 above. However, for the
+ * write case, this requires yet more complex head and tail transfer
+ * handling when those head and tail offsets and sizes are such that
+ * alignment requirements are not met in the NAND subdriver.
+ */
 
 static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
 {
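
For context: mtd_kmalloc_up_to() is supplied by the MTD core, not by this file, so its body does not appear in this diff. The sketch below only illustrates the back-off idea the new comment describes; the function name, the exact GFP flags, and the min_alloc parameter are illustrative assumptions rather than the in-tree implementation (which also factors in the device, e.g. keeping the size a multiple of its write size).

#include <linux/slab.h>

/*
 * Illustrative sketch only -- not the MTD core implementation.
 * Try to allocate *size bytes, halving the request whenever kmalloc()
 * fails, so a fragmented or memory-pressured system still gets some
 * bounce buffer.  The size actually obtained is reported back through
 * *size so the caller can transfer in chunks of at most that length.
 */
static void *kmalloc_up_to_sketch(size_t *size, size_t min_alloc)
{
	void *kbuf;

	while (*size > min_alloc) {
		/* Oversized attempts are allowed to fail quietly and quickly. */
		kbuf = kmalloc(*size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
		if (kbuf)
			return kbuf;
		*size >>= 1;	/* back off and retry with half the size */
	}

	/* Last resort at the final (possibly reduced) size: try harder. */
	return kmalloc(*size, GFP_KERNEL);
}
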
@@ -179,6 +192,7 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
 	size_t total_retlen=0;
 	int ret=0;
 	int len;
+	size_t size = count;
 	char *kbuf;
 
 	DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");
@@ -189,23 +203,12 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
 	if (!count)
 		return 0;
 
-	/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
-	   and pass them directly to the MTD functions */
-
-	if (count > MAX_KMALLOC_SIZE)
-		kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
-	else
-		kbuf=kmalloc(count, GFP_KERNEL);
-
+	kbuf = mtd_kmalloc_up_to(mtd, &size);
 	if (!kbuf)
 		return -ENOMEM;
 
 	while (count) {
-
-		if (count > MAX_KMALLOC_SIZE)
-			len = MAX_KMALLOC_SIZE;
-		else
-			len = count;
+		len = min_t(size_t, count, size);
 
 		switch (mfi->mode) {
 		case MTD_MODE_OTP_FACTORY:
@@ -268,6 +271,7 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
 {
 	struct mtd_file_info *mfi = file->private_data;
 	struct mtd_info *mtd = mfi->mtd;
+	size_t size = count;
 	char *kbuf;
 	size_t retlen;
 	size_t total_retlen=0;
@@ -285,20 +289,12 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
 	if (!count)
 		return 0;
 
-	if (count > MAX_KMALLOC_SIZE)
-		kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
-	else
-		kbuf=kmalloc(count, GFP_KERNEL);
-
+	kbuf = mtd_kmalloc_up_to(mtd, &size);
 	if (!kbuf)
 		return -ENOMEM;
 
 	while (count) {
-
-		if (count > MAX_KMALLOC_SIZE)
-			len = MAX_KMALLOC_SIZE;
-		else
-			len = count;
+		len = min_t(size_t, count, size);
 
 		if (copy_from_user(kbuf, buf, len)) {
 			kfree(kbuf);
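
Condensed, mtd_read() and mtd_write() now share the same shape: request a bounce buffer sized for the whole transfer, accept whatever was actually obtained, and loop in chunks no larger than that. The skeleton below is a simplified composite of the two hunks above for illustration only (mode handling, copy_to_user()/copy_from_user() and the actual MTD read/write calls are elided); it is not a new function added by this patch, and it assumes mtd_kmalloc_up_to() is declared in <linux/mtd/mtd.h>.

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>

/* Simplified composite of the read/write paths above (illustration only). */
static ssize_t mtd_transfer_sketch(struct mtd_info *mtd, size_t count)
{
	size_t size = count;	/* ask for the whole transfer up front */
	size_t len;
	char *kbuf;

	kbuf = mtd_kmalloc_up_to(mtd, &size);	/* may come back smaller */
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		/* never move more than the buffer we actually received */
		len = min_t(size_t, count, size);
		/* ... per-chunk user-space copy and MTD I/O would go here ... */
		count -= len;
	}

	kfree(kbuf);
	return 0;
}
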
@@ -512,7 +508,6 @@ static int shrink_ecclayout(const struct nand_ecclayout *from,
 	return 0;
 }
 
-#ifdef CONFIG_MTD_PARTITIONS
 static int mtd_blkpg_ioctl(struct mtd_info *mtd,
 			   struct blkpg_ioctl_arg __user *arg)
 {
@@ -548,8 +543,6 @@ static int mtd_blkpg_ioctl(struct mtd_info *mtd,
 		return -EINVAL;
 	}
 }
-#endif
-
 
 static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
 {
@@ -941,7 +934,6 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
 		break;
 	}
 
-#ifdef CONFIG_MTD_PARTITIONS
 	case BLKPG:
 	{
 		ret = mtd_blkpg_ioctl(mtd,
@@ -955,7 +947,6 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
 		ret = 0;
 		break;
 	}
-#endif
 
 	default:
 		ret = -ENOTTY;