Diffstat (limited to 'drivers/mtd/mtdchar.c')
-rw-r--r--	drivers/mtd/mtdchar.c	50
1 file changed, 23 insertions(+), 27 deletions(-)
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 4c36ef66a46b..f488eabaa7b5 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -166,10 +166,23 @@ static int mtd_close(struct inode *inode, struct file *file)
 	return 0;
 } /* mtd_close */
 
-/* FIXME: This _really_ needs to die. In 2.5, we should lock the
-   userspace buffer down and use it directly with readv/writev.
-*/
-#define MAX_KMALLOC_SIZE 0x20000
+/* Back in June 2001, dwmw2 wrote:
+ *
+ *   FIXME: This _really_ needs to die. In 2.5, we should lock the
+ *   userspace buffer down and use it directly with readv/writev.
+ *
+ * The implementation below, using mtd_kmalloc_up_to, mitigates
+ * allocation failures when the system is under low-memory situations
+ * or if memory is highly fragmented at the cost of reducing the
+ * performance of the requested transfer due to a smaller buffer size.
+ *
+ * A more complex but more memory-efficient implementation based on
+ * get_user_pages and iovecs to cover extents of those pages is a
+ * longer-term goal, as intimated by dwmw2 above. However, for the
+ * write case, this requires yet more complex head and tail transfer
+ * handling when those head and tail offsets and sizes are such that
+ * alignment requirements are not met in the NAND subdriver.
+ */
 
 static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
 {
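The new comment above describes the allocation strategy this patch adopts: ask for the whole transfer size, and fall back to a smaller buffer rather than failing outright when memory is tight or fragmented. As a rough illustration of what a helper with the mtd_kmalloc_up_to() calling convention might look like (this is a hedged sketch, not the actual MTD core implementation; the floor and alignment policy shown here are assumptions):

/*
 * Illustrative sketch of an "allocate up to *size bytes" helper with the
 * same calling convention as mtd_kmalloc_up_to(): try the full request,
 * then halve it (kept aligned to the device write size) until kmalloc
 * succeeds or a floor is reached.  The size actually obtained is passed
 * back through *size.  Assumes <linux/slab.h> and <linux/mtd/mtd.h>.
 */
static void *alloc_up_to_sketch(struct mtd_info *mtd, size_t *size)
{
	void *kbuf;
	size_t floor = max_t(size_t, mtd->writesize, PAGE_SIZE);

	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);

	while (*size > floor) {
		kbuf = kmalloc(*size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
		if (kbuf)
			return kbuf;
		*size >>= 1;				/* back off: halve the request */
		*size = ALIGN(*size, mtd->writesize);	/* keep write-size alignment */
	}

	/* Final attempt at the floor size; this may still return NULL. */
	return kmalloc(*size, GFP_KERNEL);
}

With such a helper the read and write paths below no longer need MAX_KMALLOC_SIZE: they simply loop over the request in chunks of whatever buffer size the allocator managed to provide.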
@@ -179,6 +192,7 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
 	size_t total_retlen=0;
 	int ret=0;
 	int len;
+	size_t size = count;
 	char *kbuf;
 
 	DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");
@@ -189,23 +203,12 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
 	if (!count)
 		return 0;
 
-	/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
-	   and pass them directly to the MTD functions */
-
-	if (count > MAX_KMALLOC_SIZE)
-		kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
-	else
-		kbuf=kmalloc(count, GFP_KERNEL);
-
+	kbuf = mtd_kmalloc_up_to(mtd, &size);
 	if (!kbuf)
 		return -ENOMEM;
 
 	while (count) {
-
-		if (count > MAX_KMALLOC_SIZE)
-			len = MAX_KMALLOC_SIZE;
-		else
-			len = count;
+		len = min_t(size_t, count, size);
 
 		switch (mfi->mode) {
 		case MTD_MODE_OTP_FACTORY:
@@ -268,6 +271,7 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
 {
 	struct mtd_file_info *mfi = file->private_data;
 	struct mtd_info *mtd = mfi->mtd;
+	size_t size = count;
 	char *kbuf;
 	size_t retlen;
 	size_t total_retlen=0;
@@ -285,20 +289,12 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
 	if (!count)
 		return 0;
 
-	if (count > MAX_KMALLOC_SIZE)
-		kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
-	else
-		kbuf=kmalloc(count, GFP_KERNEL);
-
+	kbuf = mtd_kmalloc_up_to(mtd, &size);
 	if (!kbuf)
 		return -ENOMEM;
 
 	while (count) {
-
-		if (count > MAX_KMALLOC_SIZE)
-			len = MAX_KMALLOC_SIZE;
-		else
-			len = count;
+		len = min_t(size_t, count, size);
 
 		if (copy_from_user(kbuf, buf, len)) {
 			kfree(kbuf);
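Both the read and write hunks converge on the same loop shape: allocate one bounce buffer of whatever size could be satisfied, then walk the user request in chunks of at most that size, so a smaller-than-requested buffer only costs extra loop iterations, never correctness. A minimal, self-contained user-space sketch of that shape (plain malloc/memcpy stand in for the kernel allocation, copy_from_user and the mtd transfer call; all names here are illustrative, not part of the patch):

#include <stdlib.h>
#include <string.h>

/*
 * Copy `count` bytes from src to dst through a bounce buffer that may be
 * smaller than the request, mirroring the chunked loop in mtd_read/mtd_write.
 */
static size_t copy_in_chunks(char *dst, const char *src, size_t count)
{
	size_t bounce_size = count;
	char *bounce;
	size_t done = 0;

	/* Fall back to ever smaller buffers, as mtd_kmalloc_up_to() would. */
	while (!(bounce = malloc(bounce_size)) && bounce_size > 1)
		bounce_size /= 2;
	if (!bounce)
		return 0;

	while (count) {
		size_t len = count < bounce_size ? count : bounce_size;

		memcpy(bounce, src + done, len);	/* stands in for copy_from_user() */
		memcpy(dst + done, bounce, len);	/* stands in for the mtd write    */
		done += len;
		count -= len;
	}

	free(bounce);
	return done;
}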