path: root/drivers/mtd/mtdconcat.c
author    Thomas Gleixner <tglx@cruncher.tec.linutronix.de>    2006-05-28 21:26:58 -0400
committer Thomas Gleixner <tglx@cruncher.tec.linutronix.de>    2006-05-29 09:06:51 -0400
commit    8593fbc68b0df1168995de76d1af38eb62fd6b62 (patch)
tree      dd244def53d2be4f1fbff9f74eac404fab8e240f /drivers/mtd/mtdconcat.c
parent    f4a43cfcecfcaeeaa40a9dbc1d1378298c22446e (diff)
[MTD] Rework the out of band handling completely
Hopefully the last iteration on this!

The handling of out of band data on NAND was accompanied by tons of fruitless discussions and half-arsed patches to make it work for a particular problem. Sufficiently annoyed by all those "I know it better" mails and the reasonable amount of discarded "it solves my problem" patches, I finally decided to go for the big rework. After removing the _ecc variants of the mtd read/write functions, the solution to satisfy the various requirements was to refactor the read/write _oob functions in mtd.

The major change is that read/write_oob now takes a pointer to an operation descriptor structure, "struct mtd_oob_ops", instead of being a function with at least seven arguments. read/write_oob, which should probably be renamed to something more descriptive, can do the following tasks:

- read/write out of band data
- read/write data content and out of band data
- read/write raw data content and out of band data (ecc disabled)

struct mtd_oob_ops has a mode field, which determines the oob handling mode. Aside from the MTD_OOB_RAW mode, which is intended especially for diagnostic purposes and some internal functions, e.g. bad block table creation, the other two modes are for mtd clients:

MTD_OOB_PLACE puts/gets the given oob data exactly to/from the place which is described by the ooboffs and ooblen fields of the mtd_oob_ops structure. It's up to the caller to make sure that the byte positions are not used by the ECC placement algorithms.

MTD_OOB_AUTO puts/gets the given oob data automatically to/from the places in the out of band area which are described by the oobfree tuples in the ecclayout data structure associated with the device.

Whether data plus oob or oob-only handling is done depends on the setting of the datbuf member of the data structure. When datbuf == NULL, the internal read/write_oob functions are selected; otherwise the read/write data routines are invoked.

Tested on a few platforms with all variants. Please be aware of possible regressions for your particular device / application scenario.

Disclaimer: Any whining will be ignored from those who just contributed "hot air blurb" and never sat down to tackle the underlying problem of the mess in the NAND driver grown over time and the big chunk of work to fix up the existing users. The problem was not the holiness of the existing MTD interfaces. The problem was the lack of time to go for the big overhaul. It's easy to add more mess to the existing one, but it takes a lot of effort to go for a real solution.

Improvements and bugfixes are welcome!

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
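As an illustration of the client-side calling convention described above, here is a minimal, hypothetical sketch (not part of this patch) of how an MTD user might read a chunk of data together with its auto-placed out of band bytes. The helper name, the length parameters, the NULL check and the -EIO policy are assumptions made for the example; struct mtd_oob_ops, its fields, MTD_OOB_AUTO and the read_oob() call are taken from the change itself.

/*
 * Illustrative sketch only: read main data plus auto-placed OOB bytes
 * through the new struct mtd_oob_ops interface.
 */
#include <linux/errno.h>
#include <linux/mtd/mtd.h>

static int example_read_data_and_oob(struct mtd_info *mtd, loff_t from,
				     u_char *databuf, size_t datalen,
				     u_char *oobbuf, size_t ooblen)
{
	struct mtd_oob_ops ops = {
		.mode    = MTD_OOB_AUTO,  /* use the ecclayout's oobfree slots */
		.len     = datalen,       /* main data bytes to read */
		.datbuf  = databuf,       /* non-NULL: data + oob path is taken */
		.ooblen  = ooblen,        /* oob bytes to transfer */
		.ooboffs = 0,
		.oobbuf  = oobbuf,
	};
	int err;

	if (!mtd->read_oob)               /* defensive check, assumed policy */
		return -EOPNOTSUPP;

	err = mtd->read_oob(mtd, from, &ops);
	if (err)
		return err;

	/* ops.retlen reports how many data bytes were actually read */
	return ops.retlen == datalen ? 0 : -EIO;
}

With MTD_OOB_PLACE instead, the same call would honour ops.ooboffs and ops.ooblen literally, and the caller would have to keep clear of the bytes used by the ECC placement.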
Diffstat (limited to 'drivers/mtd/mtdconcat.c')
-rw-r--r--  drivers/mtd/mtdconcat.c  90
1 file changed, 37 insertions(+), 53 deletions(-)
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index ec15abcdbdfa..38151b8e6631 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -231,101 +231,85 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
 }
 
 static int
-concat_read_oob(struct mtd_info *mtd, loff_t from, size_t len,
-		size_t * retlen, u_char * buf)
+concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
 {
 	struct mtd_concat *concat = CONCAT(mtd);
-	int err = -EINVAL;
-	int i;
+	struct mtd_oob_ops devops = *ops;
+	int i, err;
 
-	*retlen = 0;
+	ops->retlen = 0;
 
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
-		size_t size, retsize;
 
 		if (from >= subdev->size) {
-			/* Not destined for this subdev */
-			size = 0;
 			from -= subdev->size;
 			continue;
 		}
-		if (from + len > subdev->size)
-			/* First part goes into this subdev */
-			size = subdev->size - from;
-		else
-			/* Entire transaction goes into this subdev */
-			size = len;
 
-		if (subdev->read_oob)
-			err = subdev->read_oob(subdev, from, size,
-					       &retsize, buf);
-		else
-			err = -EINVAL;
+		/* partial read ? */
+		if (from + devops.len > subdev->size)
+			devops.len = subdev->size - from;
 
+		err = subdev->read_oob(subdev, from, &devops);
+		ops->retlen += devops.retlen;
 		if (err)
-			break;
+			return err;
 
-		*retlen += retsize;
-		len -= size;
-		if (len == 0)
-			break;
+		devops.len = ops->len - ops->retlen;
+		if (!devops.len)
+			return 0;
+
+		if (devops.datbuf)
+			devops.datbuf += devops.retlen;
+		if (devops.oobbuf)
+			devops.oobbuf += devops.ooblen;
 
-		err = -EINVAL;
-		buf += size;
 		from = 0;
 	}
-	return err;
+	return -EINVAL;
 }
 
 static int
-concat_write_oob(struct mtd_info *mtd, loff_t to, size_t len,
-		 size_t * retlen, const u_char * buf)
+concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
 {
 	struct mtd_concat *concat = CONCAT(mtd);
-	int err = -EINVAL;
-	int i;
+	struct mtd_oob_ops devops = *ops;
+	int i, err;
 
 	if (!(mtd->flags & MTD_WRITEABLE))
 		return -EROFS;
 
-	*retlen = 0;
+	ops->retlen = 0;
 
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
-		size_t size, retsize;
 
 		if (to >= subdev->size) {
-			size = 0;
 			to -= subdev->size;
 			continue;
 		}
-		if (to + len > subdev->size)
-			size = subdev->size - to;
-		else
-			size = len;
 
-		if (!(subdev->flags & MTD_WRITEABLE))
-			err = -EROFS;
-		else if (subdev->write_oob)
-			err = subdev->write_oob(subdev, to, size, &retsize,
-						buf);
-		else
-			err = -EINVAL;
+		/* partial write ? */
+		if (to + devops.len > subdev->size)
+			devops.len = subdev->size - to;
 
+		err = subdev->write_oob(subdev, to, &devops);
+		ops->retlen += devops.retlen;
 		if (err)
-			break;
+			return err;
 
-		*retlen += retsize;
-		len -= size;
-		if (len == 0)
-			break;
+		devops.len = ops->len - ops->retlen;
+		if (!devops.len)
+			return 0;
 
-		err = -EINVAL;
-		buf += size;
+		if (devops.datbuf)
+			devops.datbuf += devops.retlen;
+		if (devops.oobbuf)
+			devops.oobbuf += devops.ooblen;
 		to = 0;
 	}
-	return err;
+	return -EINVAL;
 }
 
 static void concat_erase_callback(struct erase_info *instr)