Diffstat (limited to 'drivers/mtd/mtdpart.c')
 drivers/mtd/mtdpart.c | 200
 1 file changed, 84 insertions(+), 116 deletions(-)
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index a3d44c3416b4..9651c06de0a9 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -65,12 +65,8 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
 	int res;
 
 	stats = part->master->ecc_stats;
-
-	if (from >= mtd->size)
-		len = 0;
-	else if (from + len > mtd->size)
-		len = mtd->size - from;
-	res = mtd_read(part->master, from + part->offset, len, retlen, buf);
+	res = part->master->_read(part->master, from + part->offset, len,
+				  retlen, buf);
 	if (unlikely(res)) {
 		if (mtd_is_bitflip(res))
 			mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
@@ -84,19 +80,16 @@ static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
 			size_t *retlen, void **virt, resource_size_t *phys)
 {
 	struct mtd_part *part = PART(mtd);
-	if (from >= mtd->size)
-		len = 0;
-	else if (from + len > mtd->size)
-		len = mtd->size - from;
-	return mtd_point(part->master, from + part->offset, len, retlen,
-			 virt, phys);
+
+	return part->master->_point(part->master, from + part->offset, len,
+				    retlen, virt, phys);
 }
 
-static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
 {
 	struct mtd_part *part = PART(mtd);
 
-	mtd_unpoint(part->master, from + part->offset, len);
+	return part->master->_unpoint(part->master, from + part->offset, len);
 }
 
 static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
@@ -107,7 +100,8 @@ static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
 	struct mtd_part *part = PART(mtd);
 
 	offset += part->offset;
-	return mtd_get_unmapped_area(part->master, len, offset, flags);
+	return part->master->_get_unmapped_area(part->master, len, offset,
+						flags);
 }
 
 static int part_read_oob(struct mtd_info *mtd, loff_t from,
@@ -138,7 +132,7 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
 		return -EINVAL;
 	}
 
-	res = mtd_read_oob(part->master, from + part->offset, ops);
+	res = part->master->_read_oob(part->master, from + part->offset, ops);
 	if (unlikely(res)) {
 		if (mtd_is_bitflip(res))
 			mtd->ecc_stats.corrected++;
@@ -152,55 +146,46 @@ static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
 		size_t len, size_t *retlen, u_char *buf)
 {
 	struct mtd_part *part = PART(mtd);
-	return mtd_read_user_prot_reg(part->master, from, len, retlen, buf);
+	return part->master->_read_user_prot_reg(part->master, from, len,
+						 retlen, buf);
 }
 
 static int part_get_user_prot_info(struct mtd_info *mtd,
 		struct otp_info *buf, size_t len)
 {
 	struct mtd_part *part = PART(mtd);
-	return mtd_get_user_prot_info(part->master, buf, len);
+	return part->master->_get_user_prot_info(part->master, buf, len);
 }
 
 static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
 		size_t len, size_t *retlen, u_char *buf)
 {
 	struct mtd_part *part = PART(mtd);
-	return mtd_read_fact_prot_reg(part->master, from, len, retlen, buf);
+	return part->master->_read_fact_prot_reg(part->master, from, len,
+						 retlen, buf);
 }
 
 static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
 		size_t len)
 {
 	struct mtd_part *part = PART(mtd);
-	return mtd_get_fact_prot_info(part->master, buf, len);
+	return part->master->_get_fact_prot_info(part->master, buf, len);
 }
 
 static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
 		size_t *retlen, const u_char *buf)
 {
 	struct mtd_part *part = PART(mtd);
-	if (!(mtd->flags & MTD_WRITEABLE))
-		return -EROFS;
-	if (to >= mtd->size)
-		len = 0;
-	else if (to + len > mtd->size)
-		len = mtd->size - to;
-	return mtd_write(part->master, to + part->offset, len, retlen, buf);
+	return part->master->_write(part->master, to + part->offset, len,
+				    retlen, buf);
 }
 
 static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
 		size_t *retlen, const u_char *buf)
 {
 	struct mtd_part *part = PART(mtd);
-	if (!(mtd->flags & MTD_WRITEABLE))
-		return -EROFS;
-	if (to >= mtd->size)
-		len = 0;
-	else if (to + len > mtd->size)
-		len = mtd->size - to;
-	return mtd_panic_write(part->master, to + part->offset, len, retlen,
-			buf);
+	return part->master->_panic_write(part->master, to + part->offset, len,
+					  retlen, buf);
 }
 
 static int part_write_oob(struct mtd_info *mtd, loff_t to,
@@ -208,50 +193,43 @@ static int part_write_oob(struct mtd_info *mtd, loff_t to,
 {
 	struct mtd_part *part = PART(mtd);
 
-	if (!(mtd->flags & MTD_WRITEABLE))
-		return -EROFS;
-
 	if (to >= mtd->size)
 		return -EINVAL;
 	if (ops->datbuf && to + ops->len > mtd->size)
 		return -EINVAL;
-	return mtd_write_oob(part->master, to + part->offset, ops);
+	return part->master->_write_oob(part->master, to + part->offset, ops);
 }
 
 static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
 		size_t len, size_t *retlen, u_char *buf)
 {
 	struct mtd_part *part = PART(mtd);
-	return mtd_write_user_prot_reg(part->master, from, len, retlen, buf);
+	return part->master->_write_user_prot_reg(part->master, from, len,
+						  retlen, buf);
 }
 
 static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
 		size_t len)
 {
 	struct mtd_part *part = PART(mtd);
-	return mtd_lock_user_prot_reg(part->master, from, len);
+	return part->master->_lock_user_prot_reg(part->master, from, len);
 }
 
 static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
 		unsigned long count, loff_t to, size_t *retlen)
 {
 	struct mtd_part *part = PART(mtd);
-	if (!(mtd->flags & MTD_WRITEABLE))
-		return -EROFS;
-	return mtd_writev(part->master, vecs, count, to + part->offset,
-			retlen);
+	return part->master->_writev(part->master, vecs, count,
+				     to + part->offset, retlen);
 }
 
 static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
 {
 	struct mtd_part *part = PART(mtd);
 	int ret;
-	if (!(mtd->flags & MTD_WRITEABLE))
-		return -EROFS;
-	if (instr->addr >= mtd->size)
-		return -EINVAL;
+
 	instr->addr += part->offset;
-	ret = mtd_erase(part->master, instr);
+	ret = part->master->_erase(part->master, instr);
 	if (ret) {
 		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
 			instr->fail_addr -= part->offset;
@@ -262,7 +240,7 @@ static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
 
 void mtd_erase_callback(struct erase_info *instr)
 {
-	if (instr->mtd->erase == part_erase) {
+	if (instr->mtd->_erase == part_erase) {
 		struct mtd_part *part = PART(instr->mtd);
 
 		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
@@ -277,52 +255,44 @@ EXPORT_SYMBOL_GPL(mtd_erase_callback);
 static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	struct mtd_part *part = PART(mtd);
-	if ((len + ofs) > mtd->size)
-		return -EINVAL;
-	return mtd_lock(part->master, ofs + part->offset, len);
+	return part->master->_lock(part->master, ofs + part->offset, len);
 }
 
 static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	struct mtd_part *part = PART(mtd);
-	if ((len + ofs) > mtd->size)
-		return -EINVAL;
-	return mtd_unlock(part->master, ofs + part->offset, len);
+	return part->master->_unlock(part->master, ofs + part->offset, len);
 }
 
 static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	struct mtd_part *part = PART(mtd);
-	if ((len + ofs) > mtd->size)
-		return -EINVAL;
-	return mtd_is_locked(part->master, ofs + part->offset, len);
+	return part->master->_is_locked(part->master, ofs + part->offset, len);
 }
 
 static void part_sync(struct mtd_info *mtd)
 {
 	struct mtd_part *part = PART(mtd);
-	mtd_sync(part->master);
+	part->master->_sync(part->master);
 }
 
 static int part_suspend(struct mtd_info *mtd)
 {
 	struct mtd_part *part = PART(mtd);
-	return mtd_suspend(part->master);
+	return part->master->_suspend(part->master);
 }
 
 static void part_resume(struct mtd_info *mtd)
 {
 	struct mtd_part *part = PART(mtd);
-	mtd_resume(part->master);
+	part->master->_resume(part->master);
 }
 
 static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
 {
 	struct mtd_part *part = PART(mtd);
-	if (ofs >= mtd->size)
-		return -EINVAL;
 	ofs += part->offset;
-	return mtd_block_isbad(part->master, ofs);
+	return part->master->_block_isbad(part->master, ofs);
 }
 
 static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
@@ -330,12 +300,8 @@ static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
 	struct mtd_part *part = PART(mtd);
 	int res;
 
-	if (!(mtd->flags & MTD_WRITEABLE))
-		return -EROFS;
-	if (ofs >= mtd->size)
-		return -EINVAL;
 	ofs += part->offset;
-	res = mtd_block_markbad(part->master, ofs);
+	res = part->master->_block_markbad(part->master, ofs);
 	if (!res)
 		mtd->ecc_stats.badblocks++;
 	return res;
@@ -410,54 +376,55 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
 	 */
 	slave->mtd.dev.parent = master->dev.parent;
 
-	slave->mtd.read = part_read;
-	slave->mtd.write = part_write;
+	slave->mtd._read = part_read;
+	slave->mtd._write = part_write;
 
-	if (master->panic_write)
-		slave->mtd.panic_write = part_panic_write;
+	if (master->_panic_write)
+		slave->mtd._panic_write = part_panic_write;
 
-	if (master->point && master->unpoint) {
-		slave->mtd.point = part_point;
-		slave->mtd.unpoint = part_unpoint;
+	if (master->_point && master->_unpoint) {
+		slave->mtd._point = part_point;
+		slave->mtd._unpoint = part_unpoint;
 	}
 
-	if (master->get_unmapped_area)
-		slave->mtd.get_unmapped_area = part_get_unmapped_area;
-	if (master->read_oob)
-		slave->mtd.read_oob = part_read_oob;
-	if (master->write_oob)
-		slave->mtd.write_oob = part_write_oob;
-	if (master->read_user_prot_reg)
-		slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
-	if (master->read_fact_prot_reg)
-		slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
-	if (master->write_user_prot_reg)
-		slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
-	if (master->lock_user_prot_reg)
-		slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
-	if (master->get_user_prot_info)
-		slave->mtd.get_user_prot_info = part_get_user_prot_info;
-	if (master->get_fact_prot_info)
-		slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
-	if (master->sync)
-		slave->mtd.sync = part_sync;
-	if (!partno && !master->dev.class && master->suspend && master->resume) {
-		slave->mtd.suspend = part_suspend;
-		slave->mtd.resume = part_resume;
+	if (master->_get_unmapped_area)
+		slave->mtd._get_unmapped_area = part_get_unmapped_area;
+	if (master->_read_oob)
+		slave->mtd._read_oob = part_read_oob;
+	if (master->_write_oob)
+		slave->mtd._write_oob = part_write_oob;
+	if (master->_read_user_prot_reg)
+		slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
+	if (master->_read_fact_prot_reg)
+		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
+	if (master->_write_user_prot_reg)
+		slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
+	if (master->_lock_user_prot_reg)
+		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
+	if (master->_get_user_prot_info)
+		slave->mtd._get_user_prot_info = part_get_user_prot_info;
+	if (master->_get_fact_prot_info)
+		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
+	if (master->_sync)
+		slave->mtd._sync = part_sync;
+	if (!partno && !master->dev.class && master->_suspend &&
+	    master->_resume) {
+		slave->mtd._suspend = part_suspend;
+		slave->mtd._resume = part_resume;
 	}
-	if (master->writev)
-		slave->mtd.writev = part_writev;
-	if (master->lock)
-		slave->mtd.lock = part_lock;
-	if (master->unlock)
-		slave->mtd.unlock = part_unlock;
-	if (master->is_locked)
-		slave->mtd.is_locked = part_is_locked;
-	if (master->block_isbad)
-		slave->mtd.block_isbad = part_block_isbad;
-	if (master->block_markbad)
-		slave->mtd.block_markbad = part_block_markbad;
-	slave->mtd.erase = part_erase;
+	if (master->_writev)
+		slave->mtd._writev = part_writev;
+	if (master->_lock)
+		slave->mtd._lock = part_lock;
+	if (master->_unlock)
+		slave->mtd._unlock = part_unlock;
+	if (master->_is_locked)
+		slave->mtd._is_locked = part_is_locked;
+	if (master->_block_isbad)
+		slave->mtd._block_isbad = part_block_isbad;
+	if (master->_block_markbad)
+		slave->mtd._block_markbad = part_block_markbad;
+	slave->mtd._erase = part_erase;
 	slave->master = master;
 	slave->offset = part->offset;
 
@@ -549,7 +516,8 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
 	}
 
 	slave->mtd.ecclayout = master->ecclayout;
-	if (master->block_isbad) {
+	slave->mtd.ecc_strength = master->ecc_strength;
+	if (master->_block_isbad) {
 		uint64_t offs = 0;
 
 		while (offs < slave->mtd.size) {
@@ -761,7 +729,7 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
 	for ( ; ret <= 0 && *types; types++) {
 		parser = get_partition_parser(*types);
 		if (!parser && !request_module("%s", *types))
-                       parser = get_partition_parser(*types);
+			parser = get_partition_parser(*types);
 		if (!parser)
 			continue;
 		ret = (*parser->parse_fn)(master, pparts, data);