Diffstat (limited to 'drivers/mtd/lpddr/lpddr_cmds.c')
-rw-r--r--  drivers/mtd/lpddr/lpddr_cmds.c  796
1 file changed, 796 insertions, 0 deletions
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
new file mode 100644
index 00000000000..e22ca49583e
--- /dev/null
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -0,0 +1,796 @@
1 | /* | ||
2 | * LPDDR flash memory device operations. This module provides read, write, | ||
3 | * erase, lock/unlock support for LPDDR flash memories | ||
4 | * (C) 2008 Korolev Alexey <akorolev@infradead.org> | ||
5 | * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com> | ||
6 | * Many thanks to Roman Borisov for initial enabling | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * as published by the Free Software Foundation; either version 2 | ||
11 | * of the License, or (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | * TODO: | ||
23 | * Implement VPP management | ||
24 | * Implement XIP support | ||
25 | * Implement OTP support | ||
26 | */ | ||
27 | #include <linux/mtd/pfow.h> | ||
28 | #include <linux/mtd/qinfo.h> | ||
29 | |||
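/*
 * Overview (an illustrative summary inferred from the command sequences
 * used in this file, not an authoritative restatement of the PFOW spec):
 * the device is driven through a window of overlay registers at
 * map->pfow_base.  A command code (plus address/data where needed) is
 * written into the window and triggered via PFOW_COMMAND_EXECUTE;
 * send_pfow_command() from <linux/mtd/pfow.h> wraps that sequence, and
 * completion/errors are then polled from the Device Status Register
 * (PFOW_DSR) by wait_for_ready() below.
 */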
30 | static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len, | ||
31 | size_t *retlen, u_char *buf); | ||
32 | static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, | ||
33 | size_t len, size_t *retlen, const u_char *buf); | ||
34 | static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs, | ||
35 | unsigned long count, loff_t to, size_t *retlen); | ||
36 | static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr); | ||
37 | static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); | ||
38 | static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); | ||
39 | static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len, | ||
40 | size_t *retlen, void **mtdbuf, resource_size_t *phys); | ||
41 | static void lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len); | ||
42 | static int get_chip(struct map_info *map, struct flchip *chip, int mode); | ||
43 | static int chip_ready(struct map_info *map, struct flchip *chip, int mode); | ||
44 | static void put_chip(struct map_info *map, struct flchip *chip); | ||
45 | |||
46 | struct mtd_info *lpddr_cmdset(struct map_info *map) | ||
47 | { | ||
48 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
49 | struct flchip_shared *shared; | ||
50 | struct flchip *chip; | ||
51 | struct mtd_info *mtd; | ||
52 | int numchips; | ||
53 | int i, j; | ||
54 | |||
55 | mtd = kzalloc(sizeof(*mtd), GFP_KERNEL); | ||
56 | if (!mtd) { | ||
57 | printk(KERN_ERR "Failed to allocate memory for MTD device\n"); | ||
58 | return NULL; | ||
59 | } | ||
60 | mtd->priv = map; | ||
61 | mtd->type = MTD_NORFLASH; | ||
62 | |||
63 | /* Fill in the default mtd operations */ | ||
64 | mtd->read = lpddr_read; | ||
65 | mtd->type = MTD_NORFLASH; | ||
66 | mtd->flags = MTD_CAP_NORFLASH; | ||
67 | mtd->flags &= ~MTD_BIT_WRITEABLE; | ||
68 | mtd->erase = lpddr_erase; | ||
69 | mtd->write = lpddr_write_buffers; | ||
70 | mtd->writev = lpddr_writev; | ||
71 | mtd->read_oob = NULL; | ||
72 | mtd->write_oob = NULL; | ||
73 | mtd->sync = NULL; | ||
74 | mtd->lock = lpddr_lock; | ||
75 | mtd->unlock = lpddr_unlock; | ||
76 | mtd->suspend = NULL; | ||
77 | mtd->resume = NULL; | ||
78 | if (map_is_linear(map)) { | ||
79 | mtd->point = lpddr_point; | ||
80 | mtd->unpoint = lpddr_unpoint; | ||
81 | } | ||
82 | mtd->block_isbad = NULL; | ||
83 | mtd->block_markbad = NULL; | ||
84 | mtd->size = 1 << lpddr->qinfo->DevSizeShift; | ||
85 | mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift; | ||
86 | mtd->writesize = 1 << lpddr->qinfo->BufSizeShift; | ||
87 | |||
88 | shared = kmalloc(sizeof(struct flchip_shared) * lpddr->numchips, | ||
89 | GFP_KERNEL); | ||
90 | if (!shared) { | ||
91 | kfree(lpddr); | ||
92 | kfree(mtd); | ||
93 | return NULL; | ||
94 | } | ||
95 | |||
96 | chip = &lpddr->chips[0]; | ||
97 | numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum; | ||
98 | for (i = 0; i < numchips; i++) { | ||
99 | shared[i].writing = shared[i].erasing = NULL; | ||
100 | spin_lock_init(&shared[i].lock); | ||
101 | for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) { | ||
102 | *chip = lpddr->chips[i]; | ||
103 | chip->start += j << lpddr->chipshift; | ||
104 | chip->oldstate = chip->state = FL_READY; | ||
105 | chip->priv = &shared[i]; | ||
106 | /* these must be re-initialised here too, since | ||
107 | they contain memory references (wait queue, lock). */ | ||
108 | init_waitqueue_head(&chip->wq); | ||
109 | spin_lock_init(&chip->_spinlock); | ||
110 | chip->mutex = &chip->_spinlock; | ||
111 | chip++; | ||
112 | } | ||
113 | } | ||
114 | |||
115 | return mtd; | ||
116 | } | ||
117 | EXPORT_SYMBOL(lpddr_cmdset); | ||
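/*
 * Typical use by a map/probe driver (an illustrative sketch only, not code
 * from this file; the probing driver is expected to have filled in
 * map->fldrv_priv with a struct lpddr_private carrying a parsed qinfo):
 *
 *	struct mtd_info *mtd = lpddr_cmdset(map);
 *	if (!mtd)
 *		return -ENODEV;
 *	return add_mtd_device(mtd) ? -ENODEV : 0;
 */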
118 | |||
119 | static int wait_for_ready(struct map_info *map, struct flchip *chip, | ||
120 | unsigned int chip_op_time) | ||
121 | { | ||
122 | unsigned int timeo, reset_timeo, sleep_time; | ||
123 | unsigned int dsr; | ||
124 | flstate_t chip_state = chip->state; | ||
125 | int ret = 0; | ||
126 | |||
127 | /* set our timeout to 8 times the expected delay */ | ||
128 | timeo = chip_op_time * 8; | ||
129 | if (!timeo) | ||
130 | timeo = 500000; | ||
131 | reset_timeo = timeo; | ||
132 | sleep_time = chip_op_time / 2; | ||
133 | |||
134 | for (;;) { | ||
135 | dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR)); | ||
136 | if (dsr & DSR_READY_STATUS) | ||
137 | break; | ||
138 | if (!timeo) { | ||
139 | printk(KERN_ERR "%s: Flash timeout error, state %d\n", | ||
140 | map->name, chip_state); | ||
141 | ret = -ETIME; | ||
142 | break; | ||
143 | } | ||
144 | |||
145 | /* OK, still waiting. Drop the lock, wait a while and retry. */ | ||
146 | spin_unlock(chip->mutex); | ||
147 | if (sleep_time >= 1000000/HZ) { | ||
148 | /* | ||
149 | * Half of the expected delay still remains and is | ||
150 | * long enough to be spent in a sleeping delay instead | ||
151 | * of busy waiting. | ||
152 | */ | ||
153 | msleep(sleep_time/1000); | ||
154 | timeo -= sleep_time; | ||
155 | sleep_time = 1000000/HZ; | ||
156 | } else { | ||
157 | udelay(1); | ||
158 | cond_resched(); | ||
159 | timeo--; | ||
160 | } | ||
161 | spin_lock(chip->mutex); | ||
162 | |||
163 | while (chip->state != chip_state) { | ||
164 | /* Someone's suspended the operation: sleep */ | ||
165 | DECLARE_WAITQUEUE(wait, current); | ||
166 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
167 | add_wait_queue(&chip->wq, &wait); | ||
168 | spin_unlock(chip->mutex); | ||
169 | schedule(); | ||
170 | remove_wait_queue(&chip->wq, &wait); | ||
171 | spin_lock(chip->mutex); | ||
172 | } | ||
173 | if (chip->erase_suspended || chip->write_suspended) { | ||
174 | /* A suspend occurred while we slept: reset the timeout */ | ||
175 | timeo = reset_timeo; | ||
176 | chip->erase_suspended = chip->write_suspended = 0; | ||
177 | } | ||
178 | } | ||
179 | /* check status for errors */ | ||
180 | if (dsr & DSR_ERR) { | ||
181 | /* Clear DSR */ | ||
182 | map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR); | ||
183 | printk(KERN_WARNING "%s: Bad status on wait: 0x%x\n", | ||
184 | map->name, dsr); | ||
185 | print_drs_error(dsr); | ||
186 | ret = -EIO; | ||
187 | } | ||
188 | chip->state = FL_READY; | ||
189 | return ret; | ||
190 | } | ||
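/*
 * Timing note (an inference from the arithmetic above and from the
 * callers, not from the qinfo documentation): chip_op_time and the derived
 * timeo/sleep_time counters are in microseconds, with an 8x safety margin
 * on the expected operation time.  For example, with chip_op_time = 20000
 * (20 ms) and HZ = 100 the loop would msleep() in 10 ms steps; for short
 * operations the remaining delay is burnt in udelay(1) steps instead.
 * The "*1000" at the do_erase_oneblock() call site suggests BlockEraseTime
 * is encoded in milliseconds while ProgBufferTime is already microseconds.
 */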
191 | |||
192 | static int get_chip(struct map_info *map, struct flchip *chip, int mode) | ||
193 | { | ||
194 | int ret; | ||
195 | DECLARE_WAITQUEUE(wait, current); | ||
196 | |||
197 | retry: | ||
198 | if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING) | ||
199 | && chip->state != FL_SYNCING) { | ||
200 | /* | ||
201 | * OK. There is a possibility of contention on the write/erase | ||
202 | * operations, which are global to the real chip and not per | ||
203 | * partition. So let's fight it over in the partition which | ||
204 | * currently has authority over the operation. | ||
205 | * | ||
206 | * The rules are as follows: | ||
207 | * | ||
208 | * - any write operation must own shared->writing. | ||
209 | * | ||
210 | * - any erase operation must own _both_ shared->writing and | ||
211 | * shared->erasing. | ||
212 | * | ||
213 | * - contention arbitration is handled in the owner's context. | ||
214 | * | ||
215 | * The 'shared' struct can be read and/or written only when | ||
216 | * its lock is taken. | ||
217 | */ | ||
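/*
 * Illustrative walk-through of the code below (two partitions A and B on
 * one chip assumed): if A currently owns shared->writing, a writer on B
 * takes A's mutex and resolves the conflict in A's context via
 * chip_ready(); depending on A's state it either sleeps until A is done
 * or takes over ownership.  Separately, a read on a partition that is
 * erasing is handled in chip_ready() by suspending the erase
 * (oldstate = FL_ERASING), and put_chip() resumes it afterwards.
 */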
218 | struct flchip_shared *shared = chip->priv; | ||
219 | struct flchip *contender; | ||
220 | spin_lock(&shared->lock); | ||
221 | contender = shared->writing; | ||
222 | if (contender && contender != chip) { | ||
223 | /* | ||
224 | * The engine to perform desired operation on this | ||
225 | * partition is already in use by someone else. | ||
226 | * Let's fight over it in the context of the chip | ||
227 | * currently using it. If it is possible to suspend, | ||
228 | * that other partition will do just that, otherwise | ||
229 | * it'll happily send us to sleep. In any case, when | ||
230 | * get_chip returns success we're clear to go ahead. | ||
231 | */ | ||
232 | ret = spin_trylock(contender->mutex); | ||
233 | spin_unlock(&shared->lock); | ||
234 | if (!ret) | ||
235 | goto retry; | ||
236 | spin_unlock(chip->mutex); | ||
237 | ret = chip_ready(map, contender, mode); | ||
238 | spin_lock(chip->mutex); | ||
239 | |||
240 | if (ret == -EAGAIN) { | ||
241 | spin_unlock(contender->mutex); | ||
242 | goto retry; | ||
243 | } | ||
244 | if (ret) { | ||
245 | spin_unlock(contender->mutex); | ||
246 | return ret; | ||
247 | } | ||
248 | spin_lock(&shared->lock); | ||
249 | |||
250 | /* We should not own the chip if it is already in FL_SYNCING | ||
251 | * state. Put the contender and retry. */ | ||
252 | if (chip->state == FL_SYNCING) { | ||
253 | put_chip(map, contender); | ||
254 | spin_unlock(contender->mutex); | ||
255 | goto retry; | ||
256 | } | ||
257 | spin_unlock(contender->mutex); | ||
258 | } | ||
259 | |||
260 | /* Check if there is a suspended erase on this chip. | ||
261 | We must sleep in that case. */ | ||
262 | if (mode == FL_ERASING && shared->erasing | ||
263 | && shared->erasing->oldstate == FL_ERASING) { | ||
264 | spin_unlock(&shared->lock); | ||
265 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
266 | add_wait_queue(&chip->wq, &wait); | ||
267 | spin_unlock(chip->mutex); | ||
268 | schedule(); | ||
269 | remove_wait_queue(&chip->wq, &wait); | ||
270 | spin_lock(chip->mutex); | ||
271 | goto retry; | ||
272 | } | ||
273 | |||
274 | /* We now own it */ | ||
275 | shared->writing = chip; | ||
276 | if (mode == FL_ERASING) | ||
277 | shared->erasing = chip; | ||
278 | spin_unlock(&shared->lock); | ||
279 | } | ||
280 | |||
281 | ret = chip_ready(map, chip, mode); | ||
282 | if (ret == -EAGAIN) | ||
283 | goto retry; | ||
284 | |||
285 | return ret; | ||
286 | } | ||
287 | |||
288 | static int chip_ready(struct map_info *map, struct flchip *chip, int mode) | ||
289 | { | ||
290 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
291 | int ret = 0; | ||
292 | DECLARE_WAITQUEUE(wait, current); | ||
293 | |||
294 | /* Prevent setting state FL_SYNCING for chip in suspended state. */ | ||
295 | if (FL_SYNCING == mode && FL_READY != chip->oldstate) | ||
296 | goto sleep; | ||
297 | |||
298 | switch (chip->state) { | ||
299 | case FL_READY: | ||
300 | case FL_JEDEC_QUERY: | ||
301 | return 0; | ||
302 | |||
303 | case FL_ERASING: | ||
304 | if (!lpddr->qinfo->SuspEraseSupp || | ||
305 | !(mode == FL_READY || mode == FL_POINT)) | ||
306 | goto sleep; | ||
307 | |||
308 | map_write(map, CMD(LPDDR_SUSPEND), | ||
309 | map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND); | ||
310 | chip->oldstate = FL_ERASING; | ||
311 | chip->state = FL_ERASE_SUSPENDING; | ||
312 | ret = wait_for_ready(map, chip, 0); | ||
313 | if (ret) { | ||
314 | /* Oops. Something went wrong. */ | ||
315 | /* Resume and pretend we weren't here. */ | ||
316 | map_write(map, CMD(LPDDR_RESUME), | ||
317 | map->pfow_base + PFOW_COMMAND_CODE); | ||
318 | map_write(map, CMD(LPDDR_START_EXECUTION), | ||
319 | map->pfow_base + PFOW_COMMAND_EXECUTE); | ||
320 | chip->state = FL_ERASING; | ||
321 | chip->oldstate = FL_READY; | ||
322 | printk(KERN_ERR "%s: suspend operation failed. " | ||
323 | "State may be wrong\n", map->name); | ||
324 | return -EIO; | ||
325 | } | ||
326 | chip->erase_suspended = 1; | ||
327 | chip->state = FL_READY; | ||
328 | return 0; | ||
329 | /* Erase suspend */ | ||
330 | case FL_POINT: | ||
331 | /* Only if there's no operation suspended... */ | ||
332 | if (mode == FL_READY && chip->oldstate == FL_READY) | ||
333 | return 0; | ||
334 | |||
335 | default: | ||
336 | sleep: | ||
337 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
338 | add_wait_queue(&chip->wq, &wait); | ||
339 | spin_unlock(chip->mutex); | ||
340 | schedule(); | ||
341 | remove_wait_queue(&chip->wq, &wait); | ||
342 | spin_lock(chip->mutex); | ||
343 | return -EAGAIN; | ||
344 | } | ||
345 | } | ||
346 | |||
347 | static void put_chip(struct map_info *map, struct flchip *chip) | ||
348 | { | ||
349 | if (chip->priv) { | ||
350 | struct flchip_shared *shared = chip->priv; | ||
351 | spin_lock(&shared->lock); | ||
352 | if (shared->writing == chip && chip->oldstate == FL_READY) { | ||
353 | /* We own the ability to write, but we're done */ | ||
354 | shared->writing = shared->erasing; | ||
355 | if (shared->writing && shared->writing != chip) { | ||
356 | /* give back the ownership */ | ||
357 | struct flchip *loaner = shared->writing; | ||
358 | spin_lock(loaner->mutex); | ||
359 | spin_unlock(&shared->lock); | ||
360 | spin_unlock(chip->mutex); | ||
361 | put_chip(map, loaner); | ||
362 | spin_lock(chip->mutex); | ||
363 | spin_unlock(loaner->mutex); | ||
364 | wake_up(&chip->wq); | ||
365 | return; | ||
366 | } | ||
367 | shared->erasing = NULL; | ||
368 | shared->writing = NULL; | ||
369 | } else if (shared->erasing == chip && shared->writing != chip) { | ||
370 | /* | ||
371 | * We own the ability to erase without the ability | ||
372 | * to write, which means the erase was suspended | ||
373 | * and some other partition is currently writing. | ||
374 | * Don't let the switch below mess things up since | ||
375 | * we don't have ownership to resume anything. | ||
376 | */ | ||
377 | spin_unlock(&shared->lock); | ||
378 | wake_up(&chip->wq); | ||
379 | return; | ||
380 | } | ||
381 | spin_unlock(&shared->lock); | ||
382 | } | ||
383 | |||
384 | switch (chip->oldstate) { | ||
385 | case FL_ERASING: | ||
386 | chip->state = chip->oldstate; | ||
387 | map_write(map, CMD(LPDDR_RESUME), | ||
388 | map->pfow_base + PFOW_COMMAND_CODE); | ||
389 | map_write(map, CMD(LPDDR_START_EXECUTION), | ||
390 | map->pfow_base + PFOW_COMMAND_EXECUTE); | ||
391 | chip->oldstate = FL_READY; | ||
392 | chip->state = FL_ERASING; | ||
393 | break; | ||
394 | case FL_READY: | ||
395 | break; | ||
396 | default: | ||
397 | printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n", | ||
398 | map->name, chip->oldstate); | ||
399 | } | ||
400 | wake_up(&chip->wq); | ||
401 | } | ||
402 | |||
403 | int do_write_buffer(struct map_info *map, struct flchip *chip, | ||
404 | unsigned long adr, const struct kvec **pvec, | ||
405 | unsigned long *pvec_seek, int len) | ||
406 | { | ||
407 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
408 | map_word datum; | ||
409 | int ret, wbufsize, word_gap, words; | ||
410 | const struct kvec *vec; | ||
411 | unsigned long vec_seek; | ||
412 | unsigned long prog_buf_ofs; | ||
413 | |||
414 | wbufsize = 1 << lpddr->qinfo->BufSizeShift; | ||
415 | |||
416 | spin_lock(chip->mutex); | ||
417 | ret = get_chip(map, chip, FL_WRITING); | ||
418 | if (ret) { | ||
419 | spin_unlock(chip->mutex); | ||
420 | return ret; | ||
421 | } | ||
422 | /* Figure out the number of words to write */ | ||
423 | word_gap = (-adr & (map_bankwidth(map)-1)); | ||
424 | words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map); | ||
425 | if (!word_gap) { | ||
426 | words--; | ||
427 | } else { | ||
428 | word_gap = map_bankwidth(map) - word_gap; | ||
429 | adr -= word_gap; | ||
430 | datum = map_word_ff(map); | ||
431 | } | ||
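/*
 * Worked example of the alignment code above (illustrative, assuming
 * map_bankwidth(map) == 4): for adr = 0x102 and len = 5, the initial
 * word_gap is (-0x102 & 3) = 2; since it is non-zero, word_gap becomes
 * 4 - 2 = 2 leading pad bytes, adr is rounded down to 0x100 and datum
 * is preset to all 0xFF, so the pad bytes leave the corresponding flash
 * contents untouched when the buffer is programmed.
 */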
432 | /* Write data */ | ||
434 | /* Get the program buffer offset from PFOW register data first */ | ||
434 | prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map, | ||
435 | map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET)); | ||
436 | vec = *pvec; | ||
437 | vec_seek = *pvec_seek; | ||
438 | do { | ||
439 | int n = map_bankwidth(map) - word_gap; | ||
440 | |||
441 | if (n > vec->iov_len - vec_seek) | ||
442 | n = vec->iov_len - vec_seek; | ||
443 | if (n > len) | ||
444 | n = len; | ||
445 | |||
446 | if (!word_gap && (len < map_bankwidth(map))) | ||
447 | datum = map_word_ff(map); | ||
448 | |||
449 | datum = map_word_load_partial(map, datum, | ||
450 | vec->iov_base + vec_seek, word_gap, n); | ||
451 | |||
452 | len -= n; | ||
453 | word_gap += n; | ||
454 | if (!len || word_gap == map_bankwidth(map)) { | ||
455 | map_write(map, datum, prog_buf_ofs); | ||
456 | prog_buf_ofs += map_bankwidth(map); | ||
457 | word_gap = 0; | ||
458 | } | ||
459 | |||
460 | vec_seek += n; | ||
461 | if (vec_seek == vec->iov_len) { | ||
462 | vec++; | ||
463 | vec_seek = 0; | ||
464 | } | ||
465 | } while (len); | ||
466 | *pvec = vec; | ||
467 | *pvec_seek = vec_seek; | ||
468 | |||
469 | /* GO GO GO */ | ||
470 | send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL); | ||
471 | chip->state = FL_WRITING; | ||
472 | ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime)); | ||
473 | if (ret) { | ||
474 | printk(KERN_WARNING "%s: Buffer program error: %d at %lx\n", | ||
475 | map->name, ret, adr); | ||
476 | goto out; | ||
477 | } | ||
478 | |||
479 | out: put_chip(map, chip); | ||
480 | spin_unlock(chip->mutex); | ||
481 | return ret; | ||
482 | } | ||
483 | |||
484 | int do_erase_oneblock(struct mtd_info *mtd, loff_t adr) | ||
485 | { | ||
486 | struct map_info *map = mtd->priv; | ||
487 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
488 | int chipnum = adr >> lpddr->chipshift; | ||
489 | struct flchip *chip = &lpddr->chips[chipnum]; | ||
490 | int ret; | ||
491 | |||
492 | spin_lock(chip->mutex); | ||
493 | ret = get_chip(map, chip, FL_ERASING); | ||
494 | if (ret) { | ||
495 | spin_unlock(chip->mutex); | ||
496 | return ret; | ||
497 | } | ||
498 | send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL); | ||
499 | chip->state = FL_ERASING; | ||
500 | ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000); | ||
501 | if (ret) { | ||
502 | printk(KERN_WARNING "%s: Erase block error %d at %llx\n", | ||
503 | map->name, ret, adr); | ||
504 | goto out; | ||
505 | } | ||
506 | out: put_chip(map, chip); | ||
507 | spin_unlock(chip->mutex); | ||
508 | return ret; | ||
509 | } | ||
510 | |||
511 | static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len, | ||
512 | size_t *retlen, u_char *buf) | ||
513 | { | ||
514 | struct map_info *map = mtd->priv; | ||
515 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
516 | int chipnum = adr >> lpddr->chipshift; | ||
517 | struct flchip *chip = &lpddr->chips[chipnum]; | ||
518 | int ret = 0; | ||
519 | |||
520 | spin_lock(chip->mutex); | ||
521 | ret = get_chip(map, chip, FL_READY); | ||
522 | if (ret) { | ||
523 | spin_unlock(chip->mutex); | ||
524 | return ret; | ||
525 | } | ||
526 | |||
527 | map_copy_from(map, buf, adr, len); | ||
528 | *retlen = len; | ||
529 | |||
530 | put_chip(map, chip); | ||
531 | spin_unlock(chip->mutex); | ||
532 | return ret; | ||
533 | } | ||
534 | |||
535 | static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len, | ||
536 | size_t *retlen, void **mtdbuf, resource_size_t *phys) | ||
537 | { | ||
538 | struct map_info *map = mtd->priv; | ||
539 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
540 | int chipnum = adr >> lpddr->chipshift; | ||
541 | unsigned long ofs, last_end = 0; | ||
542 | struct flchip *chip = &lpddr->chips[chipnum]; | ||
543 | int ret = 0; | ||
544 | |||
545 | if (!map->virt || (adr + len > mtd->size)) | ||
546 | return -EINVAL; | ||
547 | |||
548 | /* ofs: offset within the first chip at which the first read should start */ | ||
549 | ofs = adr - (chipnum << lpddr->chipshift); | ||
550 | |||
551 | *mtdbuf = (void *)map->virt + chip->start + ofs; | ||
552 | *retlen = 0; | ||
553 | |||
554 | while (len) { | ||
555 | unsigned long thislen; | ||
556 | |||
557 | if (chipnum >= lpddr->numchips) | ||
558 | break; | ||
559 | |||
560 | /* We cannot point across chips that are virtually disjoint */ | ||
561 | if (!last_end) | ||
562 | last_end = chip->start; | ||
563 | else if (chip->start != last_end) | ||
564 | break; | ||
565 | |||
566 | if ((len + ofs - 1) >> lpddr->chipshift) | ||
567 | thislen = (1<<lpddr->chipshift) - ofs; | ||
568 | else | ||
569 | thislen = len; | ||
570 | /* get the chip */ | ||
571 | spin_lock(chip->mutex); | ||
572 | ret = get_chip(map, chip, FL_POINT); | ||
573 | spin_unlock(chip->mutex); | ||
574 | if (ret) | ||
575 | break; | ||
576 | |||
577 | chip->state = FL_POINT; | ||
578 | chip->ref_point_counter++; | ||
579 | *retlen += thislen; | ||
580 | len -= thislen; | ||
581 | |||
582 | ofs = 0; | ||
583 | last_end += 1 << lpddr->chipshift; | ||
584 | chipnum++; | ||
585 | chip = &lpddr->chips[chipnum]; | ||
586 | } | ||
587 | return 0; | ||
588 | } | ||
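/*
 * Note on point/unpoint (descriptive, based on the two functions here):
 * lpddr_point() hands back a direct pointer into the linear mapping
 * (map->virt + chip->start + ofs) so callers can read the flash without
 * an intermediate copy; each successful point bumps ref_point_counter,
 * and lpddr_unpoint() below drops the chip back to FL_READY only when
 * the last reference goes away.
 */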
589 | |||
590 | static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len) | ||
591 | { | ||
592 | struct map_info *map = mtd->priv; | ||
593 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
594 | int chipnum = adr >> lpddr->chipshift; | ||
595 | unsigned long ofs; | ||
596 | |||
597 | /* ofs: offset within the first chip at which the first read should start */ | ||
598 | ofs = adr - (chipnum << lpddr->chipshift); | ||
599 | |||
600 | while (len) { | ||
601 | unsigned long thislen; | ||
602 | struct flchip *chip; | ||
603 | |||
604 | chip = &lpddr->chips[chipnum]; | ||
605 | if (chipnum >= lpddr->numchips) | ||
606 | break; | ||
607 | |||
608 | if ((len + ofs - 1) >> lpddr->chipshift) | ||
609 | thislen = (1<<lpddr->chipshift) - ofs; | ||
610 | else | ||
611 | thislen = len; | ||
612 | |||
613 | spin_lock(chip->mutex); | ||
614 | if (chip->state == FL_POINT) { | ||
615 | chip->ref_point_counter--; | ||
616 | if (chip->ref_point_counter == 0) | ||
617 | chip->state = FL_READY; | ||
618 | } else | ||
619 | printk(KERN_WARNING "%s: Warning: unpoint called on non-" | ||
620 | "pointed region\n", map->name); | ||
621 | |||
622 | put_chip(map, chip); | ||
623 | spin_unlock(chip->mutex); | ||
624 | |||
625 | len -= thislen; | ||
626 | ofs = 0; | ||
627 | chipnum++; | ||
628 | } | ||
629 | } | ||
630 | |||
631 | static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, | ||
632 | size_t *retlen, const u_char *buf) | ||
633 | { | ||
634 | struct kvec vec; | ||
635 | |||
636 | vec.iov_base = (void *) buf; | ||
637 | vec.iov_len = len; | ||
638 | |||
639 | return lpddr_writev(mtd, &vec, 1, to, retlen); | ||
640 | } | ||
641 | |||
642 | |||
643 | static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs, | ||
644 | unsigned long count, loff_t to, size_t *retlen) | ||
645 | { | ||
646 | struct map_info *map = mtd->priv; | ||
647 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
648 | int ret = 0; | ||
649 | int chipnum; | ||
650 | unsigned long ofs, vec_seek, i; | ||
651 | int wbufsize = 1 << lpddr->qinfo->BufSizeShift; | ||
652 | |||
653 | size_t len = 0; | ||
654 | |||
655 | for (i = 0; i < count; i++) | ||
656 | len += vecs[i].iov_len; | ||
657 | |||
658 | *retlen = 0; | ||
659 | if (!len) | ||
660 | return 0; | ||
661 | |||
662 | chipnum = to >> lpddr->chipshift; | ||
663 | |||
664 | ofs = to; | ||
665 | vec_seek = 0; | ||
666 | |||
667 | do { | ||
668 | /* We must not cross write block boundaries */ | ||
669 | int size = wbufsize - (ofs & (wbufsize-1)); | ||
670 | |||
671 | if (size > len) | ||
672 | size = len; | ||
673 | |||
674 | ret = do_write_buffer(map, &lpddr->chips[chipnum], | ||
675 | ofs, &vecs, &vec_seek, size); | ||
676 | if (ret) | ||
677 | return ret; | ||
678 | |||
679 | ofs += size; | ||
680 | (*retlen) += size; | ||
681 | len -= size; | ||
682 | |||
683 | /* Be nice and reschedule with the chip in a usable | ||
684 | * state for other processes */ | ||
685 | cond_resched(); | ||
686 | |||
687 | } while (len); | ||
688 | |||
689 | return 0; | ||
690 | } | ||
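/*
 * Illustrative example of the chunking loop in lpddr_writev() above
 * (assuming BufSizeShift == 5, i.e. wbufsize == 32 bytes): a write of
 * 100 bytes to offset 0x1c is issued as chunks of 4, 32, 32 and 32
 * bytes, so that no single do_write_buffer() call crosses a write
 * buffer boundary.
 */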
691 | |||
692 | static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr) | ||
693 | { | ||
694 | unsigned long ofs, len; | ||
695 | int ret; | ||
696 | struct map_info *map = mtd->priv; | ||
697 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
698 | int size = 1 << lpddr->qinfo->UniformBlockSizeShift; | ||
699 | |||
700 | ofs = instr->addr; | ||
701 | len = instr->len; | ||
702 | |||
703 | if (ofs > mtd->size || (len + ofs) > mtd->size) | ||
704 | return -EINVAL; | ||
705 | |||
706 | while (len > 0) { | ||
707 | ret = do_erase_oneblock(mtd, ofs); | ||
708 | if (ret) | ||
709 | return ret; | ||
710 | ofs += size; | ||
711 | len -= size; | ||
712 | } | ||
713 | instr->state = MTD_ERASE_DONE; | ||
714 | mtd_erase_callback(instr); | ||
715 | |||
716 | return 0; | ||
717 | } | ||
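/*
 * Illustrative caller-side sketch (generic MTD usage, not code from this
 * driver): an MTD user erases a block by filling a struct erase_info and
 * calling mtd->erase(); lpddr_erase() works synchronously and reports
 * completion through instr->state and mtd_erase_callback().
 *
 *	struct erase_info ei = {
 *		.mtd  = mtd,
 *		.addr = 0,
 *		.len  = mtd->erasesize,
 *	};
 *	ret = mtd->erase(mtd, &ei);
 */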
718 | |||
719 | #define DO_XXLOCK_LOCK 1 | ||
720 | #define DO_XXLOCK_UNLOCK 2 | ||
721 | int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk) | ||
722 | { | ||
723 | int ret = 0; | ||
724 | struct map_info *map = mtd->priv; | ||
725 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
726 | int chipnum = adr >> lpddr->chipshift; | ||
727 | struct flchip *chip = &lpddr->chips[chipnum]; | ||
728 | |||
729 | spin_lock(chip->mutex); | ||
730 | ret = get_chip(map, chip, FL_LOCKING); | ||
731 | if (ret) { | ||
732 | spin_unlock(chip->mutex); | ||
733 | return ret; | ||
734 | } | ||
735 | |||
736 | if (thunk == DO_XXLOCK_LOCK) { | ||
737 | send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL); | ||
738 | chip->state = FL_LOCKING; | ||
739 | } else if (thunk == DO_XXLOCK_UNLOCK) { | ||
740 | send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL); | ||
741 | chip->state = FL_UNLOCKING; | ||
742 | } else | ||
743 | BUG(); | ||
744 | |||
745 | ret = wait_for_ready(map, chip, 1); | ||
746 | if (ret) { | ||
747 | printk(KERN_ERR "%s: block lock/unlock error, status %d\n", | ||
748 | map->name, ret); | ||
749 | goto out; | ||
750 | } | ||
751 | out: put_chip(map, chip); | ||
752 | spin_unlock(chip->mutex); | ||
753 | return ret; | ||
754 | } | ||
755 | |||
756 | static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | ||
757 | { | ||
758 | return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK); | ||
759 | } | ||
760 | |||
761 | static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | ||
762 | { | ||
763 | return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK); | ||
764 | } | ||
765 | |||
766 | int word_program(struct map_info *map, loff_t adr, uint32_t curval) | ||
767 | { | ||
768 | int ret; | ||
769 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
770 | int chipnum = adr >> lpddr->chipshift; | ||
771 | struct flchip *chip = &lpddr->chips[chipnum]; | ||
772 | |||
773 | spin_lock(chip->mutex); | ||
774 | ret = get_chip(map, chip, FL_WRITING); | ||
775 | if (ret) { | ||
776 | spin_unlock(chip->mutex); | ||
777 | return ret; | ||
778 | } | ||
779 | |||
780 | send_pfow_command(map, LPDDR_WORD_PROGRAM, adr, 0x00, (map_word *)&curval); | ||
781 | |||
782 | ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->SingleWordProgTime)); | ||
783 | if (ret) { | ||
784 | printk(KERN_WARNING "%s: word_program error at %llx; val: %x\n", | ||
785 | map->name, adr, curval); | ||
786 | goto out; | ||
787 | } | ||
788 | |||
789 | out: put_chip(map, chip); | ||
790 | spin_unlock(chip->mutex); | ||
791 | return ret; | ||
792 | } | ||
793 | |||
794 | MODULE_LICENSE("GPL"); | ||
795 | MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>"); | ||
796 | MODULE_DESCRIPTION("MTD driver for LPDDR flash chips"); | ||