-rw-r--r--  drivers/mtd/maps/Kconfig     |  12
-rw-r--r--  drivers/mtd/maps/Makefile    |   1
-rw-r--r--  drivers/mtd/maps/vmu-flash.c | 832
3 files changed, 844 insertions(+), 1 deletion(-)
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 043d50fb6ef6..729f899a5cd5 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -551,5 +551,15 @@ config MTD_PLATRAM
 
 	  This selection automatically selects the map_ram driver.
 
-endmenu
+config MTD_VMU
+	tristate "Map driver for Dreamcast VMU"
+	depends on MAPLE
+	help
+	  This driver enables access to the Dreamcast Visual Memory Unit (VMU).
+
+	  Most Dreamcast users will want to say Y here.
 
+	  To build this as a module select M here; the module will be called
+	  vmu-flash.
+
+endmenu
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 6d9ba35caf11..26b28a7a90b5 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -61,3 +61,4 @@ obj-$(CONFIG_MTD_PLATRAM)	+= plat-ram.o
 obj-$(CONFIG_MTD_OMAP_NOR)	+= omap_nor.o
 obj-$(CONFIG_MTD_INTEL_VR_NOR)	+= intel_vr_nor.o
 obj-$(CONFIG_MTD_BFIN_ASYNC)	+= bfin-async-flash.o
+obj-$(CONFIG_MTD_VMU)		+= vmu-flash.o
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
new file mode 100644
index 000000000000..1f73297e7776
--- /dev/null
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -0,0 +1,832 @@
/* vmu-flash.c
 * Driver for SEGA Dreamcast Visual Memory Unit
 *
 * Copyright (c) Adrian McMenamin 2002 - 2009
 * Copyright (c) Paul Mundt 2001
 *
 * Licensed under version 2 of the
 * GNU General Public Licence
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/maple.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>

struct vmu_cache {
	unsigned char *buffer;		/* Cache */
	unsigned int block;		/* Which block was cached */
	unsigned long jiffies_atc;	/* When was it cached? */
	int valid;
};

struct mdev_part {
	struct maple_device *mdev;
	int partition;
};

struct vmupart {
	u16 user_blocks;
	u16 root_block;
	u16 numblocks;
	char *name;
	struct vmu_cache *pcache;
};

struct memcard {
	u16 tempA;
	u16 tempB;
	u32 partitions;
	u32 blocklen;
	u32 writecnt;
	u32 readcnt;
	u32 removeable;
	int partition;
	int read;
	unsigned char *blockread;
	struct vmupart *parts;
	struct mtd_info *mtd;
};

struct vmu_block {
	unsigned int num;	/* block number */
	unsigned int ofs;	/* block offset */
};

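/*
 * Translate a linear MTD offset within a partition into a
 * (block number, offset within block) pair. Worked example with
 * assumed values, not taken from a real card: for a 512-byte
 * blocklen, src_ofs = 1234 maps to block 2, offset 210, since
 * 1234 / 512 == 2 and 1234 % 512 == 210.
 */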
static struct vmu_block *ofs_to_block(unsigned long src_ofs,
	struct mtd_info *mtd, int partition)
{
	struct vmu_block *vblock;
	struct maple_device *mdev;
	struct memcard *card;
	struct mdev_part *mpart;
	int num;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	card = maple_get_drvdata(mdev);

	if (src_ofs >= card->parts[partition].numblocks * card->blocklen)
		goto failed;

	num = src_ofs / card->blocklen;
	if (num > card->parts[partition].numblocks)
		goto failed;

	vblock = kmalloc(sizeof(struct vmu_block), GFP_KERNEL);
	if (!vblock)
		goto failed;

	vblock->num = num;
	vblock->ofs = src_ofs % card->blocklen;
	return vblock;

failed:
	return NULL;
}

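/*
 * Only one phase worth of data (blocklen / readcnt bytes) arrives
 * per transaction; the payload is copied from offset 12 of the
 * receive buffer, past what is assumed here to be the maple reply
 * header and location word.
 */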
/* Maple bus callback function for reads */
static void vmu_blockread(struct mapleq *mq)
{
	struct maple_device *mdev;
	struct memcard *card;

	mdev = mq->dev;
	card = maple_get_drvdata(mdev);
	/* copy in the read data */

	if (unlikely(!card->blockread))
		return;

	memcpy(card->blockread, mq->recvbuf->buf + 12,
		card->blocklen/card->readcnt);

}

/* Interface with the maple bus to read blocks,
 * caching the results so that other parts
 * of the driver can access block reads */
static int maple_vmu_read_block(unsigned int num, unsigned char *buf,
	struct mtd_info *mtd)
{
	struct memcard *card;
	struct mdev_part *mpart;
	struct maple_device *mdev;
	int partition, error = 0, x, wait;
	unsigned char *blockread = NULL;
	struct vmu_cache *pcache;
	__be32 sendbuf;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);
	pcache = card->parts[partition].pcache;
	pcache->valid = 0;

	/* prepare the cache for this block */
	if (!pcache->buffer) {
		pcache->buffer = kmalloc(card->blocklen, GFP_KERNEL);
		if (!pcache->buffer) {
			dev_err(&mdev->dev, "VMU at (%d, %d) - read fails due"
				" to lack of memory\n", mdev->port,
				mdev->unit);
			error = -ENOMEM;
			goto outB;
		}
	}

	/*
	 * Reads may be phased - again the hardware spec
	 * supports this - though there may not be any devices in
	 * the wild that implement it, but we will handle it here
	 */
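	/*
	 * Each phase is requested with a single 32-bit big-endian
	 * location word: the partition in the top byte, the phase
	 * number in the next byte and the block number in the low
	 * 16 bits. mdev->busy acts as a simple handshake with the
	 * maple bus driver: 0 means idle, 1 means a transaction is
	 * in flight, and 2 flags that the device was unplugged
	 * mid-transaction.
	 */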
	for (x = 0; x < card->readcnt; x++) {
		sendbuf = cpu_to_be32(partition << 24 | x << 16 | num);

		if (atomic_read(&mdev->busy) == 1) {
			wait_event_interruptible_timeout(mdev->maple_wait,
				atomic_read(&mdev->busy) == 0, HZ);
			if (atomic_read(&mdev->busy) == 1) {
				dev_notice(&mdev->dev, "VMU at (%d, %d)"
					" is busy\n", mdev->port, mdev->unit);
				error = -EAGAIN;
				goto outB;
			}
		}

		atomic_set(&mdev->busy, 1);
		blockread = kmalloc(card->blocklen/card->readcnt, GFP_KERNEL);
		if (!blockread) {
			error = -ENOMEM;
			atomic_set(&mdev->busy, 0);
			goto outB;
		}
		card->blockread = blockread;

		maple_getcond_callback(mdev, vmu_blockread, 0,
			MAPLE_FUNC_MEMCARD);
		error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
			MAPLE_COMMAND_BREAD, 2, &sendbuf);
		/* Very long timeouts seem to be needed when the box is stressed */
		wait = wait_event_interruptible_timeout(mdev->maple_wait,
			(atomic_read(&mdev->busy) == 0 ||
			atomic_read(&mdev->busy) == 2), HZ * 3);
		/*
		 * The MTD layer does not handle hotplugging well,
		 * so we have to return errors when the VMU is unplugged
		 * in the middle of a read (busy == 2)
		 */
		if (error || atomic_read(&mdev->busy) == 2) {
			if (atomic_read(&mdev->busy) == 2)
				error = -ENXIO;
			atomic_set(&mdev->busy, 0);
			card->blockread = NULL;
			goto outA;
		}
		if (wait == 0 || wait == -ERESTARTSYS) {
			card->blockread = NULL;
			atomic_set(&mdev->busy, 0);
			error = -EIO;
			list_del_init(&(mdev->mq->list));
			kfree(mdev->mq->sendbuf);
			mdev->mq->sendbuf = NULL;
			if (wait == -ERESTARTSYS) {
				dev_warn(&mdev->dev, "VMU read on (%d, %d)"
					" interrupted on block 0x%X\n",
					mdev->port, mdev->unit, num);
			} else
				dev_notice(&mdev->dev, "VMU read on (%d, %d)"
					" timed out on block 0x%X\n",
					mdev->port, mdev->unit, num);
			goto outA;
		}

		memcpy(buf + (card->blocklen/card->readcnt) * x, blockread,
			card->blocklen/card->readcnt);

		memcpy(pcache->buffer + (card->blocklen/card->readcnt) * x,
			card->blockread, card->blocklen/card->readcnt);
		card->blockread = NULL;
		pcache->block = num;
		pcache->jiffies_atc = jiffies;
		pcache->valid = 1;
		kfree(blockread);
	}

	return error;

outA:
	kfree(blockread);
outB:
	return error;
}

/* communicate with the maple bus for phased writing */
static int maple_vmu_write_block(unsigned int num, const unsigned char *buf,
	struct mtd_info *mtd)
{
	struct memcard *card;
	struct mdev_part *mpart;
	struct maple_device *mdev;
	int partition, error, locking, x, phaselen, wait;
	__be32 *sendbuf;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);

	phaselen = card->blocklen/card->writecnt;

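	/*
	 * Each write phase sends one location word followed by phaselen
	 * bytes of payload; the packet length passed to maple_add_packet
	 * below counts the function word, the location word and
	 * phaselen / 4 data words, hence phaselen / 4 + 2. On a standard
	 * VMU, which typically reports 512-byte blocks written in 4
	 * phases, phaselen works out at 128 bytes.
	 */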
	sendbuf = kmalloc(phaselen + 4, GFP_KERNEL);
	if (!sendbuf) {
		error = -ENOMEM;
		goto fail_nosendbuf;
	}
	for (x = 0; x < card->writecnt; x++) {
		sendbuf[0] = cpu_to_be32(partition << 24 | x << 16 | num);
		memcpy(&sendbuf[1], buf + phaselen * x, phaselen);
		/* wait until the device is not busy doing something else
		 * or 1 second - whichever is longer */
		if (atomic_read(&mdev->busy) == 1) {
			wait_event_interruptible_timeout(mdev->maple_wait,
				atomic_read(&mdev->busy) == 0, HZ);
			if (atomic_read(&mdev->busy) == 1) {
				error = -EBUSY;
				dev_notice(&mdev->dev, "VMU write at (%d, %d)"
					" failed - device is busy\n",
					mdev->port, mdev->unit);
				goto fail_nolock;
			}
		}
		atomic_set(&mdev->busy, 1);

		locking = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
			MAPLE_COMMAND_BWRITE, phaselen / 4 + 2, sendbuf);
		wait = wait_event_interruptible_timeout(mdev->maple_wait,
			atomic_read(&mdev->busy) == 0, HZ/10);
		if (locking) {
			error = -EIO;
			atomic_set(&mdev->busy, 0);
			goto fail_nolock;
		}
		if (atomic_read(&mdev->busy) == 2) {
			atomic_set(&mdev->busy, 0);
		} else if (wait == 0 || wait == -ERESTARTSYS) {
			error = -EIO;
			dev_warn(&mdev->dev, "Write at (%d, %d) of block"
				" 0x%X at phase %d failed: could not"
				" communicate with VMU\n", mdev->port,
				mdev->unit, num, x);
			atomic_set(&mdev->busy, 0);
			kfree(mdev->mq->sendbuf);
			mdev->mq->sendbuf = NULL;
			list_del_init(&(mdev->mq->list));
			goto fail_nolock;
		}
	}
	kfree(sendbuf);

	return card->blocklen;

fail_nolock:
	kfree(sendbuf);
fail_nosendbuf:
	dev_err(&mdev->dev, "VMU (%d, %d): write failed\n", mdev->port,
		mdev->unit);
	return error;
}

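/*
 * Error signalling convention for the byte-read helper below: on
 * success *retval is 0 and the byte is returned; on failure *retval
 * is set non-zero (1 = no memory for the bounce buffer, 2 = the block
 * read failed, 3 = the offset could not be mapped to a block) and the
 * return value carries an error code rather than data.
 */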
/* mtd function to simulate reading byte by byte */
static unsigned char vmu_flash_read_char(unsigned long ofs, int *retval,
	struct mtd_info *mtd)
{
	struct vmu_block *vblock;
	struct memcard *card;
	struct mdev_part *mpart;
	struct maple_device *mdev;
	unsigned char *buf, ret;
	int partition, error;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);
	*retval = 0;

	buf = kmalloc(card->blocklen, GFP_KERNEL);
	if (!buf) {
		*retval = 1;
		ret = -ENOMEM;
		goto finish;
	}

	vblock = ofs_to_block(ofs, mtd, partition);
	if (!vblock) {
		*retval = 3;
		ret = -ENOMEM;
		goto out_buf;
	}

	error = maple_vmu_read_block(vblock->num, buf, mtd);
	if (error) {
		ret = error;
		*retval = 2;
		goto out_vblock;
	}

	ret = buf[vblock->ofs];

out_vblock:
	kfree(vblock);
out_buf:
	kfree(buf);
finish:
	return ret;
}

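/*
 * Reads are served from the one-block cache when it holds the wanted
 * block and is less than a second old; otherwise a single byte is
 * fetched via vmu_flash_read_char(), which as a side effect refills
 * the cache with the whole block, so the rest of that block can be
 * copied from the cache on the next pass of the loop.
 */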
/* mtd higher order function to read flash */
static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
	size_t *retlen, u_char *buf)
{
	struct maple_device *mdev;
	struct memcard *card;
	struct mdev_part *mpart;
	struct vmu_cache *pcache;
	struct vmu_block *vblock;
	int index = 0, retval, partition, leftover, numblocks;
	unsigned char cx;

	if (len < 1)
		return -EIO;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);

	numblocks = card->parts[partition].numblocks;
	if (from + len > numblocks * card->blocklen)
		len = numblocks * card->blocklen - from;
	if (len == 0)
		return -EIO;
	/* Have we cached this bit already? */
	pcache = card->parts[partition].pcache;
	do {
		vblock = ofs_to_block(from + index, mtd, partition);
		if (!vblock)
			return -ENOMEM;
		/* Have we cached this and is the cache valid and timely? */
		if (pcache->valid &&
			time_before(jiffies, pcache->jiffies_atc + HZ) &&
			(pcache->block == vblock->num)) {
			/* we have cached it, so do necessary copying */
			leftover = card->blocklen - vblock->ofs;
			if (vblock->ofs + len - index < card->blocklen) {
				/* only a bit of this block to copy */
				memcpy(buf + index,
					pcache->buffer + vblock->ofs,
					len - index);
				index = len;
			} else {
				/* otherwise copy remainder of whole block */
				memcpy(buf + index, pcache->buffer +
					vblock->ofs, leftover);
				index += leftover;
			}
		} else {
			/*
			 * Not cached so read one byte -
			 * but cache the rest of the block
			 */
			cx = vmu_flash_read_char(from + index, &retval, mtd);
			if (retval) {
				*retlen = index;
				kfree(vblock);
				return cx;
			}
			memset(buf + index, cx, 1);
			index++;
		}
		kfree(vblock);
	} while (len > index);
	*retlen = index;

	return 0;
}

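/*
 * Writes are done as read-modify-write: each affected block is read
 * into a bounce buffer, patched with the caller's data and written
 * back in full, and the partition's read cache is invalidated so
 * stale data is not served afterwards.
 */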
static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
	size_t *retlen, const u_char *buf)
{
	struct maple_device *mdev;
	struct memcard *card;
	struct mdev_part *mpart;
	int index = 0, partition, error = 0, numblocks;
	struct vmu_cache *pcache;
	struct vmu_block *vblock;
	unsigned char *buffer;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);

	/* simple sanity checks */
	if (len < 1) {
		error = -EIO;
		goto failed;
	}
	numblocks = card->parts[partition].numblocks;
	if (to + len > numblocks * card->blocklen)
		len = numblocks * card->blocklen - to;
	if (len == 0) {
		error = -EIO;
		goto failed;
	}

	vblock = ofs_to_block(to, mtd, partition);
	if (!vblock) {
		error = -ENOMEM;
		goto failed;
	}

	buffer = kmalloc(card->blocklen, GFP_KERNEL);
	if (!buffer) {
		error = -ENOMEM;
		goto fail_buffer;
	}

	do {
		/* Read in the block we are to write to */
		error = maple_vmu_read_block(vblock->num, buffer, mtd);
		if (error)
			goto fail_io;

		do {
			buffer[vblock->ofs] = buf[index];
			vblock->ofs++;
			index++;
			if (index >= len)
				break;
		} while (vblock->ofs < card->blocklen);

		/* write out the new buffer */
		error = maple_vmu_write_block(vblock->num, buffer, mtd);
		/* invalidate the cache */
		pcache = card->parts[partition].pcache;
		pcache->valid = 0;

		if (error != card->blocklen)
			goto fail_io;

		vblock->num++;
		vblock->ofs = 0;
	} while (len > index);

	kfree(buffer);
	*retlen = index;
	kfree(vblock);
	return 0;

fail_io:
	kfree(buffer);
fail_buffer:
	kfree(vblock);
failed:
	dev_err(&mdev->dev, "VMU write failing with error %d\n", error);
	return error;
}

static void vmu_flash_sync(struct mtd_info *mtd)
{
	/* Do nothing here */
}

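/*
 * The GETMINFO reply parsed below reports, among other things, the
 * number of user blocks and the location of the root (directory)
 * block for the partition; a standard VMU typically reports 200 user
 * blocks with the root block at 255, giving numblocks == 256
 * (blocks 0 to 255).
 */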
/* Maple bus callback function to recursively query hardware details */
static void vmu_queryblocks(struct mapleq *mq)
{
	struct maple_device *mdev;
	unsigned short *res;
	struct memcard *card;
	__be32 partnum;
	struct vmu_cache *pcache;
	struct mdev_part *mpart;
	struct mtd_info *mtd_cur;
	struct vmupart *part_cur;
	int error;

	mdev = mq->dev;
	card = maple_get_drvdata(mdev);
	res = (unsigned short *) (mq->recvbuf->buf);
	card->tempA = res[12];
	card->tempB = res[6];

	dev_info(&mdev->dev, "VMU device at partition %d has %d user "
		"blocks with a root block at %d\n", card->partition,
		card->tempA, card->tempB);

	part_cur = &card->parts[card->partition];
	part_cur->user_blocks = card->tempA;
	part_cur->root_block = card->tempB;
	part_cur->numblocks = card->tempB + 1;
	part_cur->name = kmalloc(12, GFP_KERNEL);
	if (!part_cur->name)
		goto fail_name;

	sprintf(part_cur->name, "vmu%d.%d.%d",
		mdev->port, mdev->unit, card->partition);
	mtd_cur = &card->mtd[card->partition];
	mtd_cur->name = part_cur->name;
	mtd_cur->type = 8;
	mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE;
	mtd_cur->size = part_cur->numblocks * card->blocklen;
	mtd_cur->erasesize = card->blocklen;
	mtd_cur->write = vmu_flash_write;
	mtd_cur->read = vmu_flash_read;
	mtd_cur->sync = vmu_flash_sync;
	mtd_cur->writesize = card->blocklen;

	mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL);
	if (!mpart)
		goto fail_mpart;

	mpart->mdev = mdev;
	mpart->partition = card->partition;
	mtd_cur->priv = mpart;
	mtd_cur->owner = THIS_MODULE;

	pcache = kzalloc(sizeof(struct vmu_cache), GFP_KERNEL);
	if (!pcache)
		goto fail_cache_create;
	part_cur->pcache = pcache;

	error = add_mtd_device(mtd_cur);
	if (error)
		goto fail_mtd_register;

	maple_getcond_callback(mdev, NULL, 0,
		MAPLE_FUNC_MEMCARD);

	/*
	 * Set up a recursive call to the (probably theoretical)
	 * second or more partition
	 */
	if (++card->partition < card->partitions) {
		partnum = cpu_to_be32(card->partition << 24);
		maple_getcond_callback(mdev, vmu_queryblocks, 0,
			MAPLE_FUNC_MEMCARD);
		maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
			MAPLE_COMMAND_GETMINFO, 2, &partnum);
	}
	return;

fail_mtd_register:
	dev_err(&mdev->dev, "Could not register maple device at (%d, %d)"
		" error is 0x%X\n", mdev->port, mdev->unit, error);
	for (error = 0; error <= card->partition; error++) {
		kfree(((card->parts)[error]).pcache);
		((card->parts)[error]).pcache = NULL;
	}
fail_cache_create:
fail_mpart:
	for (error = 0; error <= card->partition; error++) {
		kfree(((card->mtd)[error]).priv);
		((card->mtd)[error]).priv = NULL;
	}
	maple_getcond_callback(mdev, NULL, 0,
		MAPLE_FUNC_MEMCARD);
	kfree(part_cur->name);
fail_name:
	return;
}

/* Handles very basic info about the flash, queries for details */
static int __devinit vmu_connect(struct maple_device *mdev)
{
	unsigned long test_flash_data, basic_flash_data;
	int c, error;
	struct memcard *card;
	u32 partnum = 0;

	test_flash_data = be32_to_cpu(mdev->devinfo.function);
	/* Need to count how many bits are set - to find out which
	 * function_data element has details of the memory card:
	 * using Brian Kernighan's/Peter Wegner's method */
	for (c = 0; test_flash_data; c++)
		test_flash_data &= test_flash_data - 1;

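	/*
	 * Worked example (function word assumed for illustration): if
	 * three function bits are set - say memory card, LCD and clock -
	 * the loop above clears one set bit per iteration and leaves
	 * c == 3, so the memory card description is taken from
	 * function_data[2], i.e. function_data[c - 1].
	 */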
	basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]);

	card = kmalloc(sizeof(struct memcard), GFP_KERNEL);
	if (!card) {
		error = -ENOMEM;
		goto fail_nomem;
	}

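	/*
	 * Decode the memory card description word: bits 31-24 hold the
	 * partition count minus one, bits 23-16 the block size in
	 * 32-byte units minus one, bits 15-12 the number of write
	 * phases, bits 11-8 the number of read phases and bit 7 the
	 * removable flag. A standard VMU typically decodes to one
	 * partition of 512-byte blocks, written in 4 phases and read
	 * in a single phase.
	 */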
	card->partitions = (basic_flash_data >> 24 & 0xFF) + 1;
	card->blocklen = ((basic_flash_data >> 16 & 0xFF) + 1) << 5;
	card->writecnt = basic_flash_data >> 12 & 0xF;
	card->readcnt = basic_flash_data >> 8 & 0xF;
	card->removeable = basic_flash_data >> 7 & 1;

	card->partition = 0;

	/*
	 * Not sure there are actually any multi-partition devices in the
	 * real world, but the hardware supports them, so we will too
	 */
	card->parts = kmalloc(sizeof(struct vmupart) * card->partitions,
		GFP_KERNEL);
	if (!card->parts) {
		error = -ENOMEM;
		goto fail_partitions;
	}

	card->mtd = kmalloc(sizeof(struct mtd_info) * card->partitions,
		GFP_KERNEL);
	if (!card->mtd) {
		error = -ENOMEM;
		goto fail_mtd_info;
	}

	maple_set_drvdata(mdev, card);

	/*
	 * We want to trap meminfo, not getcond,
	 * so set the interval to zero, but rely on the maple bus
	 * driver to pass back the results of the meminfo
	 */
	maple_getcond_callback(mdev, vmu_queryblocks, 0,
		MAPLE_FUNC_MEMCARD);

	/* Make sure we are clear to go */
	if (atomic_read(&mdev->busy) == 1) {
		wait_event_interruptible_timeout(mdev->maple_wait,
			atomic_read(&mdev->busy) == 0, HZ);
		if (atomic_read(&mdev->busy) == 1) {
			dev_notice(&mdev->dev, "VMU at (%d, %d) is busy\n",
				mdev->port, mdev->unit);
			error = -EAGAIN;
			goto fail_device_busy;
		}
	}

	atomic_set(&mdev->busy, 1);

	/*
	 * Set up the minfo call: vmu_queryblocks will handle
	 * the information passed back
	 */
	error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
		MAPLE_COMMAND_GETMINFO, 2, &partnum);
	if (error) {
		dev_err(&mdev->dev, "Could not lock VMU at (%d, %d)"
			" error is 0x%X\n", mdev->port, mdev->unit, error);
		goto fail_mtd_info;
	}
	return 0;

fail_device_busy:
	kfree(card->mtd);
fail_mtd_info:
	kfree(card->parts);
fail_partitions:
	kfree(card);
fail_nomem:
	return error;
}

static void __devexit vmu_disconnect(struct maple_device *mdev)
{
	struct memcard *card;
	struct mdev_part *mpart;
	int x;

	mdev->callback = NULL;
	card = maple_get_drvdata(mdev);
	for (x = 0; x < card->partitions; x++) {
		mpart = ((card->mtd)[x]).priv;
		mpart->mdev = NULL;
		del_mtd_device(&((card->mtd)[x]));
		kfree(((card->parts)[x]).name);
	}
	kfree(card->parts);
	kfree(card->mtd);
	kfree(card);
}

/* Callback to handle the eccentricities of both the mtd subsystem
 * and the general flakiness of Dreamcast VMUs
 */
static int vmu_can_unload(struct maple_device *mdev)
{
	struct memcard *card;
	int x;
	struct mtd_info *mtd;

	card = maple_get_drvdata(mdev);
	for (x = 0; x < card->partitions; x++) {
		mtd = &((card->mtd)[x]);
		if (mtd->usecount > 0)
			return 0;
	}
	return 1;
}

#define ERRSTR "VMU at (%d, %d) file error -"

static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
{
	enum maple_file_errors error = ((int *)recvbuf)[1];

	switch (error) {

	case MAPLE_FILEERR_INVALID_PARTITION:
		dev_notice(&mdev->dev, ERRSTR " invalid partition number\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_PHASE_ERROR:
		dev_notice(&mdev->dev, ERRSTR " phase error\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_INVALID_BLOCK:
		dev_notice(&mdev->dev, ERRSTR " invalid block number\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_WRITE_ERROR:
		dev_notice(&mdev->dev, ERRSTR " write error\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_INVALID_WRITE_LENGTH:
		dev_notice(&mdev->dev, ERRSTR " invalid write length\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_BAD_CRC:
		dev_notice(&mdev->dev, ERRSTR " bad CRC\n",
			mdev->port, mdev->unit);
		break;

	default:
		dev_notice(&mdev->dev, ERRSTR " 0x%X\n",
			mdev->port, mdev->unit, error);
	}
}


static int __devinit probe_maple_vmu(struct device *dev)
{
	int error;
	struct maple_device *mdev = to_maple_dev(dev);
	struct maple_driver *mdrv = to_maple_driver(dev->driver);

	mdev->can_unload = vmu_can_unload;
	mdev->fileerr_handler = vmu_file_error;
	mdev->driver = mdrv;

	error = vmu_connect(mdev);
	if (error)
		return error;

	return 0;
}

static int __devexit remove_maple_vmu(struct device *dev)
{
	struct maple_device *mdev = to_maple_dev(dev);

	vmu_disconnect(mdev);
	return 0;
}

static struct maple_driver vmu_flash_driver = {
	.function =	MAPLE_FUNC_MEMCARD,
	.drv = {
		.name =		"Dreamcast_visual_memory",
		.probe =	probe_maple_vmu,
		.remove =	__devexit_p(remove_maple_vmu),
	},
};

static int __init vmu_flash_map_init(void)
{
	return maple_driver_register(&vmu_flash_driver);
}

static void __exit vmu_flash_map_exit(void)
{
	maple_driver_unregister(&vmu_flash_driver);
}

module_init(vmu_flash_map_init);
module_exit(vmu_flash_map_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adrian McMenamin");
MODULE_DESCRIPTION("Flash mapping for Sega Dreamcast visual memory");