Diffstat (limited to 'drivers/sh/maple/maple.c')
 -rw-r--r--   drivers/sh/maple/maple.c   735
 1 files changed, 735 insertions, 0 deletions
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
new file mode 100644
index 000000000000..161d1021b7eb
--- /dev/null
+++ b/drivers/sh/maple/maple.c
@@ -0,0 +1,735 @@
/*
 * Core maple bus functionality
 *
 * Copyright (C) 2007 Adrian McMenamin
 *
 * Based on 2.4 code by:
 *
 * Copyright (C) 2000-2001 YAEGASHI Takeshi
 * Copyright (C) 2001 M. R. Brown
 * Copyright (C) 2001 Paul Mundt
 *
 * and others.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/maple.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/mach/dma.h>
#include <asm/mach/sysasic.h>
#include <asm/mach/maple.h>

MODULE_AUTHOR("Yaegashi Takeshi, Paul Mundt, M. R. Brown, Adrian McMenamin");
MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
MODULE_LICENSE("GPL v2");
MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");

static void maple_dma_handler(struct work_struct *work);
static void maple_vblank_handler(struct work_struct *work);

static DECLARE_WORK(maple_dma_process, maple_dma_handler);
static DECLARE_WORK(maple_vblank_process, maple_vblank_handler);

static LIST_HEAD(maple_waitq);
static LIST_HEAD(maple_sentq);

static DEFINE_MUTEX(maple_list_lock);

static struct maple_driver maple_dummy_driver;
static struct device maple_bus;
static int subdevice_map[MAPLE_PORTS];
static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
static unsigned long maple_pnp_time;
static int started, scanning, liststatus;
static struct kmem_cache *maple_queue_cache;

struct maple_device_specify {
	int port;
	int unit;
};

/**
 * maple_driver_register - register a maple bus device driver
 * @drv: the driver to be registered
 *
 * Sets the driver's bus to the maple bus type before registering it,
 * so callers do not need to set it themselves.
 */
int maple_driver_register(struct device_driver *drv)
{
	if (!drv)
		return -EINVAL;
	drv->bus = &maple_bus_type;
	return driver_register(drv);
}
EXPORT_SYMBOL_GPL(maple_driver_register);
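
/*
 * Illustrative sketch only (not part of the original code): a maple
 * peripheral driver typically embeds a struct device_driver in its own
 * struct maple_driver, fills in the function mask and the connect()/
 * disconnect() hooks used elsewhere in this file, then registers the
 * embedded driver.  The names and the function constant below are
 * hypothetical.
 *
 *	static struct maple_driver example_maple_driver = {
 *		.function = MAPLE_FUNC_CONTROLLER,
 *		.connect = example_connect,
 *		.disconnect = example_disconnect,
 *		.drv = {
 *			.name = "example_maple_driver",
 *		},
 *	};
 *
 *	maple_driver_register(&example_maple_driver.drv);
 */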

/* set hardware registers to enable next round of dma */
static void maplebus_dma_reset(void)
{
	ctrl_outl(MAPLE_MAGIC, MAPLE_RESET);
	/* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
	ctrl_outl(1, MAPLE_TRIGTYPE);
	ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(50000), MAPLE_SPEED);
	ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR);
	ctrl_outl(1, MAPLE_ENABLE);
}

/**
 * maple_getcond_callback - set up handling of MAPLE_COMMAND_GETCOND
 * @dev: device responding
 * @callback: handler callback
 * @interval: interval in jiffies between callbacks
 * @function: the function code for the device
 */
void maple_getcond_callback(struct maple_device *dev,
			    void (*callback) (struct mapleq *mq),
			    unsigned long interval, unsigned long function)
{
	dev->callback = callback;
	dev->interval = interval;
	dev->function = cpu_to_be32(function);
	dev->when = jiffies;
}
EXPORT_SYMBOL_GPL(maple_getcond_callback);
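
/*
 * Illustrative sketch only: an input driver that wants its condition
 * polled roughly once per frame could register a callback from its
 * connect() hook; mdev, example_callback and MAPLE_FUNC_CONTROLLER are
 * assumed names.
 *
 *	maple_getcond_callback(mdev, example_callback, HZ / 50,
 *			       MAPLE_FUNC_CONTROLLER);
 */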

static int maple_dma_done(void)
{
	return (ctrl_inl(MAPLE_STATE) & 1) == 0;
}

static void maple_release_device(struct device *dev)
{
	if (dev->type) {
		kfree(dev->type->name);
		kfree(dev->type);
	}
}

/**
 * maple_add_packet - add a single instruction to the queue
 * @mq: instruction to add to waiting queue
 */
void maple_add_packet(struct mapleq *mq)
{
	mutex_lock(&maple_list_lock);
	list_add(&mq->list, &maple_waitq);
	mutex_unlock(&maple_list_lock);
}
EXPORT_SYMBOL_GPL(maple_add_packet);
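
/*
 * Illustrative sketch only, mirroring how this file itself queues
 * requests: fill in the command, payload and length on the device's
 * queue entry before adding it; mdev is an assumed struct maple_device
 * pointer.
 *
 *	mdev->mq->command = MAPLE_COMMAND_GETCOND;
 *	mdev->mq->sendbuf = &mdev->function;
 *	mdev->mq->length = 1;
 *	maple_add_packet(mdev->mq);
 */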

static struct mapleq *maple_allocq(struct maple_device *dev)
{
	struct mapleq *mq;

	mq = kmalloc(sizeof(*mq), GFP_KERNEL);
	if (!mq)
		return NULL;

	mq->dev = dev;
	mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
	if (!mq->recvbufdcsp) {
		kfree(mq);
		return NULL;
	}
	mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp);

	return mq;
}

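/* allocate a maple device for the given port/unit together with its queue entry */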
static struct maple_device *maple_alloc_dev(int port, int unit)
{
	struct maple_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	dev->port = port;
	dev->unit = unit;
	dev->mq = maple_allocq(dev);

	if (!dev->mq) {
		kfree(dev);
		return NULL;
	}

	return dev;
}

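/* free a maple device along with its queue entry and receive buffer */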
static void maple_free_dev(struct maple_device *mdev)
{
	if (!mdev)
		return;
	if (mdev->mq) {
		kmem_cache_free(maple_queue_cache, mdev->mq->recvbufdcsp);
		kfree(mdev->mq);
	}
	kfree(mdev);
}

/* process the command queue into a maple command block;
 * the final command in the block keeps bit 31 of its first word set
 * to mark the end of the transfer list (earlier commands have the
 * bit cleared)
 */
static void maple_build_block(struct mapleq *mq)
{
	int port, unit, from, to, len;
	unsigned long *lsendbuf = mq->sendbuf;

	port = mq->dev->port & 3;
	unit = mq->dev->unit;
	len = mq->length;
	from = port << 6;
	to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20);

	*maple_lastptr &= 0x7fffffff;
	maple_lastptr = maple_sendptr;

	*maple_sendptr++ = (port << 16) | len | 0x80000000;
	*maple_sendptr++ = PHYSADDR(mq->recvbuf);
	*maple_sendptr++ =
	    mq->command | (to << 8) | (from << 16) | (len << 24);

	while (len-- > 0)
		*maple_sendptr++ = *lsendbuf++;
}

/* build up command queue */
static void maple_send(void)
{
	int i;
	int maple_packets;
	struct mapleq *mq, *nmq;

	if (!list_empty(&maple_sentq))
		return;
	if (list_empty(&maple_waitq) || !maple_dma_done())
		return;
	maple_packets = 0;
	maple_sendptr = maple_lastptr = maple_sendbuf;
	list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
		maple_build_block(mq);
		list_move(&mq->list, &maple_sentq);
		if (maple_packets++ > MAPLE_MAXPACKETS)
			break;
	}
	if (maple_packets > 0) {
		for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
			dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
	}
}

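/* bus_for_each_drv() callback: bind the first driver whose function mask
 * overlaps the functions advertised by the device
 */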
static int attach_matching_maple_driver(struct device_driver *driver,
					void *devptr)
{
	struct maple_driver *maple_drv;
	struct maple_device *mdev;

	mdev = devptr;
	maple_drv = to_maple_driver(driver);
	if (mdev->devinfo.function & be32_to_cpu(maple_drv->function)) {
		if (maple_drv->connect(mdev) == 0) {
			mdev->driver = maple_drv;
			return 1;
		}
	}
	return 0;
}

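/* detach a device from its driver, unregister it and free it */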
static void maple_detach_driver(struct maple_device *mdev)
{
	if (!mdev)
		return;
	if (mdev->driver) {
		if (mdev->driver->disconnect)
			mdev->driver->disconnect(mdev);
	}
	mdev->driver = NULL;
	if (mdev->registered) {
		maple_release_device(&mdev->dev);
		device_unregister(&mdev->dev);
	}
	mdev->registered = 0;
	maple_free_dev(mdev);
}

/* process initial MAPLE_COMMAND_DEVINFO for each device or port */
static void maple_attach_driver(struct maple_device *dev)
{
	char *p;

	char *recvbuf;
	unsigned long function;
	int matched, retval;

	recvbuf = dev->mq->recvbuf;
	memcpy(&dev->devinfo, recvbuf + 4, sizeof(dev->devinfo));
	memcpy(dev->product_name, dev->devinfo.product_name, 30);
	memcpy(dev->product_licence, dev->devinfo.product_licence, 60);
	dev->product_name[30] = '\0';
	dev->product_licence[60] = '\0';

	for (p = dev->product_name + 29; dev->product_name <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;

	for (p = dev->product_licence + 59; dev->product_licence <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;

	function = be32_to_cpu(dev->devinfo.function);

	if (function > 0x200) {
		/* Do this silently - as not a real device */
		function = 0;
		dev->driver = &maple_dummy_driver;
		sprintf(dev->dev.bus_id, "%d:0.port", dev->port);
	} else {
		printk(KERN_INFO
		       "Maple bus at (%d, %d): Connected function 0x%lX\n",
		       dev->port, dev->unit, function);

		matched =
		    bus_for_each_drv(&maple_bus_type, NULL, dev,
				     attach_matching_maple_driver);

		if (matched == 0) {
			/* Driver does not exist yet */
			printk(KERN_INFO
			       "No maple driver found for this device\n");
			dev->driver = &maple_dummy_driver;
		}

		sprintf(dev->dev.bus_id, "%d:0%d.%lX", dev->port,
			dev->unit, function);
	}
	dev->function = function;
	dev->dev.bus = &maple_bus_type;
	dev->dev.parent = &maple_bus;
	dev->dev.release = &maple_release_device;
	retval = device_register(&dev->dev);
	if (retval) {
		printk(KERN_INFO
		       "Maple bus: Attempt to register device (%x, %x) failed.\n",
		       dev->port, dev->unit);
		maple_free_dev(dev);
		return;
	}
	dev->registered = 1;
}

/*
 * if device has been registered for the given
 * port and unit then return 1 - allows identification
 * of which devices need to be attached or detached
 */
static int detach_maple_device(struct device *device, void *portptr)
{
	struct maple_device_specify *ds;
	struct maple_device *mdev;

	ds = portptr;
	mdev = to_maple_dev(device);
	if (mdev->port == ds->port && mdev->unit == ds->unit)
		return 1;
	return 0;
}

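/* bus_for_each_dev() callback: queue a GETCOND request for devices due
 * for polling, otherwise a DEVINFO request once the plug and play rescan
 * time has passed
 */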
static int setup_maple_commands(struct device *device, void *ignored)
{
	struct maple_device *maple_dev = to_maple_dev(device);

	if ((maple_dev->interval > 0)
	    && time_after(jiffies, maple_dev->when)) {
		maple_dev->when = jiffies + maple_dev->interval;
		maple_dev->mq->command = MAPLE_COMMAND_GETCOND;
		maple_dev->mq->sendbuf = &maple_dev->function;
		maple_dev->mq->length = 1;
		maple_add_packet(maple_dev->mq);
		liststatus++;
	} else {
		if (time_after(jiffies, maple_pnp_time)) {
			maple_dev->mq->command = MAPLE_COMMAND_DEVINFO;
			maple_dev->mq->length = 0;
			maple_add_packet(maple_dev->mq);
			liststatus++;
		}
	}

	return 0;
}

/* VBLANK bottom half - implemented via workqueue */
static void maple_vblank_handler(struct work_struct *work)
{
	if (!maple_dma_done())
		return;
	if (!list_empty(&maple_sentq))
		return;
	ctrl_outl(0, MAPLE_ENABLE);
	liststatus = 0;
	bus_for_each_dev(&maple_bus_type, NULL, NULL,
			 setup_maple_commands);
	if (time_after(jiffies, maple_pnp_time))
		maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;
	if (liststatus && list_empty(&maple_sentq)) {
		INIT_LIST_HEAD(&maple_sentq);
		maple_send();
	}
	maplebus_dma_reset();
}

/* handle devices added via hotplug - place them on the queue for DEVINFO */
static void maple_map_subunits(struct maple_device *mdev, int submask)
{
	int retval, k, devcheck;
	struct maple_device *mdev_add;
	struct maple_device_specify ds;

	for (k = 0; k < 5; k++) {
		ds.port = mdev->port;
		ds.unit = k + 1;
		retval =
		    bus_for_each_dev(&maple_bus_type, NULL, &ds,
				     detach_maple_device);
		if (retval) {
			submask = submask >> 1;
			continue;
		}
		devcheck = submask & 0x01;
		if (devcheck) {
			mdev_add = maple_alloc_dev(mdev->port, k + 1);
			if (!mdev_add)
				return;
			mdev_add->mq->command = MAPLE_COMMAND_DEVINFO;
			mdev_add->mq->length = 0;
			maple_add_packet(mdev_add->mq);
			scanning = 1;
		}
		submask = submask >> 1;
	}
}

/* mark a device as removed */
static void maple_clean_submap(struct maple_device *mdev)
{
	int killbit;

	killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20);
	killbit = ~killbit;
	killbit &= 0xFF;
	subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit;
}

/* handle empty port or hotplug removal */
static void maple_response_none(struct maple_device *mdev,
				struct mapleq *mq)
{
	if (mdev->unit != 0) {
		list_del(&mq->list);
		maple_clean_submap(mdev);
		printk(KERN_INFO
		       "Maple bus device detaching at (%d, %d)\n",
		       mdev->port, mdev->unit);
		maple_detach_driver(mdev);
		return;
	}
	if (!started) {
		printk(KERN_INFO "No maple devices attached to port %d\n",
		       mdev->port);
		return;
	}
	maple_clean_submap(mdev);
}

/* preprocess hotplugs or scans */
static void maple_response_devinfo(struct maple_device *mdev,
				   char *recvbuf)
{
	char submask;
	if ((!started) || (scanning == 2)) {
		maple_attach_driver(mdev);
		return;
	}
	if (mdev->unit == 0) {
		submask = recvbuf[2] & 0x1F;
		if (submask ^ subdevice_map[mdev->port]) {
			maple_map_subunits(mdev, submask);
			subdevice_map[mdev->port] = submask;
		}
	}
}

/* maple dma end bottom half - implemented via workqueue */
static void maple_dma_handler(struct work_struct *work)
{
	struct mapleq *mq, *nmq;
	struct maple_device *dev;
	char *recvbuf;
	enum maple_code code;

	if (!maple_dma_done())
		return;
	ctrl_outl(0, MAPLE_ENABLE);
	if (!list_empty(&maple_sentq)) {
		list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
			recvbuf = mq->recvbuf;
			code = recvbuf[0];
			dev = mq->dev;
			switch (code) {
			case MAPLE_RESPONSE_NONE:
				maple_response_none(dev, mq);
				break;

			case MAPLE_RESPONSE_DEVINFO:
				maple_response_devinfo(dev, recvbuf);
				break;

			case MAPLE_RESPONSE_DATATRF:
				if (dev->callback)
					dev->callback(mq);
				break;

			case MAPLE_RESPONSE_FILEERR:
			case MAPLE_RESPONSE_AGAIN:
			case MAPLE_RESPONSE_BADCMD:
			case MAPLE_RESPONSE_BADFUNC:
				printk(KERN_DEBUG
				       "Maple non-fatal error 0x%X\n",
				       code);
				break;

			case MAPLE_RESPONSE_ALLINFO:
				printk(KERN_DEBUG
				       "Maple - extended device information not supported\n");
				break;

			case MAPLE_RESPONSE_OK:
				break;

			default:
				break;
			}
		}
		INIT_LIST_HEAD(&maple_sentq);
		if (scanning == 1) {
			maple_send();
			scanning = 2;
		} else
			scanning = 0;

		if (started == 0)
			started = 1;
	}
	maplebus_dma_reset();
}

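/* interrupt top halves: defer all processing to the workqueue bottom
 * halves above
 */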
static irqreturn_t maplebus_dma_interrupt(int irq, void *dev_id)
{
	/* Load everything into the bottom half */
	schedule_work(&maple_dma_process);
	return IRQ_HANDLED;
}

static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id)
{
	schedule_work(&maple_vblank_process);
	return IRQ_HANDLED;
}

static struct irqaction maple_dma_irq = {
	.name = "maple bus DMA handler",
	.handler = maplebus_dma_interrupt,
	.flags = IRQF_SHARED,
};

static struct irqaction maple_vblank_irq = {
	.name = "maple bus VBLANK handler",
	.handler = maplebus_vblank_interrupt,
	.flags = IRQF_SHARED,
};

static int maple_set_dma_interrupt_handler(void)
{
	return setup_irq(HW_EVENT_MAPLE_DMA, &maple_dma_irq);
}

static int maple_set_vblank_interrupt_handler(void)
{
	return setup_irq(HW_EVENT_VSYNC, &maple_vblank_irq);
}

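/* allocate zeroed, page-aligned memory for the maple DMA command block */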
static int maple_get_dma_buffer(void)
{
	maple_sendbuf =
	    (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      MAPLE_DMA_PAGES);
	if (!maple_sendbuf)
		return -ENOMEM;
	return 0;
}

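/* bus match callback: a driver matches a device when their function masks
 * overlap; an empty port (function == 0xFFFFFFFF) never matches
 */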
static int match_maple_bus_driver(struct device *devptr,
				  struct device_driver *drvptr)
{
	struct maple_driver *maple_drv;
	struct maple_device *maple_dev;

	maple_drv = container_of(drvptr, struct maple_driver, drv);
	maple_dev = container_of(devptr, struct maple_device, dev);
	/* Trap empty port case */
	if (maple_dev->devinfo.function == 0xFFFFFFFF)
		return 0;
	else if (maple_dev->devinfo.function &
		 be32_to_cpu(maple_drv->function))
		return 1;
	return 0;
}

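/* uevent hook: no extra environment variables are added for maple devices */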
static int maple_bus_uevent(struct device *dev, char **envp,
			    int num_envp, char *buffer, int buffer_size)
{
	return 0;
}

static void maple_bus_release(struct device *dev)
{
}

static struct maple_driver maple_dummy_driver = {
	.drv = {
		.name = "maple_dummy_driver",
		.bus = &maple_bus_type,
	},
};

struct bus_type maple_bus_type = {
	.name = "maple",
	.match = match_maple_bus_driver,
	.uevent = maple_bus_uevent,
};
EXPORT_SYMBOL_GPL(maple_bus_type);

static struct device maple_bus = {
	.bus_id = "maple",
	.release = maple_bus_release,
};

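/* core initialisation: register the bus, set up the DMA buffer and
 * interrupt handlers, then queue an initial DEVINFO scan of every port
 */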
static int __init maple_bus_init(void)
{
	int retval, i;
	struct maple_device *mdev[MAPLE_PORTS];
	ctrl_outl(0, MAPLE_STATE);

	retval = device_register(&maple_bus);
	if (retval)
		goto cleanup;

	retval = bus_register(&maple_bus_type);
	if (retval)
		goto cleanup_device;

	retval = driver_register(&maple_dummy_driver.drv);

	if (retval)
		goto cleanup_bus;

	/* allocate memory for maple bus dma */
	retval = maple_get_dma_buffer();
	if (retval) {
		printk(KERN_INFO
		       "Maple bus: Failed to allocate Maple DMA buffers\n");
		goto cleanup_basic;
	}

	/* set up DMA interrupt handler */
	retval = maple_set_dma_interrupt_handler();
	if (retval) {
		printk(KERN_INFO
		       "Maple bus: Failed to grab maple DMA IRQ\n");
		goto cleanup_dma;
	}

	/* set up VBLANK interrupt handler */
	retval = maple_set_vblank_interrupt_handler();
	if (retval) {
		printk(KERN_INFO "Maple bus: Failed to grab VBLANK IRQ\n");
		goto cleanup_irq;
	}

	maple_queue_cache =
	    kmem_cache_create("maple_queue_cache", 0x400, 0,
			      SLAB_HWCACHE_ALIGN, NULL);

	if (!maple_queue_cache) {
		retval = -ENOMEM;
		goto cleanup_bothirqs;
	}

	/* setup maple ports */
	for (i = 0; i < MAPLE_PORTS; i++) {
		mdev[i] = maple_alloc_dev(i, 0);
		if (!mdev[i]) {
			while (i-- > 0)
				maple_free_dev(mdev[i]);
			retval = -ENOMEM;
			goto cleanup_cache;
		}
		mdev[i]->registered = 0;
		mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO;
		mdev[i]->mq->length = 0;
		maple_attach_driver(mdev[i]);
		maple_add_packet(mdev[i]->mq);
		subdevice_map[i] = 0;
	}

	/* setup maplebus hardware */
	maplebus_dma_reset();

	/* initial detection */
	maple_send();

	maple_pnp_time = jiffies;

	printk(KERN_INFO "Maple bus core now registered.\n");

	return 0;

cleanup_cache:
	kmem_cache_destroy(maple_queue_cache);

cleanup_bothirqs:
	free_irq(HW_EVENT_VSYNC, 0);

cleanup_irq:
	free_irq(HW_EVENT_MAPLE_DMA, 0);

cleanup_dma:
	free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);

cleanup_basic:
	driver_unregister(&maple_dummy_driver.drv);

cleanup_bus:
	bus_unregister(&maple_bus_type);

cleanup_device:
	device_unregister(&maple_bus);

cleanup:
	printk(KERN_INFO "Maple bus registration failed\n");
	return retval;
}
subsys_initcall(maple_bus_init);