-rw-r--r--  drivers/input/joystick/maplecontrol.c  |   4
-rw-r--r--  drivers/input/keyboard/maple_keyb.c    |  37
-rw-r--r--  drivers/sh/maple/maple.c               | 463
-rw-r--r--  include/linux/maple.h                  |  62
4 files changed, 300 insertions, 266 deletions
diff --git a/drivers/input/joystick/maplecontrol.c b/drivers/input/joystick/maplecontrol.c
index e50047bfe938..77cfde571bd9 100644
--- a/drivers/input/joystick/maplecontrol.c
+++ b/drivers/input/joystick/maplecontrol.c
@@ -3,7 +3,7 @@
  * Based on drivers/usb/iforce.c
  *
  * Copyright Yaegashi Takeshi, 2001
- * Adrian McMenamin, 2008
+ * Adrian McMenamin, 2008 - 2009
  */
 
 #include <linux/kernel.h>
@@ -29,7 +29,7 @@ static void dc_pad_callback(struct mapleq *mq)
 	struct maple_device *mapledev = mq->dev;
 	struct dc_pad *pad = maple_get_drvdata(mapledev);
 	struct input_dev *dev = pad->dev;
-	unsigned char *res = mq->recvbuf;
+	unsigned char *res = mq->recvbuf->buf;
 
 	buttons = ~le16_to_cpup((__le16 *)(res + 8));
 
diff --git a/drivers/input/keyboard/maple_keyb.c b/drivers/input/keyboard/maple_keyb.c
index 22f17a593be7..5aa2361aef95 100644
--- a/drivers/input/keyboard/maple_keyb.c
+++ b/drivers/input/keyboard/maple_keyb.c
@@ -1,8 +1,8 @@
 /*
  * SEGA Dreamcast keyboard driver
  * Based on drivers/usb/usbkbd.c
- * Copyright YAEGASHI Takeshi, 2001
- * Porting to 2.6 Copyright Adrian McMenamin, 2007, 2008
+ * Copyright (c) YAEGASHI Takeshi, 2001
+ * Porting to 2.6 Copyright (c) Adrian McMenamin, 2007 - 2009
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -33,7 +33,7 @@ static DEFINE_MUTEX(maple_keyb_mutex);
 
 #define NR_SCANCODES 256
 
-MODULE_AUTHOR("YAEGASHI Takeshi, Adrian McMenamin");
+MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk");
 MODULE_DESCRIPTION("SEGA Dreamcast keyboard driver");
 MODULE_LICENSE("GPL");
 
@@ -115,7 +115,7 @@ static void dc_scan_kbd(struct dc_kbd *kbd)
 				input_event(dev, EV_MSC, MSC_SCAN, code);
 				input_report_key(dev, keycode, 0);
 			} else
-				printk(KERN_DEBUG "maple_keyb: "
+				dev_dbg(&dev->dev,
 					"Unknown key (scancode %#x) released.",
 					code);
 		}
@@ -127,7 +127,7 @@ static void dc_scan_kbd(struct dc_kbd *kbd)
 				input_event(dev, EV_MSC, MSC_SCAN, code);
 				input_report_key(dev, keycode, 1);
 			} else
-				printk(KERN_DEBUG "maple_keyb: "
+				dev_dbg(&dev->dev,
 					"Unknown key (scancode %#x) pressed.",
 					code);
 		}
@@ -140,7 +140,7 @@ static void dc_kbd_callback(struct mapleq *mq)
 {
 	struct maple_device *mapledev = mq->dev;
 	struct dc_kbd *kbd = maple_get_drvdata(mapledev);
-	unsigned long *buf = mq->recvbuf;
+	unsigned long *buf = (unsigned long *)(mq->recvbuf->buf);
 
 	/*
 	 * We should always get the lock because the only
@@ -159,22 +159,27 @@ static void dc_kbd_callback(struct mapleq *mq)
 
 static int probe_maple_kbd(struct device *dev)
 {
-	struct maple_device *mdev = to_maple_dev(dev);
-	struct maple_driver *mdrv = to_maple_driver(dev->driver);
+	struct maple_device *mdev;
+	struct maple_driver *mdrv;
 	int i, error;
 	struct dc_kbd *kbd;
 	struct input_dev *idev;
 
-	if (!(mdev->function & MAPLE_FUNC_KEYBOARD))
-		return -EINVAL;
+	mdev = to_maple_dev(dev);
+	mdrv = to_maple_driver(dev->driver);
 
 	kbd = kzalloc(sizeof(struct dc_kbd), GFP_KERNEL);
-	idev = input_allocate_device();
-	if (!kbd || !idev) {
+	if (!kbd) {
 		error = -ENOMEM;
 		goto fail;
 	}
 
+	idev = input_allocate_device();
+	if (!idev) {
+		error = -ENOMEM;
+		goto fail_idev_alloc;
+	}
+
 	kbd->dev = idev;
 	memcpy(kbd->keycode, dc_kbd_keycode, sizeof(kbd->keycode));
 
@@ -195,7 +200,7 @@ static int probe_maple_kbd(struct device *dev)
 
 	error = input_register_device(idev);
 	if (error)
-		goto fail;
+		goto fail_register;
 
 	/* Maple polling is locked to VBLANK - which may be just 50/s */
 	maple_getcond_callback(mdev, dc_kbd_callback, HZ/50,
@@ -207,10 +212,12 @@ static int probe_maple_kbd(struct device *dev)
 
 	return error;
 
-fail:
+fail_register:
+	maple_set_drvdata(mdev, NULL);
 	input_free_device(idev);
+fail_idev_alloc:
 	kfree(kbd);
-	maple_set_drvdata(mdev, NULL);
+fail:
 	return error;
 }
 
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
index 63f0de29aa14..4054fe93d6e4 100644
--- a/drivers/sh/maple/maple.c
+++ b/drivers/sh/maple/maple.c
@@ -1,16 +1,10 @@
 /*
  * Core maple bus functionality
  *
- * Copyright (C) 2007, 2008 Adrian McMenamin
+ * Copyright (C) 2007 - 2009 Adrian McMenamin
  * Copyright (C) 2001 - 2008 Paul Mundt
- *
- * Based on 2.4 code by:
- *
- * Copyright (C) 2000-2001 YAEGASHI Takeshi
+ * Copyright (C) 2000 - 2001 YAEGASHI Takeshi
  * Copyright (C) 2001 M. R. Brown
- * Copyright (C) 2001 Paul Mundt
- *
- * and others.
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
@@ -32,7 +26,7 @@
 #include <mach/dma.h>
 #include <mach/sysasic.h>
 
-MODULE_AUTHOR("Yaegashi Takeshi, Paul Mundt, M. R. Brown, Adrian McMenamin");
+MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>");
 MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
 MODULE_LICENSE("GPL v2");
 MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");
@@ -49,7 +43,7 @@ static LIST_HEAD(maple_sentq)
 /* mutex to protect queue of waiting packets */
 static DEFINE_MUTEX(maple_wlist_lock);
 
-static struct maple_driver maple_dummy_driver;
+static struct maple_driver maple_unsupported_device;
 static struct device maple_bus;
 static int subdevice_map[MAPLE_PORTS];
 static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
@@ -62,8 +56,9 @@ struct maple_device_specify {
 	int unit;
 };
 
-static bool checked[4];
-static struct maple_device *baseunits[4];
+static bool checked[MAPLE_PORTS];
+static bool empty[MAPLE_PORTS];
+static struct maple_device *baseunits[MAPLE_PORTS];
 
 /**
  * maple_driver_register - register a maple driver
@@ -97,12 +92,20 @@ void maple_driver_unregister(struct maple_driver *drv)
 EXPORT_SYMBOL_GPL(maple_driver_unregister);
 
 /* set hardware registers to enable next round of dma */
-static void maplebus_dma_reset(void)
+static void maple_dma_reset(void)
 {
 	ctrl_outl(MAPLE_MAGIC, MAPLE_RESET);
 	/* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
 	ctrl_outl(1, MAPLE_TRIGTYPE);
-	ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(50000), MAPLE_SPEED);
+	/*
+	 * Maple system register
+	 * bits 31 - 16	timeout in units of 20nsec
+	 * bit 12	hard trigger - set 0 to keep responding to VBLANK
+	 * bits 9 - 8	set 00 for 2 Mbps, 01 for 1 Mbps
+	 * bits 3 - 0	delay (in 1.3ms) between VBLANK and start of DMA
+	 * max delay is 11
+	 */
+	ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED);
 	ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR);
 	ctrl_outl(1, MAPLE_ENABLE);
 }
@@ -134,21 +137,16 @@ static void maple_release_device(struct device *dev)
 {
 	struct maple_device *mdev;
 	struct mapleq *mq;
-	if (!dev)
-		return;
+
 	mdev = to_maple_dev(dev);
 	mq = mdev->mq;
-	if (mq) {
-		if (mq->recvbufdcsp)
-			kmem_cache_free(maple_queue_cache, mq->recvbufdcsp);
-		kfree(mq);
-		mq = NULL;
-	}
+	kmem_cache_free(maple_queue_cache, mq->recvbuf);
+	kfree(mq);
 	kfree(mdev);
 }
 
 /**
- * maple_add_packet - add a single instruction to the queue
+ * maple_add_packet - add a single instruction to the maple bus queue
  * @mdev: maple device
  * @function: function on device being queried
  * @command: maple command to add
@@ -158,68 +156,12 @@ static void maple_release_device(struct device *dev)
 int maple_add_packet(struct maple_device *mdev, u32 function, u32 command,
 		size_t length, void *data)
 {
-	int locking, ret = 0;
+	int ret = 0;
 	void *sendbuf = NULL;
 
-	mutex_lock(&maple_wlist_lock);
-	/* bounce if device already locked */
-	locking = mutex_is_locked(&mdev->mq->mutex);
-	if (locking) {
-		ret = -EBUSY;
-		goto out;
-	}
-
-	mutex_lock(&mdev->mq->mutex);
-
 	if (length) {
-		sendbuf = kmalloc(length * 4, GFP_KERNEL);
+		sendbuf = kzalloc(length * 4, GFP_KERNEL);
 		if (!sendbuf) {
-			mutex_unlock(&mdev->mq->mutex);
-			ret = -ENOMEM;
-			goto out;
-		}
-		((__be32 *)sendbuf)[0] = cpu_to_be32(function);
-	}
-
-	mdev->mq->command = command;
-	mdev->mq->length = length;
-	if (length > 1)
-		memcpy(sendbuf + 4, data, (length - 1) * 4);
-	mdev->mq->sendbuf = sendbuf;
-
-	list_add(&mdev->mq->list, &maple_waitq);
-out:
-	mutex_unlock(&maple_wlist_lock);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(maple_add_packet);
-
-/**
- * maple_add_packet_sleeps - add a single instruction to the queue
- * @mdev: maple device
- * @function: function on device being queried
- * @command: maple command to add
- * @length: length of command string (in 32 bit words)
- * @data: remainder of command string
- *
- * Same as maple_add_packet(), but waits for the lock to become free.
- */
-int maple_add_packet_sleeps(struct maple_device *mdev, u32 function,
-		u32 command, size_t length, void *data)
-{
-	int locking, ret = 0;
-	void *sendbuf = NULL;
-
-	locking = mutex_lock_interruptible(&mdev->mq->mutex);
-	if (locking) {
-		ret = -EIO;
-		goto out;
-	}
-
-	if (length) {
-		sendbuf = kmalloc(length * 4, GFP_KERNEL);
-		if (!sendbuf) {
-			mutex_unlock(&mdev->mq->mutex);
 			ret = -ENOMEM;
 			goto out;
 		}
@@ -233,38 +175,35 @@ int maple_add_packet_sleeps(struct maple_device *mdev, u32 function,
 	mdev->mq->sendbuf = sendbuf;
 
 	mutex_lock(&maple_wlist_lock);
-	list_add(&mdev->mq->list, &maple_waitq);
+	list_add_tail(&mdev->mq->list, &maple_waitq);
 	mutex_unlock(&maple_wlist_lock);
 out:
 	return ret;
 }
-EXPORT_SYMBOL_GPL(maple_add_packet_sleeps);
+EXPORT_SYMBOL_GPL(maple_add_packet);
 
 static struct mapleq *maple_allocq(struct maple_device *mdev)
 {
 	struct mapleq *mq;
 
-	mq = kmalloc(sizeof(*mq), GFP_KERNEL);
+	mq = kzalloc(sizeof(*mq), GFP_KERNEL);
 	if (!mq)
 		goto failed_nomem;
 
+	INIT_LIST_HEAD(&mq->list);
 	mq->dev = mdev;
-	mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
-	mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp);
+	mq->recvbuf = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
 	if (!mq->recvbuf)
 		goto failed_p2;
-	/*
-	 * most devices do not need the mutex - but
-	 * anything that injects block reads or writes
-	 * will rely on it
-	 */
-	mutex_init(&mq->mutex);
+	mq->recvbuf->buf = &((mq->recvbuf->bufx)[0]);
 
 	return mq;
 
failed_p2:
 	kfree(mq);
failed_nomem:
+	dev_err(&mdev->dev, "could not allocate memory for device (%d, %d)\n",
+		mdev->port, mdev->unit);
 	return NULL;
 }
 
@@ -272,12 +211,16 @@ static struct maple_device *maple_alloc_dev(int port, int unit)
 {
 	struct maple_device *mdev;
 
+	/* zero this out to avoid kobj subsystem
+	 * thinking it has already been registered */
+
 	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
 	if (!mdev)
 		return NULL;
 
 	mdev->port = port;
 	mdev->unit = unit;
+
 	mdev->mq = maple_allocq(mdev);
 
 	if (!mdev->mq) {
@@ -286,19 +229,14 @@ static struct maple_device *maple_alloc_dev(int port, int unit)
 	}
 	mdev->dev.bus = &maple_bus_type;
 	mdev->dev.parent = &maple_bus;
+	init_waitqueue_head(&mdev->maple_wait);
 	return mdev;
 }
 
 static void maple_free_dev(struct maple_device *mdev)
 {
-	if (!mdev)
-		return;
-	if (mdev->mq) {
-		if (mdev->mq->recvbufdcsp)
-			kmem_cache_free(maple_queue_cache,
-				mdev->mq->recvbufdcsp);
-		kfree(mdev->mq);
-	}
+	kmem_cache_free(maple_queue_cache, mdev->mq->recvbuf);
+	kfree(mdev->mq);
 	kfree(mdev);
 }
 
@@ -320,7 +258,7 @@ static void maple_build_block(struct mapleq *mq)
 	maple_lastptr = maple_sendptr;
 
 	*maple_sendptr++ = (port << 16) | len | 0x80000000;
-	*maple_sendptr++ = PHYSADDR(mq->recvbuf);
+	*maple_sendptr++ = PHYSADDR(mq->recvbuf->buf);
 	*maple_sendptr++ =
 		mq->command | (to << 8) | (from << 16) | (len << 24);
 	while (len-- > 0)
@@ -333,20 +271,28 @@ static void maple_send(void)
 	int i, maple_packets = 0;
 	struct mapleq *mq, *nmq;
 
-	if (!list_empty(&maple_sentq))
+	if (!maple_dma_done())
 		return;
+
+	/* disable DMA */
+	ctrl_outl(0, MAPLE_ENABLE);
+
+	if (!list_empty(&maple_sentq))
+		goto finish;
+
 	mutex_lock(&maple_wlist_lock);
-	if (list_empty(&maple_waitq) || !maple_dma_done()) {
+	if (list_empty(&maple_waitq)) {
 		mutex_unlock(&maple_wlist_lock);
-		return;
+		goto finish;
 	}
-	mutex_unlock(&maple_wlist_lock);
+
 	maple_lastptr = maple_sendbuf;
 	maple_sendptr = maple_sendbuf;
-	mutex_lock(&maple_wlist_lock);
+
 	list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
 		maple_build_block(mq);
-		list_move(&mq->list, &maple_sentq);
+		list_del_init(&mq->list);
+		list_add_tail(&mq->list, &maple_sentq);
 		if (maple_packets++ > MAPLE_MAXPACKETS)
 			break;
 	}
@@ -356,10 +302,13 @@ static void maple_send(void)
 		dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
 			PAGE_SIZE, DMA_BIDIRECTIONAL);
 	}
+
+finish:
+	maple_dma_reset();
 }
 
 /* check if there is a driver registered likely to match this device */
-static int check_matching_maple_driver(struct device_driver *driver,
+static int maple_check_matching_driver(struct device_driver *driver,
 		void *devptr)
 {
 	struct maple_driver *maple_drv;
@@ -374,10 +323,7 @@ static int check_matching_maple_driver(struct device_driver *driver,
 
 static void maple_detach_driver(struct maple_device *mdev)
 {
-	if (!mdev)
-		return;
 	device_unregister(&mdev->dev);
-	mdev = NULL;
 }
 
 /* process initial MAPLE_COMMAND_DEVINFO for each device or port */
@@ -385,9 +331,9 @@ static void maple_attach_driver(struct maple_device *mdev)
 {
 	char *p, *recvbuf;
 	unsigned long function;
-	int matched, retval;
+	int matched, error;
 
-	recvbuf = mdev->mq->recvbuf;
+	recvbuf = mdev->mq->recvbuf->buf;
 	/* copy the data as individual elements in
 	 * case of memory optimisation */
 	memcpy(&mdev->devinfo.function, recvbuf + 4, 4);
@@ -395,7 +341,6 @@ static void maple_attach_driver(struct maple_device *mdev)
 	memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1);
 	memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1);
 	memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30);
-	memcpy(&mdev->devinfo.product_licence[0], recvbuf + 52, 60);
 	memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2);
 	memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2);
 	memcpy(mdev->product_name, mdev->devinfo.product_name, 30);
@@ -414,43 +359,41 @@ static void maple_attach_driver(struct maple_device *mdev)
 	else
 		break;
 
-	printk(KERN_INFO "Maple device detected: %s\n",
-		mdev->product_name);
-	printk(KERN_INFO "Maple device: %s\n", mdev->product_licence);
-
 	function = be32_to_cpu(mdev->devinfo.function);
 
+	dev_info(&mdev->dev, "detected %s: function 0x%lX: at (%d, %d)\n",
+		mdev->product_name, function, mdev->port, mdev->unit);
+
 	if (function > 0x200) {
 		/* Do this silently - as not a real device */
 		function = 0;
-		mdev->driver = &maple_dummy_driver;
+		mdev->driver = &maple_unsupported_device;
 		sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port);
+
 	} else {
-		printk(KERN_INFO
-		       "Maple bus at (%d, %d): Function 0x%lX\n",
-		       mdev->port, mdev->unit, function);
 
 		matched =
 		    bus_for_each_drv(&maple_bus_type, NULL, mdev,
-				     check_matching_maple_driver);
+				     maple_check_matching_driver);
 
 		if (matched == 0) {
 			/* Driver does not exist yet */
-			printk(KERN_INFO
-			       "No maple driver found.\n");
-			mdev->driver = &maple_dummy_driver;
+			dev_info(&mdev->dev, "no driver found\n");
+			mdev->driver = &maple_unsupported_device;
 		}
 		sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port,
 			mdev->unit, function);
 	}
+
 	mdev->function = function;
 	mdev->dev.release = &maple_release_device;
-	retval = device_register(&mdev->dev);
-	if (retval) {
-		printk(KERN_INFO
-		       "Maple bus: Attempt to register device"
-		       " (%x, %x) failed.\n",
-		       mdev->port, mdev->unit);
+
+	atomic_set(&mdev->busy, 0);
+	error = device_register(&mdev->dev);
+	if (error) {
+		dev_warn(&mdev->dev, "could not register device at"
			" (%d, %d), with error 0x%X\n", mdev->unit,
+			mdev->port, error);
 		maple_free_dev(mdev);
 		mdev = NULL;
 		return;
@@ -462,7 +405,7 @@ static void maple_attach_driver(struct maple_device *mdev)
  * port and unit then return 1 - allows identification
  * of which devices need to be attached or detached
  */
-static int detach_maple_device(struct device *device, void *portptr)
+static int check_maple_device(struct device *device, void *portptr)
 {
 	struct maple_device_specify *ds;
 	struct maple_device *mdev;
@@ -477,21 +420,25 @@ static int detach_maple_device(struct device *device, void *portptr)
 static int setup_maple_commands(struct device *device, void *ignored)
 {
 	int add;
-	struct maple_device *maple_dev = to_maple_dev(device);
-
-	if ((maple_dev->interval > 0)
-	    && time_after(jiffies, maple_dev->when)) {
-		/* bounce if we cannot lock */
-		add = maple_add_packet(maple_dev,
-			be32_to_cpu(maple_dev->devinfo.function),
+	struct maple_device *mdev = to_maple_dev(device);
+	if (mdev->interval > 0 && atomic_read(&mdev->busy) == 0 &&
+	    time_after(jiffies, mdev->when)) {
+		/* bounce if we cannot add */
+		add = maple_add_packet(mdev,
+			be32_to_cpu(mdev->devinfo.function),
 			MAPLE_COMMAND_GETCOND, 1, NULL);
 		if (!add)
-			maple_dev->when = jiffies + maple_dev->interval;
+			mdev->when = jiffies + mdev->interval;
 	} else {
 		if (time_after(jiffies, maple_pnp_time))
-			/* This will also bounce */
-			maple_add_packet(maple_dev, 0,
-				MAPLE_COMMAND_DEVINFO, 0, NULL);
+			/* Ensure we don't have block reads and devinfo
+			 * calls interfering with one another - so flag the
+			 * device as busy */
+			if (atomic_read(&mdev->busy) == 0) {
+				atomic_set(&mdev->busy, 1);
+				maple_add_packet(mdev, 0,
+					MAPLE_COMMAND_DEVINFO, 0, NULL);
+			}
 	}
 	return 0;
 }
@@ -499,29 +446,50 @@ static int setup_maple_commands(struct device *device, void *ignored)
 /* VBLANK bottom half - implemented via workqueue */
 static void maple_vblank_handler(struct work_struct *work)
 {
-	if (!list_empty(&maple_sentq) || !maple_dma_done())
+	int x, locking;
+	struct maple_device *mdev;
+
+	if (!maple_dma_done())
 		return;
 
 	ctrl_outl(0, MAPLE_ENABLE);
 
+	if (!list_empty(&maple_sentq))
+		goto finish;
+
+	/*
+	 * Set up essential commands - to fetch data and
+	 * check devices are still present
+	 */
 	bus_for_each_dev(&maple_bus_type, NULL, NULL,
 		setup_maple_commands);
+
+	if (time_after(jiffies, maple_pnp_time)) {
+		/*
+		 * Scan the empty ports - bus is flakey and may have
+		 * mis-reported emptyness
+		 */
+		for (x = 0; x < MAPLE_PORTS; x++) {
+			if (checked[x] && empty[x]) {
+				mdev = baseunits[x];
+				if (!mdev)
+					break;
+				atomic_set(&mdev->busy, 1);
+				locking = maple_add_packet(mdev, 0,
+					MAPLE_COMMAND_DEVINFO, 0, NULL);
+				if (!locking)
+					break;
+			}
+		}
 
-	if (time_after(jiffies, maple_pnp_time))
 		maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;
-
-	mutex_lock(&maple_wlist_lock);
-	if (!list_empty(&maple_waitq) && list_empty(&maple_sentq)) {
-		mutex_unlock(&maple_wlist_lock);
-		maple_send();
-	} else {
-		mutex_unlock(&maple_wlist_lock);
 	}
 
-	maplebus_dma_reset();
+finish:
+	maple_send();
 }
 
-/* handle devices added via hotplugs - placing them on queue for DEVINFO*/
+/* handle devices added via hotplugs - placing them on queue for DEVINFO */
 static void maple_map_subunits(struct maple_device *mdev, int submask)
 {
 	int retval, k, devcheck;
@@ -533,7 +501,7 @@ static void maple_map_subunits(struct maple_device *mdev, int submask)
 		ds.unit = k + 1;
 		retval =
 		    bus_for_each_dev(&maple_bus_type, NULL, &ds,
-				     detach_maple_device);
+				     check_maple_device);
 		if (retval) {
 			submask = submask >> 1;
 			continue;
@@ -543,6 +511,7 @@ static void maple_map_subunits(struct maple_device *mdev, int submask)
 		mdev_add = maple_alloc_dev(mdev->port, k + 1);
 		if (!mdev_add)
 			return;
+		atomic_set(&mdev_add->busy, 1);
 		maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO,
 			0, NULL);
 		/* mark that we are checking sub devices */
@@ -564,27 +533,45 @@ static void maple_clean_submap(struct maple_device *mdev)
 }
 
 /* handle empty port or hotplug removal */
-static void maple_response_none(struct maple_device *mdev,
-				struct mapleq *mq)
-{
-	if (mdev->unit != 0) {
-		list_del(&mq->list);
-		maple_clean_submap(mdev);
-		printk(KERN_INFO
-		       "Maple bus device detaching at (%d, %d)\n",
-		       mdev->port, mdev->unit);
+static void maple_response_none(struct maple_device *mdev)
+{
+	maple_clean_submap(mdev);
+
+	if (likely(mdev->unit != 0)) {
+		/*
+		 * Block devices play up
+		 * and give the impression they have
+		 * been removed even when still in place or
+		 * trip the mtd layer when they have
+		 * really gone - this code traps that eventuality
+		 * and ensures we aren't overloaded with useless
+		 * error messages
+		 */
+		if (mdev->can_unload) {
+			if (!mdev->can_unload(mdev)) {
+				atomic_set(&mdev->busy, 2);
+				wake_up(&mdev->maple_wait);
+				return;
+			}
+		}
+
+		dev_info(&mdev->dev, "detaching device at (%d, %d)\n",
+			mdev->port, mdev->unit);
 		maple_detach_driver(mdev);
 		return;
-	}
+	} else {
 		if (!started || !fullscan) {
 			if (checked[mdev->port] == false) {
 				checked[mdev->port] = true;
-				printk(KERN_INFO "No maple devices attached"
-					" to port %d\n", mdev->port);
+				empty[mdev->port] = true;
+				dev_info(&mdev->dev, "no devices"
+					" to port %d\n", mdev->port);
+			}
+			return;
 		}
-		return;
 	}
-	maple_clean_submap(mdev);
+	/* Some hardware devices generate false detach messages on unit 0 */
+	atomic_set(&mdev->busy, 0);
 }
 
 /* preprocess hotplugs or scans */
@@ -599,8 +586,11 @@ static void maple_response_devinfo(struct maple_device *mdev,
 		} else {
 			if (mdev->unit != 0)
 				maple_attach_driver(mdev);
+			if (mdev->unit == 0) {
+				empty[mdev->port] = false;
+				maple_attach_driver(mdev);
+			}
 		}
-		return;
 	}
 	if (mdev->unit == 0) {
 		submask = recvbuf[2] & 0x1F;
@@ -611,6 +601,17 @@
 	}
 }
 
+static void maple_response_fileerr(struct maple_device *mdev, void *recvbuf)
+{
+	if (mdev->fileerr_handler) {
+		mdev->fileerr_handler(mdev, recvbuf);
+		return;
+	} else
+		dev_warn(&mdev->dev, "device at (%d, %d) reports"
+			"file error 0x%X\n", mdev->port, mdev->unit,
+			((int *)recvbuf)[1]);
+}
+
 static void maple_port_rescan(void)
 {
 	int i;
@@ -621,12 +622,6 @@ static void maple_port_rescan(void)
 		if (checked[i] == false) {
 			fullscan = 0;
 			mdev = baseunits[i];
-			/*
-			 * test lock in case scan has failed
-			 * but device is still locked
-			 */
-			if (mutex_is_locked(&mdev->mq->mutex))
-				mutex_unlock(&mdev->mq->mutex);
 			maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO,
 				0, NULL);
 		}
@@ -637,7 +632,7 @@
 static void maple_dma_handler(struct work_struct *work)
 {
 	struct mapleq *mq, *nmq;
-	struct maple_device *dev;
+	struct maple_device *mdev;
 	char *recvbuf;
 	enum maple_code code;
 
@@ -646,43 +641,56 @@ static void maple_dma_handler(struct work_struct *work)
 	ctrl_outl(0, MAPLE_ENABLE);
 	if (!list_empty(&maple_sentq)) {
 		list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
-			recvbuf = mq->recvbuf;
+			mdev = mq->dev;
+			recvbuf = mq->recvbuf->buf;
+			dma_cache_sync(&mdev->dev, recvbuf, 0x400,
+				DMA_FROM_DEVICE);
 			code = recvbuf[0];
-			dev = mq->dev;
 			kfree(mq->sendbuf);
-			mutex_unlock(&mq->mutex);
 			list_del_init(&mq->list);
-
 			switch (code) {
 			case MAPLE_RESPONSE_NONE:
-				maple_response_none(dev, mq);
+				maple_response_none(mdev);
 				break;
 
 			case MAPLE_RESPONSE_DEVINFO:
-				maple_response_devinfo(dev, recvbuf);
+				maple_response_devinfo(mdev, recvbuf);
+				atomic_set(&mdev->busy, 0);
 				break;
 
 			case MAPLE_RESPONSE_DATATRF:
-				if (dev->callback)
-					dev->callback(mq);
+				if (mdev->callback)
+					mdev->callback(mq);
+				atomic_set(&mdev->busy, 0);
+				wake_up(&mdev->maple_wait);
 				break;
 
 			case MAPLE_RESPONSE_FILEERR:
+				maple_response_fileerr(mdev, recvbuf);
+				atomic_set(&mdev->busy, 0);
+				wake_up(&mdev->maple_wait);
+				break;
+
 			case MAPLE_RESPONSE_AGAIN:
 			case MAPLE_RESPONSE_BADCMD:
 			case MAPLE_RESPONSE_BADFUNC:
-				printk(KERN_DEBUG
-				       "Maple non-fatal error 0x%X\n",
-				       code);
+				dev_warn(&mdev->dev, "non-fatal error"
+					" 0x%X at (%d, %d)\n", code,
+					mdev->port, mdev->unit);
+				atomic_set(&mdev->busy, 0);
 				break;

 			case MAPLE_RESPONSE_ALLINFO:
-				printk(KERN_DEBUG
-				       "Maple - extended device information"
-				       " not supported\n");
+				dev_notice(&mdev->dev, "extended"
+					" device information request for (%d, %d)"
+					" but call is not supported\n", mdev->port,
+					mdev->unit);
+				atomic_set(&mdev->busy, 0);
 				break;
 
 			case MAPLE_RESPONSE_OK:
+				atomic_set(&mdev->busy, 0);
+				wake_up(&mdev->maple_wait);
 				break;
 
 			default:
@@ -699,20 +707,19 @@ static void maple_dma_handler(struct work_struct *work)
 		if (!fullscan)
 			maple_port_rescan();
 		/* mark that we have been through the first scan */
-		if (started == 0)
-			started = 1;
+		started = 1;
 	}
-	maplebus_dma_reset();
+	maple_send();
 }
 
-static irqreturn_t maplebus_dma_interrupt(int irq, void *dev_id)
+static irqreturn_t maple_dma_interrupt(int irq, void *dev_id)
 {
 	/* Load everything into the bottom half */
 	schedule_work(&maple_dma_process);
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id)
+static irqreturn_t maple_vblank_interrupt(int irq, void *dev_id)
 {
 	schedule_work(&maple_vblank_process);
 	return IRQ_HANDLED;
@@ -720,14 +727,14 @@ static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id)
 
 static int maple_set_dma_interrupt_handler(void)
 {
-	return request_irq(HW_EVENT_MAPLE_DMA, maplebus_dma_interrupt,
-		IRQF_SHARED, "maple bus DMA", &maple_dummy_driver);
+	return request_irq(HW_EVENT_MAPLE_DMA, maple_dma_interrupt,
+		IRQF_SHARED, "maple bus DMA", &maple_unsupported_device);
 }
 
 static int maple_set_vblank_interrupt_handler(void)
 {
-	return request_irq(HW_EVENT_VSYNC, maplebus_vblank_interrupt,
-		IRQF_SHARED, "maple bus VBLANK", &maple_dummy_driver);
+	return request_irq(HW_EVENT_VSYNC, maple_vblank_interrupt,
+		IRQF_SHARED, "maple bus VBLANK", &maple_unsupported_device);
 }
 
 static int maple_get_dma_buffer(void)
@@ -740,7 +747,7 @@ static int maple_get_dma_buffer(void)
 	return 0;
 }
 
-static int match_maple_bus_driver(struct device *devptr,
+static int maple_match_bus_driver(struct device *devptr,
 				  struct device_driver *drvptr)
 {
 	struct maple_driver *maple_drv = to_maple_driver(drvptr);
@@ -765,16 +772,18 @@ static void maple_bus_release(struct device *dev)
 {
 }
 
-static struct maple_driver maple_dummy_driver = {
+static struct maple_driver maple_unsupported_device = {
 	.drv = {
-		.name = "maple_dummy_driver",
+		.name = "maple_unsupported_device",
 		.bus = &maple_bus_type,
 	},
 };
-
+/**
+ * maple_bus_type - core maple bus structure
+ */
 struct bus_type maple_bus_type = {
 	.name = "maple",
-	.match = match_maple_bus_driver,
+	.match = maple_match_bus_driver,
 	.uevent = maple_bus_uevent,
 };
 EXPORT_SYMBOL_GPL(maple_bus_type);
@@ -788,7 +797,8 @@ static int __init maple_bus_init(void)
 {
 	int retval, i;
 	struct maple_device *mdev[MAPLE_PORTS];
-	ctrl_outl(0, MAPLE_STATE);
+
+	ctrl_outl(0, MAPLE_ENABLE);
 
 	retval = device_register(&maple_bus);
 	if (retval)
@@ -798,36 +808,33 @@ static int __init maple_bus_init(void)
 	if (retval)
 		goto cleanup_device;
 
-	retval = driver_register(&maple_dummy_driver.drv);
+	retval = driver_register(&maple_unsupported_device.drv);
 	if (retval)
 		goto cleanup_bus;
 
 	/* allocate memory for maple bus dma */
 	retval = maple_get_dma_buffer();
 	if (retval) {
-		printk(KERN_INFO
-		       "Maple bus: Failed to allocate Maple DMA buffers\n");
+		dev_err(&maple_bus, "failed to allocate DMA buffers\n");
 		goto cleanup_basic;
 	}
 
 	/* set up DMA interrupt handler */
 	retval = maple_set_dma_interrupt_handler();
 	if (retval) {
-		printk(KERN_INFO
-		       "Maple bus: Failed to grab maple DMA IRQ\n");
+		dev_err(&maple_bus, "bus failed to grab maple "
+			"DMA IRQ\n");
 		goto cleanup_dma;
 	}
 
 	/* set up VBLANK interrupt handler */
 	retval = maple_set_vblank_interrupt_handler();
 	if (retval) {
-		printk(KERN_INFO "Maple bus: Failed to grab VBLANK IRQ\n");
+		dev_err(&maple_bus, "bus failed to grab VBLANK IRQ\n");
 		goto cleanup_irq;
 	}
 
-	maple_queue_cache =
-	    kmem_cache_create("maple_queue_cache", 0x400, 0,
-			      SLAB_HWCACHE_ALIGN, NULL);
+	maple_queue_cache = KMEM_CACHE(maple_buffer, SLAB_HWCACHE_ALIGN);
 
 	if (!maple_queue_cache)
 		goto cleanup_bothirqs;
@@ -838,23 +845,23 @@ static int __init maple_bus_init(void)
 	/* setup maple ports */
 	for (i = 0; i < MAPLE_PORTS; i++) {
 		checked[i] = false;
+		empty[i] = false;
 		mdev[i] = maple_alloc_dev(i, 0);
-		baseunits[i] = mdev[i];
 		if (!mdev[i]) {
 			while (i-- > 0)
 				maple_free_dev(mdev[i]);
 			goto cleanup_cache;
 		}
+		baseunits[i] = mdev[i];
+		atomic_set(&mdev[i]->busy, 1);
 		maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
 		subdevice_map[i] = 0;
 	}
 
-	/* setup maplebus hardware */
-	maplebus_dma_reset();
-	/* initial detection */
+	maple_pnp_time = jiffies + HZ;
+	/* prepare initial queue */
 	maple_send();
-	maple_pnp_time = jiffies;
-	printk(KERN_INFO "Maple bus core now registered.\n");
+	dev_info(&maple_bus, "bus core now registered\n");
 
 	return 0;
 
@@ -871,7 +878,7 @@ cleanup_dma:
 	free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);
 
 cleanup_basic:
-	driver_unregister(&maple_dummy_driver.drv);
+	driver_unregister(&maple_unsupported_device.drv);
 
 cleanup_bus:
 	bus_unregister(&maple_bus_type);
@@ -880,7 +887,7 @@ cleanup_device:
 	device_unregister(&maple_bus);
 
 cleanup:
-	printk(KERN_INFO "Maple bus registration failed\n");
+	printk(KERN_ERR "Maple bus registration failed\n");
 	return retval;
 }
 /* Push init to later to ensure hardware gets detected */
diff --git a/include/linux/maple.h b/include/linux/maple.h
index c23d3f51ba40..d9a51b9b3300 100644
--- a/include/linux/maple.h
+++ b/include/linux/maple.h
@@ -8,33 +8,49 @@ extern struct bus_type maple_bus_type;
 
 /* Maple Bus command and response codes */
 enum maple_code {
 	MAPLE_RESPONSE_FILEERR = -5,
-	MAPLE_RESPONSE_AGAIN = -4,	/* request should be retransmitted */
-	MAPLE_RESPONSE_BADCMD = -3,
-	MAPLE_RESPONSE_BADFUNC = -2,
-	MAPLE_RESPONSE_NONE = -1,	/* unit didn't respond at all */
+	MAPLE_RESPONSE_AGAIN,	/* retransmit */
+	MAPLE_RESPONSE_BADCMD,
+	MAPLE_RESPONSE_BADFUNC,
+	MAPLE_RESPONSE_NONE,	/* unit didn't respond*/
 	MAPLE_COMMAND_DEVINFO = 1,
-	MAPLE_COMMAND_ALLINFO = 2,
-	MAPLE_COMMAND_RESET = 3,
-	MAPLE_COMMAND_KILL = 4,
-	MAPLE_RESPONSE_DEVINFO = 5,
-	MAPLE_RESPONSE_ALLINFO = 6,
-	MAPLE_RESPONSE_OK = 7,
-	MAPLE_RESPONSE_DATATRF = 8,
-	MAPLE_COMMAND_GETCOND = 9,
-	MAPLE_COMMAND_GETMINFO = 10,
-	MAPLE_COMMAND_BREAD = 11,
-	MAPLE_COMMAND_BWRITE = 12,
-	MAPLE_COMMAND_SETCOND = 14
+	MAPLE_COMMAND_ALLINFO,
+	MAPLE_COMMAND_RESET,
+	MAPLE_COMMAND_KILL,
+	MAPLE_RESPONSE_DEVINFO,
+	MAPLE_RESPONSE_ALLINFO,
+	MAPLE_RESPONSE_OK,
+	MAPLE_RESPONSE_DATATRF,
+	MAPLE_COMMAND_GETCOND,
+	MAPLE_COMMAND_GETMINFO,
+	MAPLE_COMMAND_BREAD,
+	MAPLE_COMMAND_BWRITE,
+	MAPLE_COMMAND_BSYNC,
+	MAPLE_COMMAND_SETCOND,
+	MAPLE_COMMAND_MICCONTROL
+};
+
+enum maple_file_errors {
+	MAPLE_FILEERR_INVALID_PARTITION = 0x01000000,
+	MAPLE_FILEERR_PHASE_ERROR = 0x02000000,
+	MAPLE_FILEERR_INVALID_BLOCK = 0x04000000,
+	MAPLE_FILEERR_WRITE_ERROR = 0x08000000,
+	MAPLE_FILEERR_INVALID_WRITE_LENGTH = 0x10000000,
+	MAPLE_FILEERR_BAD_CRC = 0x20000000
+};
+
+struct maple_buffer {
+	char bufx[0x400];
+	void *buf;
 };
 
 struct mapleq {
 	struct list_head list;
 	struct maple_device *dev;
-	void *sendbuf, *recvbuf, *recvbufdcsp;
+	struct maple_buffer *recvbuf;
+	void *sendbuf, *recvbuf_p2;
 	unsigned char length;
 	enum maple_code command;
-	struct mutex mutex;
 };
 
 struct maple_devinfo {
@@ -52,11 +68,15 @@ struct maple_device {
 	struct maple_driver *driver;
 	struct mapleq *mq;
 	void (*callback) (struct mapleq * mq);
+	void (*fileerr_handler)(struct maple_device *mdev, void *recvbuf);
+	int (*can_unload)(struct maple_device *mdev);
 	unsigned long when, interval, function;
 	struct maple_devinfo devinfo;
 	unsigned char port, unit;
 	char product_name[32];
 	char product_licence[64];
+	atomic_t busy;
+	wait_queue_head_t maple_wait;
 	struct device dev;
 };
 
@@ -72,7 +92,7 @@ void maple_getcond_callback(struct maple_device *dev,
 int maple_driver_register(struct maple_driver *);
 void maple_driver_unregister(struct maple_driver *);
 
-int maple_add_packet_sleeps(struct maple_device *mdev, u32 function,
+int maple_add_packet(struct maple_device *mdev, u32 function,
 		u32 command, u32 length, void *data);
 void maple_clear_dev(struct maple_device *mdev);
 