author	Adrian McMenamin <adrian@mcmen.demon.co.uk>	2009-02-27 02:07:32 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2009-02-27 02:07:32 -0500
commit	b233b28eac0cc37d07c2d007ea08c86c778c5af4 (patch)
tree	636f91b57d675d1886d8b3ab4aca8d8488d65d90 /drivers/sh/maple/maple.c
parent	41480ae7a383dcffa497decdd97b3cb2caaa18ec (diff)
sh: maple: Support block reads and writes.
This patch updates the maple bus to support asynchronous block reads and writes, as well as generally improving the quality of the code and supporting concurrency (all needed to support the Dreamcast visual memory unit - a driver for that will also be posted).

Changes in the bus driver necessitate some changes in the two maple bus input drivers currently in mainline. As well as supporting block reads and writes, this clean-up removes some poor handling of locks, uses an atomic status variable to serialise access to devices, and more robustly handles the general performance problems of the bus.

Signed-off-by: Adrian McMenamin <adrian@mcmen.demon.co.uk>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
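As a rough illustration of the serialisation scheme described above, here is a minimal userspace sketch - not the driver code itself. The names fake_dev, submit and complete are invented stand-ins, and the sketch uses a C11 compare-exchange where the driver pairs atomic_read/atomic_set on mdev->busy: a packet may only be queued while the device's atomic status flag is clear, and the bottom half clears the flag (and, in the real driver, wakes sleepers on mdev->maple_wait) once the response has been handled.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for struct maple_device: the patch gives each
 * device an atomic status flag (0 = idle, non-zero = busy) that is
 * checked before a packet is queued for it. */
struct fake_dev {
	atomic_int busy;
};

/* Submission path: claim the device atomically and bounce if it is
 * already in use, rather than blocking on a per-queue mutex. */
static int submit(struct fake_dev *dev)
{
	int idle = 0;

	if (!atomic_compare_exchange_strong(&dev->busy, &idle, 1))
		return -1;	/* busy: caller retries on a later pass */
	printf("packet queued\n");
	return 0;
}

/* Completion path: the bottom half clears the flag once the response
 * is handled (the real driver also wakes sleepers at this point). */
static void complete(struct fake_dev *dev)
{
	atomic_store(&dev->busy, 0);
}

int main(void)
{
	struct fake_dev dev = { .busy = 0 };

	submit(&dev);		/* succeeds: device idle */
	if (submit(&dev) < 0)	/* bounces: device busy */
		printf("device busy, retry next pass\n");
	complete(&dev);
	return submit(&dev);	/* idle again, succeeds (returns 0) */
}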
Diffstat (limited to 'drivers/sh/maple/maple.c')
-rw-r--r--	drivers/sh/maple/maple.c	463
1 file changed, 235 insertions, 228 deletions
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
index 63f0de29aa14..4054fe93d6e4 100644
--- a/drivers/sh/maple/maple.c
+++ b/drivers/sh/maple/maple.c
@@ -1,16 +1,10 @@
 /*
  * Core maple bus functionality
  *
- * Copyright (C) 2007, 2008 Adrian McMenamin
+ * Copyright (C) 2007 - 2009 Adrian McMenamin
  * Copyright (C) 2001 - 2008 Paul Mundt
- *
- * Based on 2.4 code by:
- *
- * Copyright (C) 2000-2001 YAEGASHI Takeshi
+ * Copyright (C) 2000 - 2001 YAEGASHI Takeshi
  * Copyright (C) 2001 M. R. Brown
- * Copyright (C) 2001 Paul Mundt
- *
- * and others.
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
@@ -32,7 +26,7 @@
 #include <mach/dma.h>
 #include <mach/sysasic.h>
 
-MODULE_AUTHOR("Yaegashi Takeshi, Paul Mundt, M. R. Brown, Adrian McMenamin");
+MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>");
 MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
 MODULE_LICENSE("GPL v2");
 MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");
@@ -49,7 +43,7 @@ static LIST_HEAD(maple_sentq);
 /* mutex to protect queue of waiting packets */
 static DEFINE_MUTEX(maple_wlist_lock);
 
-static struct maple_driver maple_dummy_driver;
+static struct maple_driver maple_unsupported_device;
 static struct device maple_bus;
 static int subdevice_map[MAPLE_PORTS];
 static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
@@ -62,8 +56,9 @@ struct maple_device_specify {
 	int unit;
 };
 
-static bool checked[4];
-static struct maple_device *baseunits[4];
+static bool checked[MAPLE_PORTS];
+static bool empty[MAPLE_PORTS];
+static struct maple_device *baseunits[MAPLE_PORTS];
 
 /**
  * maple_driver_register - register a maple driver
@@ -97,12 +92,20 @@ void maple_driver_unregister(struct maple_driver *drv)
 EXPORT_SYMBOL_GPL(maple_driver_unregister);
 
 /* set hardware registers to enable next round of dma */
-static void maplebus_dma_reset(void)
+static void maple_dma_reset(void)
 {
 	ctrl_outl(MAPLE_MAGIC, MAPLE_RESET);
 	/* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
 	ctrl_outl(1, MAPLE_TRIGTYPE);
-	ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(50000), MAPLE_SPEED);
+	/*
+	 * Maple system register
+	 * bits 31 - 16	timeout in units of 20nsec
+	 * bit 12	hard trigger - set 0 to keep responding to VBLANK
+	 * bits 9 - 8	set 00 for 2 Mbps, 01 for 1 Mbps
+	 * bits 3 - 0	delay (in 1.3ms) between VBLANK and start of DMA
+	 * max delay is 11
+	 */
+	ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED);
 	ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR);
 	ctrl_outl(1, MAPLE_ENABLE);
 }
@@ -134,21 +137,16 @@ static void maple_release_device(struct device *dev)
 {
 	struct maple_device *mdev;
 	struct mapleq *mq;
-	if (!dev)
-		return;
+
 	mdev = to_maple_dev(dev);
 	mq = mdev->mq;
-	if (mq) {
-		if (mq->recvbufdcsp)
-			kmem_cache_free(maple_queue_cache, mq->recvbufdcsp);
-		kfree(mq);
-		mq = NULL;
-	}
+	kmem_cache_free(maple_queue_cache, mq->recvbuf);
+	kfree(mq);
 	kfree(mdev);
 }
 
 /**
- * maple_add_packet - add a single instruction to the queue
+ * maple_add_packet - add a single instruction to the maple bus queue
  * @mdev: maple device
  * @function: function on device being queried
  * @command: maple command to add
@@ -158,68 +156,12 @@ static void maple_release_device(struct device *dev)
 int maple_add_packet(struct maple_device *mdev, u32 function, u32 command,
 		size_t length, void *data)
 {
-	int locking, ret = 0;
+	int ret = 0;
 	void *sendbuf = NULL;
 
-	mutex_lock(&maple_wlist_lock);
-	/* bounce if device already locked */
-	locking = mutex_is_locked(&mdev->mq->mutex);
-	if (locking) {
-		ret = -EBUSY;
-		goto out;
-	}
-
-	mutex_lock(&mdev->mq->mutex);
-
 	if (length) {
-		sendbuf = kmalloc(length * 4, GFP_KERNEL);
+		sendbuf = kzalloc(length * 4, GFP_KERNEL);
 		if (!sendbuf) {
-			mutex_unlock(&mdev->mq->mutex);
-			ret = -ENOMEM;
-			goto out;
-		}
-		((__be32 *)sendbuf)[0] = cpu_to_be32(function);
-	}
-
-	mdev->mq->command = command;
-	mdev->mq->length = length;
-	if (length > 1)
-		memcpy(sendbuf + 4, data, (length - 1) * 4);
-	mdev->mq->sendbuf = sendbuf;
-
-	list_add(&mdev->mq->list, &maple_waitq);
-out:
-	mutex_unlock(&maple_wlist_lock);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(maple_add_packet);
-
-/**
- * maple_add_packet_sleeps - add a single instruction to the queue
- * @mdev: maple device
- * @function: function on device being queried
- * @command: maple command to add
- * @length: length of command string (in 32 bit words)
- * @data: remainder of command string
- *
- * Same as maple_add_packet(), but waits for the lock to become free.
- */
-int maple_add_packet_sleeps(struct maple_device *mdev, u32 function,
-		u32 command, size_t length, void *data)
-{
-	int locking, ret = 0;
-	void *sendbuf = NULL;
-
-	locking = mutex_lock_interruptible(&mdev->mq->mutex);
-	if (locking) {
-		ret = -EIO;
-		goto out;
-	}
-
-	if (length) {
-		sendbuf = kmalloc(length * 4, GFP_KERNEL);
-		if (!sendbuf) {
-			mutex_unlock(&mdev->mq->mutex);
 			ret = -ENOMEM;
 			goto out;
 		}
@@ -233,38 +175,35 @@ int maple_add_packet_sleeps(struct maple_device *mdev, u32 function,
 	mdev->mq->sendbuf = sendbuf;
 
 	mutex_lock(&maple_wlist_lock);
-	list_add(&mdev->mq->list, &maple_waitq);
+	list_add_tail(&mdev->mq->list, &maple_waitq);
 	mutex_unlock(&maple_wlist_lock);
 out:
 	return ret;
 }
-EXPORT_SYMBOL_GPL(maple_add_packet_sleeps);
+EXPORT_SYMBOL_GPL(maple_add_packet);
 
 static struct mapleq *maple_allocq(struct maple_device *mdev)
 {
 	struct mapleq *mq;
 
-	mq = kmalloc(sizeof(*mq), GFP_KERNEL);
+	mq = kzalloc(sizeof(*mq), GFP_KERNEL);
 	if (!mq)
 		goto failed_nomem;
 
+	INIT_LIST_HEAD(&mq->list);
 	mq->dev = mdev;
-	mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
-	mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp);
+	mq->recvbuf = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
 	if (!mq->recvbuf)
 		goto failed_p2;
-	/*
-	 * most devices do not need the mutex - but
-	 * anything that injects block reads or writes
-	 * will rely on it
-	 */
-	mutex_init(&mq->mutex);
+	mq->recvbuf->buf = &((mq->recvbuf->bufx)[0]);
 
 	return mq;
 
 failed_p2:
 	kfree(mq);
 failed_nomem:
+	dev_err(&mdev->dev, "could not allocate memory for device (%d, %d)\n",
+		mdev->port, mdev->unit);
 	return NULL;
 }
 
@@ -272,12 +211,16 @@ static struct maple_device *maple_alloc_dev(int port, int unit)
 {
 	struct maple_device *mdev;
 
+	/* zero this out to avoid kobj subsystem
+	 * thinking it has already been registered */
+
 	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
 	if (!mdev)
 		return NULL;
 
 	mdev->port = port;
 	mdev->unit = unit;
+
 	mdev->mq = maple_allocq(mdev);
 
 	if (!mdev->mq) {
@@ -286,19 +229,14 @@ static struct maple_device *maple_alloc_dev(int port, int unit)
 	}
 	mdev->dev.bus = &maple_bus_type;
 	mdev->dev.parent = &maple_bus;
+	init_waitqueue_head(&mdev->maple_wait);
 	return mdev;
 }
 
 static void maple_free_dev(struct maple_device *mdev)
 {
-	if (!mdev)
-		return;
-	if (mdev->mq) {
-		if (mdev->mq->recvbufdcsp)
-			kmem_cache_free(maple_queue_cache,
-				mdev->mq->recvbufdcsp);
-		kfree(mdev->mq);
-	}
+	kmem_cache_free(maple_queue_cache, mdev->mq->recvbuf);
+	kfree(mdev->mq);
 	kfree(mdev);
 }
 
@@ -320,7 +258,7 @@ static void maple_build_block(struct mapleq *mq)
 	maple_lastptr = maple_sendptr;
 
 	*maple_sendptr++ = (port << 16) | len | 0x80000000;
-	*maple_sendptr++ = PHYSADDR(mq->recvbuf);
+	*maple_sendptr++ = PHYSADDR(mq->recvbuf->buf);
 	*maple_sendptr++ =
 	    mq->command | (to << 8) | (from << 16) | (len << 24);
 	while (len-- > 0)
@@ -333,20 +271,28 @@ static void maple_send(void)
 	int i, maple_packets = 0;
 	struct mapleq *mq, *nmq;
 
-	if (!list_empty(&maple_sentq))
+	if (!maple_dma_done())
 		return;
+
+	/* disable DMA */
+	ctrl_outl(0, MAPLE_ENABLE);
+
+	if (!list_empty(&maple_sentq))
+		goto finish;
+
 	mutex_lock(&maple_wlist_lock);
-	if (list_empty(&maple_waitq) || !maple_dma_done()) {
+	if (list_empty(&maple_waitq)) {
 		mutex_unlock(&maple_wlist_lock);
-		return;
+		goto finish;
 	}
-	mutex_unlock(&maple_wlist_lock);
+
 	maple_lastptr = maple_sendbuf;
 	maple_sendptr = maple_sendbuf;
-	mutex_lock(&maple_wlist_lock);
+
 	list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
 		maple_build_block(mq);
-		list_move(&mq->list, &maple_sentq);
+		list_del_init(&mq->list);
+		list_add_tail(&mq->list, &maple_sentq);
 		if (maple_packets++ > MAPLE_MAXPACKETS)
 			break;
 	}
@@ -356,10 +302,13 @@ static void maple_send(void)
 		dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
 			       PAGE_SIZE, DMA_BIDIRECTIONAL);
 	}
+
+finish:
+	maple_dma_reset();
 }
 
 /* check if there is a driver registered likely to match this device */
-static int check_matching_maple_driver(struct device_driver *driver,
+static int maple_check_matching_driver(struct device_driver *driver,
 				       void *devptr)
 {
 	struct maple_driver *maple_drv;
@@ -374,10 +323,7 @@ static int check_matching_maple_driver(struct device_driver *driver,
 
 static void maple_detach_driver(struct maple_device *mdev)
 {
-	if (!mdev)
-		return;
 	device_unregister(&mdev->dev);
-	mdev = NULL;
 }
 
 /* process initial MAPLE_COMMAND_DEVINFO for each device or port */
@@ -385,9 +331,9 @@ static void maple_attach_driver(struct maple_device *mdev)
 {
 	char *p, *recvbuf;
 	unsigned long function;
-	int matched, retval;
+	int matched, error;
 
-	recvbuf = mdev->mq->recvbuf;
+	recvbuf = mdev->mq->recvbuf->buf;
 	/* copy the data as individual elements in
 	 * case of memory optimisation */
 	memcpy(&mdev->devinfo.function, recvbuf + 4, 4);
@@ -395,7 +341,6 @@ static void maple_attach_driver(struct maple_device *mdev)
 	memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1);
 	memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1);
 	memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30);
-	memcpy(&mdev->devinfo.product_licence[0], recvbuf + 52, 60);
 	memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2);
 	memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2);
 	memcpy(mdev->product_name, mdev->devinfo.product_name, 30);
@@ -414,43 +359,41 @@ static void maple_attach_driver(struct maple_device *mdev)
 		else
 			break;
 
-	printk(KERN_INFO "Maple device detected: %s\n",
-	       mdev->product_name);
-	printk(KERN_INFO "Maple device: %s\n", mdev->product_licence);
-
 	function = be32_to_cpu(mdev->devinfo.function);
 
+	dev_info(&mdev->dev, "detected %s: function 0x%lX: at (%d, %d)\n",
+		mdev->product_name, function, mdev->port, mdev->unit);
+
 	if (function > 0x200) {
 		/* Do this silently - as not a real device */
 		function = 0;
-		mdev->driver = &maple_dummy_driver;
+		mdev->driver = &maple_unsupported_device;
 		sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port);
+
 	} else {
-		printk(KERN_INFO
-		       "Maple bus at (%d, %d): Function 0x%lX\n",
-		       mdev->port, mdev->unit, function);
 
 		matched =
 		    bus_for_each_drv(&maple_bus_type, NULL, mdev,
-				     check_matching_maple_driver);
+				     maple_check_matching_driver);
 
 		if (matched == 0) {
 			/* Driver does not exist yet */
-			printk(KERN_INFO
-			       "No maple driver found.\n");
-			mdev->driver = &maple_dummy_driver;
+			dev_info(&mdev->dev, "no driver found\n");
+			mdev->driver = &maple_unsupported_device;
 		}
 		sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port,
 			mdev->unit, function);
 	}
+
 	mdev->function = function;
 	mdev->dev.release = &maple_release_device;
-	retval = device_register(&mdev->dev);
-	if (retval) {
-		printk(KERN_INFO
-		       "Maple bus: Attempt to register device"
-		       " (%x, %x) failed.\n",
-		       mdev->port, mdev->unit);
+
+	atomic_set(&mdev->busy, 0);
+	error = device_register(&mdev->dev);
+	if (error) {
+		dev_warn(&mdev->dev, "could not register device at"
+			" (%d, %d), with error 0x%X\n", mdev->unit,
+			mdev->port, error);
 		maple_free_dev(mdev);
 		mdev = NULL;
 		return;
@@ -462,7 +405,7 @@ static void maple_attach_driver(struct maple_device *mdev)
  * port and unit then return 1 - allows identification
  * of which devices need to be attached or detached
  */
-static int detach_maple_device(struct device *device, void *portptr)
+static int check_maple_device(struct device *device, void *portptr)
 {
 	struct maple_device_specify *ds;
 	struct maple_device *mdev;
@@ -477,21 +420,25 @@ static int detach_maple_device(struct device *device, void *portptr)
 static int setup_maple_commands(struct device *device, void *ignored)
 {
 	int add;
-	struct maple_device *maple_dev = to_maple_dev(device);
-
-	if ((maple_dev->interval > 0)
-	    && time_after(jiffies, maple_dev->when)) {
-		/* bounce if we cannot lock */
-		add = maple_add_packet(maple_dev,
-			be32_to_cpu(maple_dev->devinfo.function),
+	struct maple_device *mdev = to_maple_dev(device);
+	if (mdev->interval > 0 && atomic_read(&mdev->busy) == 0 &&
+	    time_after(jiffies, mdev->when)) {
+		/* bounce if we cannot add */
+		add = maple_add_packet(mdev,
+			be32_to_cpu(mdev->devinfo.function),
 			MAPLE_COMMAND_GETCOND, 1, NULL);
 		if (!add)
-			maple_dev->when = jiffies + maple_dev->interval;
+			mdev->when = jiffies + mdev->interval;
 	} else {
 		if (time_after(jiffies, maple_pnp_time))
-			/* This will also bounce */
-			maple_add_packet(maple_dev, 0,
-					 MAPLE_COMMAND_DEVINFO, 0, NULL);
+			/* Ensure we don't have block reads and devinfo
+			 * calls interfering with one another - so flag the
+			 * device as busy */
+			if (atomic_read(&mdev->busy) == 0) {
+				atomic_set(&mdev->busy, 1);
+				maple_add_packet(mdev, 0,
+					MAPLE_COMMAND_DEVINFO, 0, NULL);
+			}
 	}
 	return 0;
 }
@@ -499,29 +446,50 @@ static int setup_maple_commands(struct device *device, void *ignored)
 /* VBLANK bottom half - implemented via workqueue */
 static void maple_vblank_handler(struct work_struct *work)
 {
-	if (!list_empty(&maple_sentq) || !maple_dma_done())
+	int x, locking;
+	struct maple_device *mdev;
+
+	if (!maple_dma_done())
 		return;
 
 	ctrl_outl(0, MAPLE_ENABLE);
 
+	if (!list_empty(&maple_sentq))
+		goto finish;
+
+	/*
+	 * Set up essential commands - to fetch data and
+	 * check devices are still present
+	 */
 	bus_for_each_dev(&maple_bus_type, NULL, NULL,
 			 setup_maple_commands);
+
+	if (time_after(jiffies, maple_pnp_time)) {
+		/*
+		 * Scan the empty ports - bus is flakey and may have
+		 * mis-reported emptyness
+		 */
+		for (x = 0; x < MAPLE_PORTS; x++) {
+			if (checked[x] && empty[x]) {
+				mdev = baseunits[x];
+				if (!mdev)
+					break;
+				atomic_set(&mdev->busy, 1);
+				locking = maple_add_packet(mdev, 0,
+					MAPLE_COMMAND_DEVINFO, 0, NULL);
+				if (!locking)
+					break;
+			}
+		}
 
-	if (time_after(jiffies, maple_pnp_time))
 		maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;
-
-	mutex_lock(&maple_wlist_lock);
-	if (!list_empty(&maple_waitq) && list_empty(&maple_sentq)) {
-		mutex_unlock(&maple_wlist_lock);
-		maple_send();
-	} else {
-		mutex_unlock(&maple_wlist_lock);
 	}
 
-	maplebus_dma_reset();
+finish:
+	maple_send();
 }
 
-/* handle devices added via hotplugs - placing them on queue for DEVINFO*/
+/* handle devices added via hotplugs - placing them on queue for DEVINFO */
 static void maple_map_subunits(struct maple_device *mdev, int submask)
 {
 	int retval, k, devcheck;
@@ -533,7 +501,7 @@ static void maple_map_subunits(struct maple_device *mdev, int submask)
 		ds.unit = k + 1;
 		retval =
 		    bus_for_each_dev(&maple_bus_type, NULL, &ds,
-				     detach_maple_device);
+				     check_maple_device);
 		if (retval) {
 			submask = submask >> 1;
 			continue;
@@ -543,6 +511,7 @@ static void maple_map_subunits(struct maple_device *mdev, int submask)
 		mdev_add = maple_alloc_dev(mdev->port, k + 1);
 		if (!mdev_add)
 			return;
+		atomic_set(&mdev_add->busy, 1);
 		maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO,
 				 0, NULL);
 		/* mark that we are checking sub devices */
@@ -564,27 +533,45 @@ static void maple_clean_submap(struct maple_device *mdev)
 }
 
 /* handle empty port or hotplug removal */
-static void maple_response_none(struct maple_device *mdev,
-				struct mapleq *mq)
-{
-	if (mdev->unit != 0) {
-		list_del(&mq->list);
-		maple_clean_submap(mdev);
-		printk(KERN_INFO
-		       "Maple bus device detaching at (%d, %d)\n",
-		       mdev->port, mdev->unit);
+static void maple_response_none(struct maple_device *mdev)
+{
+	maple_clean_submap(mdev);
+
+	if (likely(mdev->unit != 0)) {
+		/*
+		 * Block devices play up
+		 * and give the impression they have
+		 * been removed even when still in place or
+		 * trip the mtd layer when they have
+		 * really gone - this code traps that eventuality
+		 * and ensures we aren't overloaded with useless
+		 * error messages
+		 */
+		if (mdev->can_unload) {
+			if (!mdev->can_unload(mdev)) {
+				atomic_set(&mdev->busy, 2);
+				wake_up(&mdev->maple_wait);
+				return;
+			}
+		}
+
+		dev_info(&mdev->dev, "detaching device at (%d, %d)\n",
+			mdev->port, mdev->unit);
 		maple_detach_driver(mdev);
 		return;
-	}
-	if (!started || !fullscan) {
-		if (checked[mdev->port] == false) {
-			checked[mdev->port] = true;
-			printk(KERN_INFO "No maple devices attached"
-				" to port %d\n", mdev->port);
+	} else {
+		if (!started || !fullscan) {
+			if (checked[mdev->port] == false) {
+				checked[mdev->port] = true;
+				empty[mdev->port] = true;
+				dev_info(&mdev->dev, "no devices"
+					" to port %d\n", mdev->port);
+			}
+			return;
 		}
-		return;
 	}
-	maple_clean_submap(mdev);
+	/* Some hardware devices generate false detach messages on unit 0 */
+	atomic_set(&mdev->busy, 0);
 }
 
 /* preprocess hotplugs or scans */
@@ -599,8 +586,11 @@ static void maple_response_devinfo(struct maple_device *mdev,
 		} else {
 			if (mdev->unit != 0)
 				maple_attach_driver(mdev);
+			if (mdev->unit == 0) {
+				empty[mdev->port] = false;
+				maple_attach_driver(mdev);
+			}
 		}
-		return;
 	}
 	if (mdev->unit == 0) {
 		submask = recvbuf[2] & 0x1F;
@@ -611,6 +601,17 @@ static void maple_response_devinfo(struct maple_device *mdev,
 	}
 }
 
+static void maple_response_fileerr(struct maple_device *mdev, void *recvbuf)
+{
+	if (mdev->fileerr_handler) {
+		mdev->fileerr_handler(mdev, recvbuf);
+		return;
+	} else
+		dev_warn(&mdev->dev, "device at (%d, %d) reports"
+			"file error 0x%X\n", mdev->port, mdev->unit,
+			((int *)recvbuf)[1]);
+}
+
 static void maple_port_rescan(void)
 {
 	int i;
@@ -621,12 +622,6 @@ static void maple_port_rescan(void)
 		if (checked[i] == false) {
 			fullscan = 0;
 			mdev = baseunits[i];
-			/*
-			 * test lock in case scan has failed
-			 * but device is still locked
-			 */
-			if (mutex_is_locked(&mdev->mq->mutex))
-				mutex_unlock(&mdev->mq->mutex);
 			maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO,
 					 0, NULL);
 		}
@@ -637,7 +632,7 @@ static void maple_port_rescan(void)
 static void maple_dma_handler(struct work_struct *work)
 {
 	struct mapleq *mq, *nmq;
-	struct maple_device *dev;
+	struct maple_device *mdev;
 	char *recvbuf;
 	enum maple_code code;
 
@@ -646,43 +641,56 @@ static void maple_dma_handler(struct work_struct *work)
 	ctrl_outl(0, MAPLE_ENABLE);
 	if (!list_empty(&maple_sentq)) {
 		list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
-			recvbuf = mq->recvbuf;
+			mdev = mq->dev;
+			recvbuf = mq->recvbuf->buf;
+			dma_cache_sync(&mdev->dev, recvbuf, 0x400,
+				DMA_FROM_DEVICE);
 			code = recvbuf[0];
-			dev = mq->dev;
 			kfree(mq->sendbuf);
-			mutex_unlock(&mq->mutex);
 			list_del_init(&mq->list);
-
 			switch (code) {
 			case MAPLE_RESPONSE_NONE:
-				maple_response_none(dev, mq);
+				maple_response_none(mdev);
 				break;
 
 			case MAPLE_RESPONSE_DEVINFO:
-				maple_response_devinfo(dev, recvbuf);
+				maple_response_devinfo(mdev, recvbuf);
+				atomic_set(&mdev->busy, 0);
 				break;
 
 			case MAPLE_RESPONSE_DATATRF:
-				if (dev->callback)
-					dev->callback(mq);
+				if (mdev->callback)
+					mdev->callback(mq);
+				atomic_set(&mdev->busy, 0);
+				wake_up(&mdev->maple_wait);
 				break;
 
 			case MAPLE_RESPONSE_FILEERR:
+				maple_response_fileerr(mdev, recvbuf);
+				atomic_set(&mdev->busy, 0);
+				wake_up(&mdev->maple_wait);
+				break;
+
 			case MAPLE_RESPONSE_AGAIN:
 			case MAPLE_RESPONSE_BADCMD:
 			case MAPLE_RESPONSE_BADFUNC:
-				printk(KERN_DEBUG
-				       "Maple non-fatal error 0x%X\n",
-				       code);
+				dev_warn(&mdev->dev, "non-fatal error"
+					" 0x%X at (%d, %d)\n", code,
+					mdev->port, mdev->unit);
+				atomic_set(&mdev->busy, 0);
 				break;
 
 			case MAPLE_RESPONSE_ALLINFO:
-				printk(KERN_DEBUG
-				       "Maple - extended device information"
-				       " not supported\n");
+				dev_notice(&mdev->dev, "extended"
+					" device information request for (%d, %d)"
+					" but call is not supported\n", mdev->port,
+					mdev->unit);
+				atomic_set(&mdev->busy, 0);
 				break;
 
 			case MAPLE_RESPONSE_OK:
+				atomic_set(&mdev->busy, 0);
+				wake_up(&mdev->maple_wait);
 				break;
 
 			default:
@@ -699,20 +707,19 @@ static void maple_dma_handler(struct work_struct *work)
 		if (!fullscan)
 			maple_port_rescan();
 		/* mark that we have been through the first scan */
-		if (started == 0)
-			started = 1;
+		started = 1;
 	}
-	maplebus_dma_reset();
+	maple_send();
 }
 
-static irqreturn_t maplebus_dma_interrupt(int irq, void *dev_id)
+static irqreturn_t maple_dma_interrupt(int irq, void *dev_id)
 {
 	/* Load everything into the bottom half */
 	schedule_work(&maple_dma_process);
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id)
+static irqreturn_t maple_vblank_interrupt(int irq, void *dev_id)
 {
 	schedule_work(&maple_vblank_process);
 	return IRQ_HANDLED;
@@ -720,14 +727,14 @@ static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id)
 
 static int maple_set_dma_interrupt_handler(void)
 {
-	return request_irq(HW_EVENT_MAPLE_DMA, maplebus_dma_interrupt,
-		IRQF_SHARED, "maple bus DMA", &maple_dummy_driver);
+	return request_irq(HW_EVENT_MAPLE_DMA, maple_dma_interrupt,
+		IRQF_SHARED, "maple bus DMA", &maple_unsupported_device);
 }
 
 static int maple_set_vblank_interrupt_handler(void)
 {
-	return request_irq(HW_EVENT_VSYNC, maplebus_vblank_interrupt,
-		IRQF_SHARED, "maple bus VBLANK", &maple_dummy_driver);
+	return request_irq(HW_EVENT_VSYNC, maple_vblank_interrupt,
+		IRQF_SHARED, "maple bus VBLANK", &maple_unsupported_device);
 }
 
 static int maple_get_dma_buffer(void)
@@ -740,7 +747,7 @@ static int maple_get_dma_buffer(void)
 	return 0;
 }
 
-static int match_maple_bus_driver(struct device *devptr,
+static int maple_match_bus_driver(struct device *devptr,
 				  struct device_driver *drvptr)
 {
 	struct maple_driver *maple_drv = to_maple_driver(drvptr);
@@ -765,16 +772,18 @@ static void maple_bus_release(struct device *dev)
 {
 }
 
-static struct maple_driver maple_dummy_driver = {
+static struct maple_driver maple_unsupported_device = {
 	.drv = {
-		.name = "maple_dummy_driver",
+		.name = "maple_unsupported_device",
 		.bus = &maple_bus_type,
 	},
 };
-
+/**
+ * maple_bus_type - core maple bus structure
+ */
 struct bus_type maple_bus_type = {
 	.name = "maple",
-	.match = match_maple_bus_driver,
+	.match = maple_match_bus_driver,
 	.uevent = maple_bus_uevent,
 };
 EXPORT_SYMBOL_GPL(maple_bus_type);
@@ -788,7 +797,8 @@ static int __init maple_bus_init(void)
 {
 	int retval, i;
 	struct maple_device *mdev[MAPLE_PORTS];
-	ctrl_outl(0, MAPLE_STATE);
+
+	ctrl_outl(0, MAPLE_ENABLE);
 
 	retval = device_register(&maple_bus);
 	if (retval)
@@ -798,36 +808,33 @@ static int __init maple_bus_init(void)
 	if (retval)
 		goto cleanup_device;
 
-	retval = driver_register(&maple_dummy_driver.drv);
+	retval = driver_register(&maple_unsupported_device.drv);
 	if (retval)
 		goto cleanup_bus;
 
 	/* allocate memory for maple bus dma */
 	retval = maple_get_dma_buffer();
 	if (retval) {
-		printk(KERN_INFO
-		       "Maple bus: Failed to allocate Maple DMA buffers\n");
+		dev_err(&maple_bus, "failed to allocate DMA buffers\n");
 		goto cleanup_basic;
 	}
 
 	/* set up DMA interrupt handler */
 	retval = maple_set_dma_interrupt_handler();
 	if (retval) {
-		printk(KERN_INFO
-		       "Maple bus: Failed to grab maple DMA IRQ\n");
+		dev_err(&maple_bus, "bus failed to grab maple "
+			"DMA IRQ\n");
 		goto cleanup_dma;
 	}
 
 	/* set up VBLANK interrupt handler */
 	retval = maple_set_vblank_interrupt_handler();
 	if (retval) {
-		printk(KERN_INFO "Maple bus: Failed to grab VBLANK IRQ\n");
+		dev_err(&maple_bus, "bus failed to grab VBLANK IRQ\n");
 		goto cleanup_irq;
 	}
 
-	maple_queue_cache =
-	    kmem_cache_create("maple_queue_cache", 0x400, 0,
-			      SLAB_HWCACHE_ALIGN, NULL);
+	maple_queue_cache = KMEM_CACHE(maple_buffer, SLAB_HWCACHE_ALIGN);
 
 	if (!maple_queue_cache)
 		goto cleanup_bothirqs;
@@ -838,23 +845,23 @@ static int __init maple_bus_init(void)
 	/* setup maple ports */
 	for (i = 0; i < MAPLE_PORTS; i++) {
 		checked[i] = false;
+		empty[i] = false;
 		mdev[i] = maple_alloc_dev(i, 0);
-		baseunits[i] = mdev[i];
 		if (!mdev[i]) {
 			while (i-- > 0)
 				maple_free_dev(mdev[i]);
 			goto cleanup_cache;
 		}
+		baseunits[i] = mdev[i];
+		atomic_set(&mdev[i]->busy, 1);
 		maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
 		subdevice_map[i] = 0;
 	}
 
-	/* setup maplebus hardware */
-	maplebus_dma_reset();
-	/* initial detection */
+	maple_pnp_time = jiffies + HZ;
+	/* prepare initial queue */
 	maple_send();
-	maple_pnp_time = jiffies;
-	printk(KERN_INFO "Maple bus core now registered.\n");
+	dev_info(&maple_bus, "bus core now registered\n");
 
 	return 0;
 
@@ -871,7 +878,7 @@ cleanup_dma:
 	free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);
 
 cleanup_basic:
-	driver_unregister(&maple_dummy_driver.drv);
+	driver_unregister(&maple_unsupported_device.drv);
 
 cleanup_bus:
 	bus_unregister(&maple_bus_type);
@@ -880,7 +887,7 @@ cleanup_device:
 	device_unregister(&maple_bus);
 
 cleanup:
-	printk(KERN_INFO "Maple bus registration failed\n");
+	printk(KERN_ERR "Maple bus registration failed\n");
 	return retval;
 }
 /* Push init to later to ensure hardware gets detected */