author    Adrian McMenamin <adrian@mcmen.demon.co.uk> 2008-07-29 09:10:56 -0400
committer Paul Mundt <lethal@linux-sh.org> 2008-07-29 09:10:56 -0400
commit    1795cf48b322b4d19230a40dbe7181acedd34a94 (patch)
tree      5d1426c12687f1c80cb648da2f105714c9819001 /drivers
parent    0764bff445bb13cd17e41b6ab196ef83c23c6c17 (diff)
sh/maple: clean maple bus code
This patch cleans up the handling of the maple bus queue to remove the
risk of races when adding packets. It also removes references to the
redundant connect and disconnect functions.

Signed-off-by: Adrian McMenamin <adrian@mcmen.demon.co.uk>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
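[Note: the patch changes the maple_add_packet() calling convention from taking a pre-built struct mapleq to building the packet itself and returning an error code. A minimal sketch of a caller under the new API follows; the helper name queue_devinfo() is illustrative, not part of the patch.]

/*
 * Sketch of the reworked calling convention: maple_add_packet() now
 * builds the packet itself and returns 0 on success, -EBUSY if the
 * device's queue mutex is already held, or -ENOMEM on allocation
 * failure. The helper name is hypothetical.
 */
static int queue_devinfo(struct maple_device *mdev)
{
	/* function 0, zero-length payload: bare DEVINFO request */
	return maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
}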
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/sh/maple/maple.c | 265
1 file changed, 184 insertions(+), 81 deletions(-)
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
index 617efb1640b1..be97789fa5fd 100644
--- a/drivers/sh/maple/maple.c
+++ b/drivers/sh/maple/maple.c
@@ -24,13 +24,12 @@
 #include <linux/slab.h>
 #include <linux/maple.h>
 #include <linux/dma-mapping.h>
+#include <linux/delay.h>
 #include <asm/cacheflush.h>
 #include <asm/dma.h>
 #include <asm/io.h>
-#include <asm/mach/dma.h>
-#include <asm/mach/sysasic.h>
-#include <asm/mach/maple.h>
-#include <linux/delay.h>
+#include <mach/dma.h>
+#include <mach/sysasic.h>
 
 MODULE_AUTHOR("Yaegshi Takeshi, Paul Mundt, M.R. Brown, Adrian McMenamin");
 MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
@@ -46,14 +45,15 @@ static DECLARE_WORK(maple_vblank_process, maple_vblank_handler);
 static LIST_HEAD(maple_waitq);
 static LIST_HEAD(maple_sentq);
 
-static DEFINE_MUTEX(maple_list_lock);
+/* mutex to protect queue of waiting packets */
+static DEFINE_MUTEX(maple_wlist_lock);
 
 static struct maple_driver maple_dummy_driver;
 static struct device maple_bus;
 static int subdevice_map[MAPLE_PORTS];
 static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
 static unsigned long maple_pnp_time;
-static int started, scanning, liststatus, fullscan;
+static int started, scanning, fullscan;
 static struct kmem_cache *maple_queue_cache;
 
 struct maple_device_specify {
@@ -129,35 +129,124 @@ static void maple_release_device(struct device *dev)
 	kfree(mdev);
 }
 
-/**
+/*
  * maple_add_packet - add a single instruction to the queue
- * @mq: instruction to add to waiting queue
+ * @mdev - maple device
+ * @function - function on device being queried
+ * @command - maple command to add
+ * @length - length of command string (in 32 bit words)
+ * @data - remainder of command string
  */
-void maple_add_packet(struct mapleq *mq)
+int maple_add_packet(struct maple_device *mdev, u32 function, u32 command,
+	size_t length, void *data)
 {
-	mutex_lock(&maple_list_lock);
-	list_add(&mq->list, &maple_waitq);
-	mutex_unlock(&maple_list_lock);
+	int locking, ret = 0;
+	void *sendbuf = NULL;
+
+	mutex_lock(&maple_wlist_lock);
+	/* bounce if device already locked */
+	locking = mutex_is_locked(&mdev->mq->mutex);
+	if (locking) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	mutex_lock(&mdev->mq->mutex);
+
+	if (length) {
+		sendbuf = kmalloc(length * 4, GFP_KERNEL);
+		if (!sendbuf) {
+			mutex_unlock(&mdev->mq->mutex);
+			ret = -ENOMEM;
+			goto out;
+		}
+		((__be32 *)sendbuf)[0] = cpu_to_be32(function);
+	}
+
+	mdev->mq->command = command;
+	mdev->mq->length = length;
+	if (length > 1)
+		memcpy(sendbuf + 4, data, (length - 1) * 4);
+	mdev->mq->sendbuf = sendbuf;
+
+	list_add(&mdev->mq->list, &maple_waitq);
+out:
+	mutex_unlock(&maple_wlist_lock);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(maple_add_packet);
 
+/*
+ * maple_add_packet_sleeps - add a single instruction to the queue
+ *  - waits for lock to be free
+ * @mdev - maple device
+ * @function - function on device being queried
+ * @command - maple command to add
+ * @length - length of command string (in 32 bit words)
+ * @data - remainder of command string
+ */
+int maple_add_packet_sleeps(struct maple_device *mdev, u32 function,
+	u32 command, size_t length, void *data)
+{
+	int locking, ret = 0;
+	void *sendbuf = NULL;
+
+	locking = mutex_lock_interruptible(&mdev->mq->mutex);
+	if (locking) {
+		ret = -EIO;
+		goto out;
+	}
+
+	if (length) {
+		sendbuf = kmalloc(length * 4, GFP_KERNEL);
+		if (!sendbuf) {
+			mutex_unlock(&mdev->mq->mutex);
+			ret = -ENOMEM;
+			goto out;
+		}
+		((__be32 *)sendbuf)[0] = cpu_to_be32(function);
+	}
+
+	mdev->mq->command = command;
+	mdev->mq->length = length;
+	if (length > 1)
+		memcpy(sendbuf + 4, data, (length - 1) * 4);
+	mdev->mq->sendbuf = sendbuf;
+
+	mutex_lock(&maple_wlist_lock);
+	list_add(&mdev->mq->list, &maple_waitq);
+	mutex_unlock(&maple_wlist_lock);
+out:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(maple_add_packet_sleeps);
+
 static struct mapleq *maple_allocq(struct maple_device *mdev)
 {
 	struct mapleq *mq;
 
 	mq = kmalloc(sizeof(*mq), GFP_KERNEL);
 	if (!mq)
-		return NULL;
+		goto failed_nomem;
 
 	mq->dev = mdev;
 	mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
 	mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp);
-	if (!mq->recvbuf) {
-		kfree(mq);
-		return NULL;
-	}
+	if (!mq->recvbuf)
+		goto failed_p2;
+	/*
+	 * most devices do not need the mutex - but
+	 * anything that injects block reads or writes
+	 * will rely on it
+	 */
+	mutex_init(&mq->mutex);
 
 	return mq;
+
+failed_p2:
+	kfree(mq);
+failed_nomem:
+	return NULL;
 }
 
 static struct maple_device *maple_alloc_dev(int port, int unit)
@@ -178,7 +267,6 @@ static struct maple_device *maple_alloc_dev(int port, int unit)
 	}
 	mdev->dev.bus = &maple_bus_type;
 	mdev->dev.parent = &maple_bus;
-	mdev->function = 0;
 	return mdev;
 }
 
@@ -216,7 +304,6 @@ static void maple_build_block(struct mapleq *mq)
 	*maple_sendptr++ = PHYSADDR(mq->recvbuf);
 	*maple_sendptr++ =
 	    mq->command | (to << 8) | (from << 16) | (len << 24);
-
 	while (len-- > 0)
 		*maple_sendptr++ = *lsendbuf++;
 }
@@ -224,22 +311,27 @@ static void maple_build_block(struct mapleq *mq)
 /* build up command queue */
 static void maple_send(void)
 {
-	int i;
-	int maple_packets;
+	int i, maple_packets = 0;
 	struct mapleq *mq, *nmq;
 
 	if (!list_empty(&maple_sentq))
 		return;
-	if (list_empty(&maple_waitq) || !maple_dma_done())
+	mutex_lock(&maple_wlist_lock);
+	if (list_empty(&maple_waitq) || !maple_dma_done()) {
+		mutex_unlock(&maple_wlist_lock);
 		return;
-	maple_packets = 0;
-	maple_sendptr = maple_lastptr = maple_sendbuf;
+	}
+	mutex_unlock(&maple_wlist_lock);
+	maple_lastptr = maple_sendbuf;
+	maple_sendptr = maple_sendbuf;
+	mutex_lock(&maple_wlist_lock);
 	list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
 		maple_build_block(mq);
 		list_move(&mq->list, &maple_sentq);
 		if (maple_packets++ > MAPLE_MAXPACKETS)
 			break;
 	}
+	mutex_unlock(&maple_wlist_lock);
 	if (maple_packets > 0) {
 		for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
 			dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
@@ -247,7 +339,8 @@ static void maple_send(void)
 	}
 }
 
-static int attach_matching_maple_driver(struct device_driver *driver,
+/* check if there is a driver registered likely to match this device */
+static int check_matching_maple_driver(struct device_driver *driver,
 			void *devptr)
 {
 	struct maple_driver *maple_drv;
@@ -255,12 +348,8 @@ static int attach_matching_maple_driver(struct device_driver *driver,
 
 	mdev = devptr;
 	maple_drv = to_maple_driver(driver);
-	if (mdev->devinfo.function & be32_to_cpu(maple_drv->function)) {
-		if (maple_drv->connect(mdev) == 0) {
-			mdev->driver = maple_drv;
-			return 1;
-		}
-	}
+	if (mdev->devinfo.function & cpu_to_be32(maple_drv->function))
+		return 1;
 	return 0;
 }
 
@@ -268,11 +357,6 @@ static void maple_detach_driver(struct maple_device *mdev)
 {
 	if (!mdev)
 		return;
-	if (mdev->driver) {
-		if (mdev->driver->disconnect)
-			mdev->driver->disconnect(mdev);
-	}
-	mdev->driver = NULL;
 	device_unregister(&mdev->dev);
 	mdev = NULL;
 }
@@ -328,8 +412,8 @@ static void maple_attach_driver(struct maple_device *mdev)
 			mdev->port, mdev->unit, function);
 
 	matched =
 	    bus_for_each_drv(&maple_bus_type, NULL, mdev,
-		 attach_matching_maple_driver);
+		 check_matching_maple_driver);
 
 	if (matched == 0) {
 		/* Driver does not exist yet */
@@ -373,45 +457,48 @@ static int detach_maple_device(struct device *device, void *portptr)
 
 static int setup_maple_commands(struct device *device, void *ignored)
 {
+	int add;
 	struct maple_device *maple_dev = to_maple_dev(device);
 
 	if ((maple_dev->interval > 0)
 	    && time_after(jiffies, maple_dev->when)) {
-		maple_dev->when = jiffies + maple_dev->interval;
-		maple_dev->mq->command = MAPLE_COMMAND_GETCOND;
-		maple_dev->mq->sendbuf = &maple_dev->function;
-		maple_dev->mq->length = 1;
-		maple_add_packet(maple_dev->mq);
-		liststatus++;
+		/* bounce if we cannot lock */
+		add = maple_add_packet(maple_dev,
+			be32_to_cpu(maple_dev->devinfo.function),
+			MAPLE_COMMAND_GETCOND, 1, NULL);
+		if (!add)
+			maple_dev->when = jiffies + maple_dev->interval;
 	} else {
-		if (time_after(jiffies, maple_pnp_time)) {
-			maple_dev->mq->command = MAPLE_COMMAND_DEVINFO;
-			maple_dev->mq->length = 0;
-			maple_add_packet(maple_dev->mq);
-			liststatus++;
-		}
+		if (time_after(jiffies, maple_pnp_time))
+			/* This will also bounce */
+			maple_add_packet(maple_dev, 0,
+				MAPLE_COMMAND_DEVINFO, 0, NULL);
 	}
-
 	return 0;
 }
 
 /* VBLANK bottom half - implemented via workqueue */
 static void maple_vblank_handler(struct work_struct *work)
 {
-	if (!maple_dma_done())
-		return;
-	if (!list_empty(&maple_sentq))
+	if (!list_empty(&maple_sentq) || !maple_dma_done())
 		return;
+
 	ctrl_outl(0, MAPLE_ENABLE);
-	liststatus = 0;
+
 	bus_for_each_dev(&maple_bus_type, NULL, NULL,
 			 setup_maple_commands);
+
 	if (time_after(jiffies, maple_pnp_time))
 		maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;
-	if (liststatus && list_empty(&maple_sentq)) {
-		INIT_LIST_HEAD(&maple_sentq);
+
+	mutex_lock(&maple_wlist_lock);
+	if (!list_empty(&maple_waitq) && list_empty(&maple_sentq)) {
+		mutex_unlock(&maple_wlist_lock);
 		maple_send();
+	} else {
+		mutex_unlock(&maple_wlist_lock);
 	}
+
 	maplebus_dma_reset();
 }
 
@@ -422,8 +509,8 @@ static void maple_map_subunits(struct maple_device *mdev, int submask)
 	struct maple_device *mdev_add;
 	struct maple_device_specify ds;
 
+	ds.port = mdev->port;
 	for (k = 0; k < 5; k++) {
-		ds.port = mdev->port;
 		ds.unit = k + 1;
 		retval =
 		    bus_for_each_dev(&maple_bus_type, NULL, &ds,
@@ -437,9 +524,9 @@ static void maple_map_subunits(struct maple_device *mdev, int submask)
 			mdev_add = maple_alloc_dev(mdev->port, k + 1);
 			if (!mdev_add)
 				return;
-			mdev_add->mq->command = MAPLE_COMMAND_DEVINFO;
-			mdev_add->mq->length = 0;
-			maple_add_packet(mdev_add->mq);
+			maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO,
+				0, NULL);
+			/* mark that we are checking sub devices */
 			scanning = 1;
 		}
 		submask = submask >> 1;
@@ -505,6 +592,28 @@ static void maple_response_devinfo(struct maple_device *mdev,
 	}
 }
 
+static void maple_port_rescan(void)
+{
+	int i;
+	struct maple_device *mdev;
+
+	fullscan = 1;
+	for (i = 0; i < MAPLE_PORTS; i++) {
+		if (checked[i] == false) {
+			fullscan = 0;
+			mdev = baseunits[i];
+			/*
+			 * test lock in case scan has failed
+			 * but device is still locked
+			 */
+			if (mutex_is_locked(&mdev->mq->mutex))
+				mutex_unlock(&mdev->mq->mutex);
+			maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO,
+				0, NULL);
+		}
+	}
+}
+
 /* maple dma end bottom half - implemented via workqueue */
 static void maple_dma_handler(struct work_struct *work)
 {
@@ -512,7 +621,6 @@ static void maple_dma_handler(struct work_struct *work)
 	struct maple_device *dev;
 	char *recvbuf;
 	enum maple_code code;
-	int i;
 
 	if (!maple_dma_done())
 		return;
@@ -522,6 +630,10 @@ static void maple_dma_handler(struct work_struct *work)
 		recvbuf = mq->recvbuf;
 		code = recvbuf[0];
 		dev = mq->dev;
+		kfree(mq->sendbuf);
+		mutex_unlock(&mq->mutex);
+		list_del_init(&mq->list);
+
 		switch (code) {
 		case MAPLE_RESPONSE_NONE:
 			maple_response_none(dev, mq);
@@ -558,26 +670,16 @@ static void maple_dma_handler(struct work_struct *work)
 				break;
 			}
 		}
-		INIT_LIST_HEAD(&maple_sentq);
+		/* if scanning is 1 then we have subdevices to check */
 		if (scanning == 1) {
 			maple_send();
 			scanning = 2;
 		} else
 			scanning = 0;
-
-		if (!fullscan) {
-			fullscan = 1;
-			for (i = 0; i < MAPLE_PORTS; i++) {
-				if (checked[i] == false) {
-					fullscan = 0;
-					dev = baseunits[i];
-					dev->mq->command =
-						MAPLE_COMMAND_DEVINFO;
-					dev->mq->length = 0;
-					maple_add_packet(dev->mq);
-				}
-			}
-		}
+		/*check if we have actually tested all ports yet */
+		if (!fullscan)
+			maple_port_rescan();
+		/* mark that we have been through the first scan */
 		if (started == 0)
 			started = 1;
 	}
@@ -631,7 +733,7 @@ static int match_maple_bus_driver(struct device *devptr,
 	if (maple_dev->devinfo.function == 0xFFFFFFFF)
 		return 0;
 	else if (maple_dev->devinfo.function &
-		 be32_to_cpu(maple_drv->function))
+		 cpu_to_be32(maple_drv->function))
 		return 1;
 	return 0;
 }
@@ -713,6 +815,9 @@ static int __init maple_bus_init(void)
 	if (!maple_queue_cache)
 		goto cleanup_bothirqs;
 
+	INIT_LIST_HEAD(&maple_waitq);
+	INIT_LIST_HEAD(&maple_sentq);
+
 	/* setup maple ports */
 	for (i = 0; i < MAPLE_PORTS; i++) {
 		checked[i] = false;
@@ -723,9 +828,7 @@ static int __init maple_bus_init(void)
 			maple_free_dev(mdev[i]);
 			goto cleanup_cache;
 		}
-		mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO;
-		mdev[i]->mq->length = 0;
-		maple_add_packet(mdev[i]->mq);
+		maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
 		subdevice_map[i] = 0;
 	}
 
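[Usage note: maple_add_packet() bounces with -EBUSY rather than waiting, so the periodic polling in setup_maple_commands() simply retries on the next VBLANK pass; maple_add_packet_sleeps() instead takes the device mutex interruptibly and suits process context. A hedged sketch of a sleeping caller, based on the GETCOND call the patch itself makes; the helper name is illustrative:]

/*
 * Sketch: queueing a GETCOND request from process context, where
 * blocking on the device mutex is acceptable. Returns 0 on success,
 * -EIO if interrupted, or -ENOMEM; the helper name is hypothetical.
 */
static int getcond_blocking(struct maple_device *mdev)
{
	u32 function = be32_to_cpu(mdev->devinfo.function);

	/* length 1: the function word only, no further payload */
	return maple_add_packet_sleeps(mdev, function,
			MAPLE_COMMAND_GETCOND, 1, NULL);
}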