author      Dan Williams <dan.j.williams@intel.com>    2009-01-06 13:38:15 -0500
committer   Dan Williams <dan.j.williams@intel.com>    2009-01-06 13:38:15 -0500
commit      33df8ca068123457db56c316946a3c0e4ef787d6 (patch)
tree        e594340e903ea3eb8af83906c649eeaf85cbc0b2
parent      59b5ec21446b9239d706ab237fb261d525b75e81 (diff)
dmatest: convert to dma_request_channel
Replace the client registration infrastructure with a custom loop to poll
for channels.  Once dma_request_channel returns NULL, stop asking for
channels.

A userspace side effect of this change is that loading the dmatest module
before loading a dma driver will result in no channels being found;
previously dmatest would get a callback.  To facilitate testing in the
built-in case, dmatest_init is marked as a late_initcall.  Another side
effect is that channels under test cannot be used for any other purpose.

Cc: Haavard Skinnemoen <haavard.skinnemoen@atmel.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r--   drivers/dma/dmatest.c       | 115
-rw-r--r--   include/linux/dmaengine.h   |   6
2 files changed, 49 insertions(+), 72 deletions(-)
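
For context before the patch body, the consumer-side pattern that dmatest is converted to looks roughly like the sketch below. This is illustrative only, based on the calls visible in the diff (dma_cap_zero/dma_cap_set, dma_request_channel with a filter callback, dma_release_channel); the "example" names are hypothetical, and the filter return type follows the dmaengine API as it stands in this series.

/*
 * Sketch of a private-channel consumer, mirroring the pattern dmatest
 * adopts in this patch; "example" names are hypothetical.
 */
#include <linux/module.h>
#include <linux/dmaengine.h>

static struct dma_chan *example_chan;

/* Called for each candidate channel: DMA_ACK claims it, DMA_DUP skips it. */
static enum dma_state_client example_filter(struct dma_chan *chan, void *param)
{
        /* a real consumer would inspect chan / chan->device here */
        return DMA_ACK;
}

static int __init example_init(void)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);             /* helper added by this patch */
        dma_cap_set(DMA_MEMCPY, mask);

        /* exclusively grab one memcpy-capable channel, NULL if none free */
        example_chan = dma_request_channel(mask, example_filter, NULL);
        return example_chan ? 0 : -ENODEV;
}
/* like dmatest: when built in, let dma drivers load first */
late_initcall(example_init);

static void __exit example_exit(void)
{
        if (example_chan)
                dma_release_channel(example_chan);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");

Channels obtained this way are exclusively owned until dma_release_channel() is called, which is why the commit message notes that channels under test cannot be used for any other purpose.
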
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index db4050884713..1d6e48f9cd02 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -35,7 +35,7 @@ MODULE_PARM_DESC(threads_per_chan,
 
 static unsigned int max_channels;
 module_param(max_channels, uint, S_IRUGO);
-MODULE_PARM_DESC(nr_channels,
+MODULE_PARM_DESC(max_channels,
                "Maximum number of channels to use (default: all)");
 
 /*
@@ -71,7 +71,7 @@ struct dmatest_chan {
 
 /*
  * These are protected by dma_list_mutex since they're only used by
- * the DMA client event callback
+ * the DMA filter function callback
  */
 static LIST_HEAD(dmatest_channels);
 static unsigned int nr_channels;
@@ -317,21 +317,16 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
        kfree(dtc);
 }
 
-static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
+static int dmatest_add_channel(struct dma_chan *chan)
 {
        struct dmatest_chan     *dtc;
        struct dmatest_thread   *thread;
        unsigned int            i;
 
-       /* Have we already been told about this channel? */
-       list_for_each_entry(dtc, &dmatest_channels, node)
-               if (dtc->chan == chan)
-                       return DMA_DUP;
-
        dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
        if (!dtc) {
                pr_warning("dmatest: No memory for %s\n", dev_name(&chan->dev));
-               return DMA_NAK;
+               return -ENOMEM;
        }
 
        dtc->chan = chan;
@@ -365,81 +360,57 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
        list_add_tail(&dtc->node, &dmatest_channels);
        nr_channels++;
 
-       return DMA_ACK;
+       return 0;
 }
 
-static enum dma_state_client dmatest_remove_channel(struct dma_chan *chan)
+static enum dma_state_client filter(struct dma_chan *chan, void *param)
 {
-       struct dmatest_chan *dtc, *_dtc;
-
-       list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
-               if (dtc->chan == chan) {
-                       list_del(&dtc->node);
-                       dmatest_cleanup_channel(dtc);
-                       pr_debug("dmatest: lost channel %s\n",
-                                dev_name(&chan->dev));
-                       return DMA_ACK;
-               }
-       }
-
-       return DMA_DUP;
-}
-
-/*
- * Start testing threads as new channels are assigned to us, and kill
- * them when the channels go away.
- *
- * When we unregister the client, all channels are removed so this
- * will also take care of cleaning things up when the module is
- * unloaded.
- */
-static enum dma_state_client
-dmatest_event(struct dma_client *client, struct dma_chan *chan,
-               enum dma_state state)
-{
-       enum dma_state_client ack = DMA_NAK;
-
-       switch (state) {
-       case DMA_RESOURCE_AVAILABLE:
-               if (!dmatest_match_channel(chan)
-                               || !dmatest_match_device(chan->device))
-                       ack = DMA_DUP;
-               else if (max_channels && nr_channels >= max_channels)
-                       ack = DMA_NAK;
-               else
-                       ack = dmatest_add_channel(chan);
-               break;
-
-       case DMA_RESOURCE_REMOVED:
-               ack = dmatest_remove_channel(chan);
-               break;
-
-       default:
-               pr_info("dmatest: Unhandled event %u (%s)\n",
-                       state, dev_name(&chan->dev));
-               break;
-       }
-
-       return ack;
+       if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device))
+               return DMA_DUP;
+       else
+               return DMA_ACK;
 }
 
-static struct dma_client dmatest_client = {
-       .event_callback = dmatest_event,
-};
-
 static int __init dmatest_init(void)
 {
-       dma_cap_set(DMA_MEMCPY, dmatest_client.cap_mask);
-       dma_async_client_register(&dmatest_client);
-       dma_async_client_chan_request(&dmatest_client);
+       dma_cap_mask_t mask;
+       struct dma_chan *chan;
+       int err = 0;
+
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_MEMCPY, mask);
+       for (;;) {
+               chan = dma_request_channel(mask, filter, NULL);
+               if (chan) {
+                       err = dmatest_add_channel(chan);
+                       if (err == 0)
+                               continue;
+                       else {
+                               dma_release_channel(chan);
+                               break; /* add_channel failed, punt */
+                       }
+               } else
+                       break; /* no more channels available */
+               if (max_channels && nr_channels >= max_channels)
+                       break; /* we have all we need */
+       }
 
-       return 0;
+       return err;
 }
-module_init(dmatest_init);
+/* when compiled-in wait for drivers to load first */
+late_initcall(dmatest_init);
 
 static void __exit dmatest_exit(void)
 {
-       dma_async_client_unregister(&dmatest_client);
+       struct dmatest_chan *dtc, *_dtc;
+
+       list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
+               list_del(&dtc->node);
+               dmatest_cleanup_channel(dtc);
+               pr_debug("dmatest: dropped channel %s\n",
+                        dev_name(&dtc->chan->dev));
+               dma_release_channel(dtc->chan);
+       }
 }
 module_exit(dmatest_exit);
 
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index fe40bc020af6..6f2d070ac7f3 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -400,6 +400,12 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
        set_bit(tx_type, dstp->bits);
 }
 
+#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
+static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
+{
+       bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
+}
+
 #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
 static inline int
 __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)