author    Linus Torvalds <torvalds@linux-foundation.org>    2013-09-09 21:07:15 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-09-09 21:07:15 -0400
commit    26b0332e30c7f93e780aaa054bd84e3437f84354 (patch)
tree      e9cf240b67bf7eebae9fabbdba4e6a0fdfd359d7 /drivers/dma/dmaengine.c
parent    640414171818c6293c23e74a28d1c69b2a1a7fe5 (diff)
parent    4a43f394a08214eaf92cdd8ce3eae75e555323d8 (diff)
Merge tag 'dmaengine-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine
Pull dmaengine update from Dan Williams:
 "Collection of random updates to the core and some end-driver fixups
  for ioatdma and mv_xor:

   - NUMA aware channel allocation

   - Cleanup dmatest debugfs interface

   - ioat: make raid-support Atom only

   - mv_xor: big endian

  Aside from the top three commits these have all had some soak time in
  -next. The top commit fixes a recent build breakage.

  It has been a long while since my last pull request, hopefully it
  does not show. Thanks to Vinod for keeping an eye on drivers/dma/
  this past year"

* tag 'dmaengine-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine:
  dmaengine: dma_sync_wait and dma_find_channel undefined
  MAINTAINERS: update email for Dan Williams
  dma: mv_xor: Fix incorrect error path
  ioatdma: silence GCC warnings
  dmaengine: make dma_channel_rebalance() NUMA aware
  dmaengine: make dma_submit_error() return an error code
  ioatdma: disable RAID on non-Atom platforms and reenable unaligned copies
  mv_xor: support big endian systems using descriptor swap feature
  mv_xor: use {readl, writel}_relaxed instead of __raw_{readl, writel}
  dmatest: print message on debug level in case of no error
  dmatest: remove IS_ERR_OR_NULL checks of debugfs calls
  dmatest: make module parameters writable
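Of the core changes listed above, the dma_submit_error() change is the one most
visible to client drivers: it now propagates the negative error code carried in
a failed cookie instead of a bare boolean. A minimal client-side sketch of the
intended usage; the demo_submit() wrapper is hypothetical and for illustration
only, while dmaengine_submit(), dma_submit_error() and dma_async_issue_pending()
are the real dmaengine APIs:

    #include <linux/dmaengine.h>

    /* Sketch: submit a previously prepared descriptor 'tx' and propagate
     * the error code now returned by dma_submit_error(). 'tx' is assumed
     * to come from a prep call such as dmaengine_prep_dma_memcpy(). */
    static int demo_submit(struct dma_async_tx_descriptor *tx)
    {
    	dma_cookie_t cookie;
    	int ret;

    	cookie = dmaengine_submit(tx);
    	ret = dma_submit_error(cookie);
    	if (ret)
    		return ret;	/* negative errno from a failed submit */

    	dma_async_issue_pending(tx->chan);
    	return 0;
    }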
Diffstat (limited to 'drivers/dma/dmaengine.c')
-rw-r--r--    drivers/dma/dmaengine.c    55
1 file changed, 27 insertions(+), 28 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 99af4db5948b..eee16b01fa89 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -382,20 +382,30 @@ void dma_issue_pending_all(void)
 EXPORT_SYMBOL(dma_issue_pending_all);
 
 /**
- * nth_chan - returns the nth channel of the given capability
+ * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
+ */
+static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
+{
+	int node = dev_to_node(chan->device->dev);
+	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
+}
+
+/**
+ * min_chan - returns the channel with min count and in the same numa-node as the cpu
  * @cap: capability to match
- * @n: nth channel desired
+ * @cpu: cpu index which the channel should be close to
  *
- * Defaults to returning the channel with the desired capability and the
- * lowest reference count when 'n' cannot be satisfied. Must be called
- * under dma_list_mutex.
+ * If some channels are close to the given cpu, the one with the lowest
+ * reference count is returned. Otherwise, cpu is ignored and only the
+ * reference count is taken into account.
+ * Must be called under dma_list_mutex.
  */
-static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
+static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
 {
 	struct dma_device *device;
 	struct dma_chan *chan;
-	struct dma_chan *ret = NULL;
 	struct dma_chan *min = NULL;
+	struct dma_chan *localmin = NULL;
 
 	list_for_each_entry(device, &dma_device_list, global_node) {
 		if (!dma_has_cap(cap, device->cap_mask) ||
@@ -404,27 +414,22 @@ static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
 		list_for_each_entry(chan, &device->channels, device_node) {
 			if (!chan->client_count)
 				continue;
-			if (!min)
-				min = chan;
-			else if (chan->table_count < min->table_count)
+			if (!min || chan->table_count < min->table_count)
 				min = chan;
 
-			if (n-- == 0) {
-				ret = chan;
-				break; /* done */
-			}
+			if (dma_chan_is_local(chan, cpu))
+				if (!localmin ||
+				    chan->table_count < localmin->table_count)
+					localmin = chan;
 		}
-		if (ret)
-			break; /* done */
 	}
 
-	if (!ret)
-		ret = min;
+	chan = localmin ? localmin : min;
 
-	if (ret)
-		ret->table_count++;
+	if (chan)
+		chan->table_count++;
 
-	return ret;
+	return chan;
 }
 
 /**
@@ -441,7 +446,6 @@ static void dma_channel_rebalance(void)
 	struct dma_device *device;
 	int cpu;
 	int cap;
-	int n;
 
 	/* undo the last distribution */
 	for_each_dma_cap_mask(cap, dma_cap_mask_all)
@@ -460,14 +464,9 @@ static void dma_channel_rebalance(void)
 		return;
 
 	/* redistribute available channels */
-	n = 0;
 	for_each_dma_cap_mask(cap, dma_cap_mask_all)
 		for_each_online_cpu(cpu) {
-			if (num_possible_cpus() > 1)
-				chan = nth_chan(cap, n++);
-			else
-				chan = nth_chan(cap, -1);
-
+			chan = min_chan(cap, cpu);
 			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
 		}
 }
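For context on how the rebalanced table is consumed: dma_find_channel() simply
returns the per-cpu entry that dma_channel_rebalance() fills in via min_chan(),
so after this change an opted-in client transparently gets a NUMA-local channel
when one exists. A sketch of such a consumer; the demo_get_local_memcpy_chan()
helper is hypothetical, while dmaengine_get()/dmaengine_put() are the real
client opt-in calls that make channels eligible here:

    #include <linux/dmaengine.h>

    /* Illustration only: fetch the per-cpu DMA_MEMCPY channel picked by
     * dma_channel_rebalance(). With the patch above this is the channel
     * with the lowest table_count on the caller's NUMA node, falling
     * back to the global minimum when no local channel exists. */
    static struct dma_chan *demo_get_local_memcpy_chan(void)
    {
    	struct dma_chan *chan;

    	dmaengine_get();		/* opt in as a dmaengine client */
    	chan = dma_find_channel(DMA_MEMCPY);
    	if (!chan)
    		dmaengine_put();	/* no capable channel registered */
    	return chan;
    }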