-rw-r--r--  Documentation/dmatest.txt |  15
-rw-r--r--  MAINTAINERS               |  18
-rw-r--r--  drivers/dma/dmaengine.c   |  55
-rw-r--r--  drivers/dma/dmatest.c     | 182
-rw-r--r--  drivers/dma/ioat/dma_v3.c |  26
-rw-r--r--  drivers/dma/mv_xor.c      |  53
-rw-r--r--  drivers/dma/mv_xor.h      |  28
-rw-r--r--  include/linux/dmaengine.h |  17
8 files changed, 146 insertions(+), 248 deletions(-)
diff --git a/Documentation/dmatest.txt b/Documentation/dmatest.txt
index 132a094c7bc3..a2b5663eae26 100644
--- a/Documentation/dmatest.txt
+++ b/Documentation/dmatest.txt
@@ -16,15 +16,16 @@ be built as module or inside kernel. Let's consider those cases.
 Part 2 - When dmatest is built as a module...
 
 After mounting debugfs and loading the module, the /sys/kernel/debug/dmatest
-folder with nodes will be created. They are the same as module parameters with
-addition of the 'run' node that controls run and stop phases of the test.
+folder with nodes will be created. There are two important files located. First
+is the 'run' node that controls run and stop phases of the test, and the second
+one, 'results', is used to get the test case results.
 
 Note that in this case test will not run on load automatically.
 
 Example of usage:
-	% echo dma0chan0 > /sys/kernel/debug/dmatest/channel
-	% echo 2000 > /sys/kernel/debug/dmatest/timeout
-	% echo 1 > /sys/kernel/debug/dmatest/iterations
+	% echo dma0chan0 > /sys/module/dmatest/parameters/channel
+	% echo 2000 > /sys/module/dmatest/parameters/timeout
+	% echo 1 > /sys/module/dmatest/parameters/iterations
 	% echo 1 > /sys/kernel/debug/dmatest/run
 
 Hint: available channel list could be extracted by running the following
@@ -55,8 +56,8 @@ for the first performed test. After user gets a control, the test could be
 re-run with the same or different parameters. For the details see the above
 section "Part 2 - When dmatest is built as a module..."
 
-In both cases the module parameters are used as initial values for the test case.
-You always could check them at run-time by running
+In both cases the module parameters are used as the actual values for the test
+case. You always could check them at run-time by running
 	% grep -H . /sys/module/dmatest/parameters/*
 
 Part 4 - Gathering the test results
diff --git a/MAINTAINERS b/MAINTAINERS
index 7a78b9f2aae2..87efa1f5c7f3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -933,24 +933,24 @@ F:	arch/arm/mach-pxa/colibri-pxa270-income.c
 
 ARM/INTEL IOP32X ARM ARCHITECTURE
 M:	Lennert Buytenhek <kernel@wantstofly.org>
-M:	Dan Williams <djbw@fb.com>
+M:	Dan Williams <dan.j.williams@intel.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
 ARM/INTEL IOP33X ARM ARCHITECTURE
-M:	Dan Williams <djbw@fb.com>
+M:	Dan Williams <dan.j.williams@intel.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
 ARM/INTEL IOP13XX ARM ARCHITECTURE
 M:	Lennert Buytenhek <kernel@wantstofly.org>
-M:	Dan Williams <djbw@fb.com>
+M:	Dan Williams <dan.j.williams@intel.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
 ARM/INTEL IQ81342EX MACHINE SUPPORT
 M:	Lennert Buytenhek <kernel@wantstofly.org>
-M:	Dan Williams <djbw@fb.com>
+M:	Dan Williams <dan.j.williams@intel.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
@@ -975,7 +975,7 @@ F:	drivers/pcmcia/pxa2xx_stargate2.c
 
 ARM/INTEL XSC3 (MANZANO) ARM CORE
 M:	Lennert Buytenhek <kernel@wantstofly.org>
-M:	Dan Williams <djbw@fb.com>
+M:	Dan Williams <dan.j.williams@intel.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
@@ -1386,7 +1386,7 @@ F:	drivers/platform/x86/asus*.c
 F:	drivers/platform/x86/eeepc*.c
 
 ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API
-M:	Dan Williams <djbw@fb.com>
+M:	Dan Williams <dan.j.williams@intel.com>
 W:	http://sourceforge.net/projects/xscaleiop
 S:	Maintained
 F:	Documentation/crypto/async-tx-api.txt
@@ -2691,7 +2691,7 @@ T:	git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
 
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
 M:	Vinod Koul <vinod.koul@intel.com>
-M:	Dan Williams <djbw@fb.com>
+M:	Dan Williams <dan.j.williams@intel.com>
 S:	Supported
 F:	drivers/dma/
 F:	include/linux/dma*
@@ -4323,7 +4323,7 @@ F:	arch/x86/kernel/microcode_core.c
 F:	arch/x86/kernel/microcode_intel.c
 
 INTEL I/OAT DMA DRIVER
-M:	Dan Williams <djbw@fb.com>
+M:	Dan Williams <dan.j.williams@intel.com>
 S:	Maintained
 F:	drivers/dma/ioat*
 
@@ -4336,7 +4336,7 @@ F:	drivers/iommu/intel-iommu.c
 F:	include/linux/intel-iommu.h
 
 INTEL IOP-ADMA DMA DRIVER
-M:	Dan Williams <djbw@fb.com>
+M:	Dan Williams <dan.j.williams@intel.com>
 S:	Odd fixes
 F:	drivers/dma/iop-adma.c
 
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 99af4db5948b..eee16b01fa89 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -382,20 +382,30 @@ void dma_issue_pending_all(void)
 EXPORT_SYMBOL(dma_issue_pending_all);
 
 /**
- * nth_chan - returns the nth channel of the given capability
+ * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
+ */
+static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
+{
+	int node = dev_to_node(chan->device->dev);
+	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
+}
+
+/**
+ * min_chan - returns the channel with min count and in the same numa-node as the cpu
  * @cap: capability to match
- * @n: nth channel desired
+ * @cpu: cpu index which the channel should be close to
  *
- * Defaults to returning the channel with the desired capability and the
- * lowest reference count when 'n' cannot be satisfied. Must be called
- * under dma_list_mutex.
+ * If some channels are close to the given cpu, the one with the lowest
+ * reference count is returned. Otherwise, cpu is ignored and only the
+ * reference count is taken into account.
+ * Must be called under dma_list_mutex.
  */
-static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
+static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
 {
 	struct dma_device *device;
 	struct dma_chan *chan;
-	struct dma_chan *ret = NULL;
 	struct dma_chan *min = NULL;
+	struct dma_chan *localmin = NULL;
 
 	list_for_each_entry(device, &dma_device_list, global_node) {
 		if (!dma_has_cap(cap, device->cap_mask) ||
@@ -404,27 +414,22 @@ static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
 		list_for_each_entry(chan, &device->channels, device_node) {
 			if (!chan->client_count)
 				continue;
-			if (!min)
-				min = chan;
-			else if (chan->table_count < min->table_count)
+			if (!min || chan->table_count < min->table_count)
 				min = chan;
 
-			if (n-- == 0) {
-				ret = chan;
-				break; /* done */
-			}
+			if (dma_chan_is_local(chan, cpu))
+				if (!localmin ||
+				    chan->table_count < localmin->table_count)
+					localmin = chan;
 		}
-		if (ret)
-			break; /* done */
 	}
 
-	if (!ret)
-		ret = min;
+	chan = localmin ? localmin : min;
 
-	if (ret)
-		ret->table_count++;
+	if (chan)
+		chan->table_count++;
 
-	return ret;
+	return chan;
 }
 
 /**
@@ -441,7 +446,6 @@ static void dma_channel_rebalance(void)
 	struct dma_device *device;
 	int cpu;
 	int cap;
-	int n;
 
 	/* undo the last distribution */
 	for_each_dma_cap_mask(cap, dma_cap_mask_all)
@@ -460,14 +464,9 @@ static void dma_channel_rebalance(void)
 		return;
 
 	/* redistribute available channels */
-	n = 0;
 	for_each_dma_cap_mask(cap, dma_cap_mask_all)
 		for_each_online_cpu(cpu) {
-			if (num_possible_cpus() > 1)
-				chan = nth_chan(cap, n++);
-			else
-				chan = nth_chan(cap, -1);
-
+			chan = min_chan(cap, cpu);
 			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
 		}
 }
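
The min_chan()/dma_chan_is_local() pair above replaces the old round-robin nth_chan() walk: each CPU is now handed the least-used channel whose device sits on its own NUMA node, with the globally least-used channel as the fallback. A standalone userspace sketch of that selection policy (the chan struct and its fields here are illustrative stand-ins, not the kernel types):

	#include <stdbool.h>
	#include <stdio.h>

	struct chan {
		int node;		/* NUMA node of the backing device, -1 if unknown */
		int table_count;	/* CPUs already mapped to this channel */
	};

	static bool chan_is_local(const struct chan *c, int cpu_node)
	{
		return c->node == -1 || c->node == cpu_node;
	}

	/* Same policy as min_chan(): least-loaded local channel first,
	 * least-loaded channel overall as the fallback. */
	static struct chan *pick_chan(struct chan *chans, int n, int cpu_node)
	{
		struct chan *min = NULL, *localmin = NULL;

		for (int i = 0; i < n; i++) {
			if (!min || chans[i].table_count < min->table_count)
				min = &chans[i];
			if (chan_is_local(&chans[i], cpu_node) &&
			    (!localmin ||
			     chans[i].table_count < localmin->table_count))
				localmin = &chans[i];
		}
		if (localmin)
			min = localmin;
		if (min)
			min->table_count++;
		return min;
	}

	int main(void)
	{
		struct chan chans[] = { { 0, 0 }, { 0, 0 }, { 1, 0 } };

		/* CPUs on node 1 keep landing on the node-1 channel. */
		for (int i = 0; i < 3; i++)
			printf("-> chan %td\n", pick_chan(chans, 3, 1) - chans);
		return 0;
	}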
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index e88ded2c8d2f..92f796cdc6ab 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -25,44 +25,46 @@
 #include <linux/seq_file.h>
 
 static unsigned int test_buf_size = 16384;
-module_param(test_buf_size, uint, S_IRUGO);
+module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
 
 static char test_channel[20];
-module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
+module_param_string(channel, test_channel, sizeof(test_channel),
+		S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
 
 static char test_device[20];
-module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
+module_param_string(device, test_device, sizeof(test_device),
+		S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
 
 static unsigned int threads_per_chan = 1;
-module_param(threads_per_chan, uint, S_IRUGO);
+module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(threads_per_chan,
 		"Number of threads to start per channel (default: 1)");
 
 static unsigned int max_channels;
-module_param(max_channels, uint, S_IRUGO);
+module_param(max_channels, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(max_channels,
 		"Maximum number of channels to use (default: all)");
 
 static unsigned int iterations;
-module_param(iterations, uint, S_IRUGO);
+module_param(iterations, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(iterations,
 		"Iterations before stopping test (default: infinite)");
 
 static unsigned int xor_sources = 3;
-module_param(xor_sources, uint, S_IRUGO);
+module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(xor_sources,
 		"Number of xor source buffers (default: 3)");
 
 static unsigned int pq_sources = 3;
-module_param(pq_sources, uint, S_IRUGO);
+module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(pq_sources,
 		"Number of p+q source buffers (default: 3)");
 
 static int timeout = 3000;
-module_param(timeout, uint, S_IRUGO);
+module_param(timeout, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
 		 "Pass -1 for infinite timeout");
 
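
Adding S_IWUSR to every module_param() is what lets the test be reconfigured through /sys/module/dmatest/parameters/ at run time, which is why the documentation hunk above switches its example paths away from debugfs. A minimal sketch of the idiom in an out-of-tree module (hypothetical names, not part of this patch):

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	/* World-readable, root-writable via
	 * /sys/module/param_demo/parameters/demo_size */
	static unsigned int demo_size = 4096;
	module_param(demo_size, uint, S_IRUGO | S_IWUSR);
	MODULE_PARM_DESC(demo_size, "Demo buffer size (default: 4096)");

	static int __init demo_init(void)
	{
		pr_info("param_demo: demo_size = %u\n", demo_size);
		return 0;
	}
	module_init(demo_init);

	static void __exit demo_exit(void)
	{
	}
	module_exit(demo_exit);

	MODULE_LICENSE("GPL");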
@@ -193,7 +195,6 @@ struct dmatest_info {
 
 	/* debugfs related stuff */
 	struct dentry		*root;
-	struct dmatest_params	dbgfs_params;
 
 	/* Test results */
 	struct list_head	results;
@@ -406,7 +407,11 @@ static int thread_result_add(struct dmatest_info *info,
 	list_add_tail(&tr->node, &r->results);
 	mutex_unlock(&info->results_lock);
 
-	pr_warn("%s\n", thread_result_get(r->name, tr));
+	if (tr->type == DMATEST_ET_OK)
+		pr_debug("%s\n", thread_result_get(r->name, tr));
+	else
+		pr_warn("%s\n", thread_result_get(r->name, tr));
+
 	return 0;
 }
 
@@ -1007,7 +1012,15 @@ static int __restart_threaded_test(struct dmatest_info *info, bool run)
 	result_free(info, NULL);
 
 	/* Copy test parameters */
-	memcpy(params, &info->dbgfs_params, sizeof(*params));
+	params->buf_size = test_buf_size;
+	strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
+	strlcpy(params->device, strim(test_device), sizeof(params->device));
+	params->threads_per_chan = threads_per_chan;
+	params->max_channels = max_channels;
+	params->iterations = iterations;
+	params->xor_sources = xor_sources;
+	params->pq_sources = pq_sources;
+	params->timeout = timeout;
 
 	/* Run test with new parameters */
 	return __run_threaded_test(info);
@@ -1029,71 +1042,6 @@ static bool __is_threaded_test_run(struct dmatest_info *info)
 	return false;
 }
 
-static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
-		const void __user *from, size_t count)
-{
-	char tmp[20];
-	ssize_t len;
-
-	len = simple_write_to_buffer(tmp, sizeof(tmp) - 1, ppos, from, count);
-	if (len >= 0) {
-		tmp[len] = '\0';
-		strlcpy(to, strim(tmp), available);
-	}
-
-	return len;
-}
-
-static ssize_t dtf_read_channel(struct file *file, char __user *buf,
-		size_t count, loff_t *ppos)
-{
-	struct dmatest_info *info = file->private_data;
-	return simple_read_from_buffer(buf, count, ppos,
-			info->dbgfs_params.channel,
-			strlen(info->dbgfs_params.channel));
-}
-
-static ssize_t dtf_write_channel(struct file *file, const char __user *buf,
-		size_t size, loff_t *ppos)
-{
-	struct dmatest_info *info = file->private_data;
-	return dtf_write_string(info->dbgfs_params.channel,
-			sizeof(info->dbgfs_params.channel),
-			ppos, buf, size);
-}
-
-static const struct file_operations dtf_channel_fops = {
-	.read	= dtf_read_channel,
-	.write	= dtf_write_channel,
-	.open	= simple_open,
-	.llseek	= default_llseek,
-};
-
-static ssize_t dtf_read_device(struct file *file, char __user *buf,
-		size_t count, loff_t *ppos)
-{
-	struct dmatest_info *info = file->private_data;
-	return simple_read_from_buffer(buf, count, ppos,
-			info->dbgfs_params.device,
-			strlen(info->dbgfs_params.device));
-}
-
-static ssize_t dtf_write_device(struct file *file, const char __user *buf,
-		size_t size, loff_t *ppos)
-{
-	struct dmatest_info *info = file->private_data;
-	return dtf_write_string(info->dbgfs_params.device,
-			sizeof(info->dbgfs_params.device),
-			ppos, buf, size);
-}
-
-static const struct file_operations dtf_device_fops = {
-	.read	= dtf_read_device,
-	.write	= dtf_write_device,
-	.open	= simple_open,
-	.llseek	= default_llseek,
-};
-
 static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
 		size_t count, loff_t *ppos)
 {
@@ -1187,8 +1135,6 @@ static const struct file_operations dtf_results_fops = {
 static int dmatest_register_dbgfs(struct dmatest_info *info)
 {
 	struct dentry *d;
-	struct dmatest_params *params = &info->dbgfs_params;
-	int ret = -ENOMEM;
 
 	d = debugfs_create_dir("dmatest", NULL);
 	if (IS_ERR(d))
@@ -1198,81 +1144,24 @@ static int dmatest_register_dbgfs(struct dmatest_info *info)
 
 	info->root = d;
 
-	/* Copy initial values */
-	memcpy(params, &info->params, sizeof(*params));
-
-	/* Test parameters */
-
-	d = debugfs_create_u32("test_buf_size", S_IWUSR | S_IRUGO, info->root,
-			       (u32 *)&params->buf_size);
-	if (IS_ERR_OR_NULL(d))
-		goto err_node;
-
-	d = debugfs_create_file("channel", S_IRUGO | S_IWUSR, info->root,
-				info, &dtf_channel_fops);
-	if (IS_ERR_OR_NULL(d))
-		goto err_node;
-
-	d = debugfs_create_file("device", S_IRUGO | S_IWUSR, info->root,
-				info, &dtf_device_fops);
-	if (IS_ERR_OR_NULL(d))
-		goto err_node;
-
-	d = debugfs_create_u32("threads_per_chan", S_IWUSR | S_IRUGO, info->root,
-			       (u32 *)&params->threads_per_chan);
-	if (IS_ERR_OR_NULL(d))
-		goto err_node;
-
-	d = debugfs_create_u32("max_channels", S_IWUSR | S_IRUGO, info->root,
-			       (u32 *)&params->max_channels);
-	if (IS_ERR_OR_NULL(d))
-		goto err_node;
-
-	d = debugfs_create_u32("iterations", S_IWUSR | S_IRUGO, info->root,
-			       (u32 *)&params->iterations);
-	if (IS_ERR_OR_NULL(d))
-		goto err_node;
-
-	d = debugfs_create_u32("xor_sources", S_IWUSR | S_IRUGO, info->root,
-			       (u32 *)&params->xor_sources);
-	if (IS_ERR_OR_NULL(d))
-		goto err_node;
-
-	d = debugfs_create_u32("pq_sources", S_IWUSR | S_IRUGO, info->root,
-			       (u32 *)&params->pq_sources);
-	if (IS_ERR_OR_NULL(d))
-		goto err_node;
-
-	d = debugfs_create_u32("timeout", S_IWUSR | S_IRUGO, info->root,
-			       (u32 *)&params->timeout);
-	if (IS_ERR_OR_NULL(d))
-		goto err_node;
-
 	/* Run or stop threaded test */
-	d = debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root,
-				info, &dtf_run_fops);
-	if (IS_ERR_OR_NULL(d))
-		goto err_node;
+	debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info,
+			    &dtf_run_fops);
 
 	/* Results of test in progress */
-	d = debugfs_create_file("results", S_IRUGO, info->root, info,
-				&dtf_results_fops);
-	if (IS_ERR_OR_NULL(d))
-		goto err_node;
+	debugfs_create_file("results", S_IRUGO, info->root, info,
+			    &dtf_results_fops);
 
 	return 0;
 
-err_node:
-	debugfs_remove_recursive(info->root);
 err_root:
 	pr_err("dmatest: Failed to initialize debugfs\n");
-	return ret;
+	return -ENOMEM;
 }
 
 static int __init dmatest_init(void)
 {
 	struct dmatest_info *info = &test_info;
-	struct dmatest_params *params = &info->params;
 	int ret;
 
 	memset(info, 0, sizeof(*info));
@@ -1283,17 +1172,6 @@ static int __init dmatest_init(void)
 	mutex_init(&info->results_lock);
 	INIT_LIST_HEAD(&info->results);
 
-	/* Set default parameters */
-	params->buf_size = test_buf_size;
-	strlcpy(params->channel, test_channel, sizeof(params->channel));
-	strlcpy(params->device, test_device, sizeof(params->device));
-	params->threads_per_chan = threads_per_chan;
-	params->max_channels = max_channels;
-	params->iterations = iterations;
-	params->xor_sources = xor_sources;
-	params->pq_sources = pq_sources;
-	params->timeout = timeout;
-
 	ret = dmatest_register_dbgfs(info);
 	if (ret)
 		return ret;
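
What remains of the debugfs interface is just the 'run' control and the 'results' dump; all the per-parameter nodes and their error unwinding are gone. A condensed sketch of the surviving registration path, reusing the fops names from the patch (error handling reduced to the root directory, as in the new dmatest_register_dbgfs()):

	#include <linux/debugfs.h>
	#include <linux/err.h>

	static struct dentry *demo_root;

	static int demo_register_dbgfs(void *priv)
	{
		demo_root = debugfs_create_dir("dmatest", NULL);
		if (IS_ERR_OR_NULL(demo_root)) {
			pr_err("dmatest: Failed to initialize debugfs\n");
			return -ENOMEM;
		}

		/* Creation failures for the files themselves are tolerated:
		 * they are a convenience, not a hard dependency. */
		debugfs_create_file("run", S_IWUSR | S_IRUGO, demo_root,
				    priv, &dtf_run_fops);
		debugfs_create_file("results", S_IRUGO, demo_root,
				    priv, &dtf_results_fops);
		return 0;
	}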
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index b642e035579b..d8ececaf1b57 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -251,7 +251,7 @@ static bool is_bwd_noraid(struct pci_dev *pdev)
 }
 
 static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
-			dma_addr_t addr, u32 offset, u8 coef, int idx)
+			dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
 {
 	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
 	struct ioat_pq16a_descriptor *pq16 =
@@ -1775,15 +1775,12 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat2_free_chan_resources;
 
-	if (is_xeon_cb32(pdev))
-		dma->copy_align = 6;
-
 	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
 	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
 
 	device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
 
-	if (is_bwd_noraid(pdev))
+	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
 		device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
 
 	/* dca is incompatible with raid operations */
@@ -1793,7 +1790,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	if (device->cap & IOAT_CAP_XOR) {
 		is_raid_device = true;
 		dma->max_xor = 8;
-		dma->xor_align = 6;
 
 		dma_cap_set(DMA_XOR, dma->cap_mask);
 		dma->device_prep_dma_xor = ioat3_prep_xor;
@@ -1812,13 +1808,8 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 
 		if (device->cap & IOAT_CAP_RAID16SS) {
 			dma_set_maxpq(dma, 16, 0);
-			dma->pq_align = 0;
 		} else {
 			dma_set_maxpq(dma, 8, 0);
-			if (is_xeon_cb32(pdev))
-				dma->pq_align = 6;
-			else
-				dma->pq_align = 0;
 		}
 
 		if (!(device->cap & IOAT_CAP_XOR)) {
@@ -1829,13 +1820,8 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 
 			if (device->cap & IOAT_CAP_RAID16SS) {
 				dma->max_xor = 16;
-				dma->xor_align = 0;
 			} else {
 				dma->max_xor = 8;
-				if (is_xeon_cb32(pdev))
-					dma->xor_align = 6;
-				else
-					dma->xor_align = 0;
 			}
 		}
 	}
@@ -1844,14 +1830,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	device->cleanup_fn = ioat3_cleanup_event;
 	device->timer_fn = ioat3_timer_event;
 
-	if (is_xeon_cb32(pdev)) {
-		dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
-		dma->device_prep_dma_xor_val = NULL;
-
-		dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
-		dma->device_prep_dma_pq_val = NULL;
-	}
-
 	/* starting with CB3.3 super extended descriptors are supported */
 	if (device->cap & IOAT_CAP_RAID16SS) {
 		char pool_name[14];
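
With copy_align, xor_align and pq_align no longer set, these parts advertise an alignment of 0 (no constraint), and CB3.2 devices instead lose XOR/PQ/RAID16SS outright through the extended is_xeon_cb32() check. Clients that consult the alignment through the dmaengine helpers keep working unchanged, along the lines of this sketch (demo_can_copy() is illustrative, not an ioat function):

	/* With dma->copy_align == 0 every offset/length combination passes. */
	static bool demo_can_copy(struct dma_device *dma, size_t src_off,
				  size_t dst_off, size_t len)
	{
		return is_dma_copy_aligned(dma, src_off, dst_off, len);
	}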
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 200f1a3c9a44..0ec086d2b6a0 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -64,7 +64,7 @@ static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
 				int src_idx)
 {
 	struct mv_xor_desc *hw_desc = desc->hw_desc;
-	return hw_desc->phy_src_addr[src_idx];
+	return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)];
 }
 
 
@@ -107,32 +107,32 @@ static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
 			       int index, dma_addr_t addr)
 {
 	struct mv_xor_desc *hw_desc = desc->hw_desc;
-	hw_desc->phy_src_addr[index] = addr;
+	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
 	if (desc->type == DMA_XOR)
 		hw_desc->desc_command |= (1 << index);
 }
 
 static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
 {
-	return __raw_readl(XOR_CURR_DESC(chan));
+	return readl_relaxed(XOR_CURR_DESC(chan));
 }
 
 static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
 					u32 next_desc_addr)
 {
-	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
+	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
 }
 
 static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
 {
-	u32 val = __raw_readl(XOR_INTR_MASK(chan));
+	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
 	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
-	__raw_writel(val, XOR_INTR_MASK(chan));
+	writel_relaxed(val, XOR_INTR_MASK(chan));
 }
 
 static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
 {
-	u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
+	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
 	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
 	return intr_cause;
 }
@@ -149,13 +149,13 @@ static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
 {
 	u32 val = ~(1 << (chan->idx * 16));
 	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
-	__raw_writel(val, XOR_INTR_CAUSE(chan));
+	writel_relaxed(val, XOR_INTR_CAUSE(chan));
 }
 
 static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
 {
 	u32 val = 0xFFFF0000 >> (chan->idx * 16);
-	__raw_writel(val, XOR_INTR_CAUSE(chan));
+	writel_relaxed(val, XOR_INTR_CAUSE(chan));
 }
 
 static int mv_can_chain(struct mv_xor_desc_slot *desc)
@@ -173,7 +173,7 @@ static void mv_set_mode(struct mv_xor_chan *chan,
 			enum dma_transaction_type type)
 {
 	u32 op_mode;
-	u32 config = __raw_readl(XOR_CONFIG(chan));
+	u32 config = readl_relaxed(XOR_CONFIG(chan));
 
 	switch (type) {
 	case DMA_XOR:
@@ -192,7 +192,14 @@ static void mv_set_mode(struct mv_xor_chan *chan,
 
 	config &= ~0x7;
 	config |= op_mode;
-	__raw_writel(config, XOR_CONFIG(chan));
+
+#if defined(__BIG_ENDIAN)
+	config |= XOR_DESCRIPTOR_SWAP;
+#else
+	config &= ~XOR_DESCRIPTOR_SWAP;
+#endif
+
+	writel_relaxed(config, XOR_CONFIG(chan));
 	chan->current_type = type;
 }
 
@@ -201,14 +208,14 @@ static void mv_chan_activate(struct mv_xor_chan *chan)
 	u32 activation;
 
 	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
-	activation = __raw_readl(XOR_ACTIVATION(chan));
+	activation = readl_relaxed(XOR_ACTIVATION(chan));
 	activation |= 0x1;
-	__raw_writel(activation, XOR_ACTIVATION(chan));
+	writel_relaxed(activation, XOR_ACTIVATION(chan));
 }
 
 static char mv_chan_is_busy(struct mv_xor_chan *chan)
 {
-	u32 state = __raw_readl(XOR_ACTIVATION(chan));
+	u32 state = readl_relaxed(XOR_ACTIVATION(chan));
 
 	state = (state >> 4) & 0x3;
 
@@ -755,22 +762,22 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan)
 {
 	u32 val;
 
-	val = __raw_readl(XOR_CONFIG(chan));
+	val = readl_relaxed(XOR_CONFIG(chan));
 	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);
 
-	val = __raw_readl(XOR_ACTIVATION(chan));
+	val = readl_relaxed(XOR_ACTIVATION(chan));
 	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);
 
-	val = __raw_readl(XOR_INTR_CAUSE(chan));
+	val = readl_relaxed(XOR_INTR_CAUSE(chan));
 	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);
 
-	val = __raw_readl(XOR_INTR_MASK(chan));
+	val = readl_relaxed(XOR_INTR_MASK(chan));
 	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);
 
-	val = __raw_readl(XOR_ERROR_CAUSE(chan));
+	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
 	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);
 
-	val = __raw_readl(XOR_ERROR_ADDR(chan));
+	val = readl_relaxed(XOR_ERROR_ADDR(chan));
 	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
 }
 
@@ -1029,10 +1036,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 	struct dma_device *dma_dev;
 
 	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
-	if (!mv_chan) {
-		ret = -ENOMEM;
-		goto err_free_dma;
-	}
+	if (!mv_chan)
+		return ERR_PTR(-ENOMEM);
 
 	mv_chan->idx = idx;
 	mv_chan->irq = irq;
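
The wholesale move from __raw_readl()/__raw_writel() to readl_relaxed()/writel_relaxed() matters on big-endian kernels: the relaxed accessors still perform little-endian register accesses (byte-swapping where needed) while avoiding the memory barriers implied by plain readl()/writel(). The shape of the pattern, on a hypothetical ioremap()ed window and register offset:

	/* Relaxed accessors: LE register layout on both LE and BE
	 * kernels, no implicit barriers. */
	static u32 demo_get_intr_cause(void __iomem *base, int idx)
	{
		u32 cause = readl_relaxed(base + 0x30);	/* hypothetical offset */

		return (cause >> (idx * 16)) & 0xFFFF;
	}

	static void demo_clear_intr(void __iomem *base, int idx)
	{
		writel_relaxed(~(1 << (idx * 16)), base + 0x30);
	}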
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index c619359cb7fe..06b067f24c9b 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -29,8 +29,10 @@
 #define MV_XOR_THRESHOLD		1
 #define MV_XOR_MAX_CHANNELS		2
 
+/* Values for the XOR_CONFIG register */
 #define XOR_OPERATION_MODE_XOR		0
 #define XOR_OPERATION_MODE_MEMCPY	2
+#define XOR_DESCRIPTOR_SWAP		BIT(14)
 
 #define XOR_CURR_DESC(chan)	(chan->mmr_base + 0x210 + (chan->idx * 4))
 #define XOR_NEXT_DESC(chan)	(chan->mmr_base + 0x200 + (chan->idx * 4))
@@ -143,7 +145,16 @@ struct mv_xor_desc_slot {
 #endif
 };
 
-/* This structure describes XOR descriptor size 64bytes */
+/*
+ * This structure describes XOR descriptor size 64bytes. The
+ * mv_phy_src_idx() macro must be used when indexing the values of the
+ * phy_src_addr[] array. This is due to the fact that the 'descriptor
+ * swap' feature, used on big endian systems, swaps descriptors data
+ * within blocks of 8 bytes. So two consecutive values of the
+ * phy_src_addr[] array are actually swapped in big-endian, which
+ * explains the different mv_phy_src_idx() implementation.
+ */
+#if defined(__LITTLE_ENDIAN)
 struct mv_xor_desc {
 	u32 status;		/* descriptor execution status */
 	u32 crc32_result;	/* result of CRC-32 calculation */
@@ -155,6 +166,21 @@ struct mv_xor_desc {
 	u32 reserved0;
 	u32 reserved1;
 };
+#define mv_phy_src_idx(src_idx) (src_idx)
+#else
+struct mv_xor_desc {
+	u32 crc32_result;	/* result of CRC-32 calculation */
+	u32 status;		/* descriptor execution status */
+	u32 phy_next_desc;	/* next descriptor address pointer */
+	u32 desc_command;	/* type of operation to be carried out */
+	u32 phy_dest_addr;	/* destination block address */
+	u32 byte_count;		/* size of src/dst blocks in bytes */
+	u32 phy_src_addr[8];	/* source block addresses */
+	u32 reserved1;
+	u32 reserved0;
+};
+#define mv_phy_src_idx(src_idx) (src_idx ^ 1)
+#endif
 
 #define to_mv_sw_desc(addr_hw_desc)		\
 	container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)
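
The two struct layouts plus the mv_phy_src_idx() macro encode the hardware 'descriptor swap' behaviour: with XOR_DESCRIPTOR_SWAP set, the engine swaps each 8-byte block of the descriptor, so adjacent u32 fields trade places and logical source index i lives in slot i ^ 1. A tiny userspace check of that mapping (the macro body is copied from the big-endian branch above):

	#include <stdio.h>

	#define mv_phy_src_idx(src_idx) ((src_idx) ^ 1)

	int main(void)
	{
		/* pairs (0,1), (2,3), (4,5), (6,7) swap places */
		for (int i = 0; i < 8; i++)
			printf("logical src %d -> slot %d\n",
			       i, mv_phy_src_idx(i));
		return 0;
	}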
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index cb286b1acdb6..0c72b89a172c 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -38,7 +38,10 @@ typedef s32 dma_cookie_t;
 #define DMA_MIN_COOKIE	1
 #define DMA_MAX_COOKIE	INT_MAX
 
-#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
+static inline int dma_submit_error(dma_cookie_t cookie)
+{
+	return cookie < 0 ? cookie : 0;
+}
 
 /**
  * enum dma_status - DMA transaction status
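
Turning dma_submit_error() into a typed inline also changes its contract: instead of a bare 0/1 it now hands back the negative cookie itself, so callers can propagate the real error code. A sketch of the caller side (demo_submit() is illustrative; dmaengine_submit() is the standard descriptor-submission helper):

	static int demo_submit(struct dma_async_tx_descriptor *tx)
	{
		dma_cookie_t cookie = dmaengine_submit(tx);
		int err = dma_submit_error(cookie);

		if (err) {
			pr_err("DMA submit failed: %d\n", err);
			return err;
		}
		return 0;
	}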
@@ -958,8 +961,9 @@ dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used,
 	}
 }
 
-enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 #ifdef CONFIG_DMA_ENGINE
+struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
 void dma_issue_pending_all(void);
 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
@@ -967,6 +971,14 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
 void dma_release_channel(struct dma_chan *chan);
 #else
+static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
+{
+	return NULL;
+}
+static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
+{
+	return DMA_SUCCESS;
+}
 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
 	return DMA_SUCCESS;
@@ -994,7 +1006,6 @@ static inline void dma_release_channel(struct dma_chan *chan)
 int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
-struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
 struct dma_chan *net_dma_find_channel(void);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
 #define dma_request_slave_channel_compat(mask, x, y, dev, name) \
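
Moving the dma_find_channel() and dma_sync_wait() declarations under CONFIG_DMA_ENGINE, with static inline stubs in the #else branch, follows the usual kernel compile-out idiom: callers need no #ifdef guards because the disabled configuration supplies no-op inlines with identical signatures. The general shape, with a hypothetical feature name:

	#ifdef CONFIG_DEMO_FEATURE
	int demo_start(void);		/* real implementation in demo.c */
	#else
	static inline int demo_start(void)
	{
		return 0;		/* feature compiled out: succeed as a no-op */
	}
	#endif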