aboutsummaryrefslogtreecommitdiffstats
path: root/arch/c6x/kernel
diff options
context:
space:
mode:
authorAurelien Jacquiot <a-jacquiot@ti.com>2011-10-04 12:17:19 -0400
committerMark Salter <msalter@redhat.com>2011-10-06 19:47:37 -0400
commit14aa7e8bf6d84c9a42c48e7f93472d830f694b1e (patch)
tree6e7ee17817537ea8454d3e3793a37017139bfcf9 /arch/c6x/kernel
parent041cadca7008f08fb4785f2288c8127c16faa529 (diff)
C6X: memory management and DMA support
Original port to an early 2.6 kernel using the TI COFF toolchain; brought up to date by Mark Salter <msalter@redhat.com>.

The C6X architecture currently lacks an MMU, so memory management is relatively simple. There is no bus snooping between L2 and main memory, but coherent DMA memory is supported by making regions of main memory uncached. If such a region is desired, it can be specified on the command line with a "memdma=" argument.

Signed-off-by: Aurelien Jacquiot <a-jacquiot@ti.com> Signed-off-by: Mark Salter <msalter@redhat.com> Acked-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/c6x/kernel')
-rw-r--r--arch/c6x/kernel/dma.c153
1 files changed, 153 insertions, 0 deletions
diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c
new file mode 100644
index 000000000000..ab7b12de144d
--- /dev/null
+++ b/arch/c6x/kernel/dma.c
@@ -0,0 +1,153 @@
1/*
2 * Copyright (C) 2011 Texas Instruments Incorporated
3 * Author: Mark Salter <msalter@redhat.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#include <linux/module.h>
10#include <linux/dma-mapping.h>
11#include <linux/mm.h>
12#include <linux/mm_types.h>
13#include <linux/scatterlist.h>
14
15#include <asm/cacheflush.h>
16
17static void c6x_dma_sync(dma_addr_t handle, size_t size,
18 enum dma_data_direction dir)
19{
20 unsigned long paddr = handle;
21
22 BUG_ON(!valid_dma_direction(dir));
23
24 switch (dir) {
25 case DMA_FROM_DEVICE:
26 L2_cache_block_invalidate(paddr, paddr + size);
27 break;
28 case DMA_TO_DEVICE:
29 L2_cache_block_writeback(paddr, paddr + size);
30 break;
31 case DMA_BIDIRECTIONAL:
32 L2_cache_block_writeback_invalidate(paddr, paddr + size);
33 break;
34 default:
35 break;
36 }
37}
38
39dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
40 enum dma_data_direction dir)
41{
42 dma_addr_t addr = virt_to_phys(ptr);
43
44 c6x_dma_sync(addr, size, dir);
45
46 debug_dma_map_page(dev, virt_to_page(ptr),
47 (unsigned long)ptr & ~PAGE_MASK, size,
48 dir, addr, true);
49 return addr;
50}
51EXPORT_SYMBOL(dma_map_single);
52
53
54void dma_unmap_single(struct device *dev, dma_addr_t handle,
55 size_t size, enum dma_data_direction dir)
56{
57 c6x_dma_sync(handle, size, dir);
58
59 debug_dma_unmap_page(dev, handle, size, dir, true);
60}
61EXPORT_SYMBOL(dma_unmap_single);
62
63
64int dma_map_sg(struct device *dev, struct scatterlist *sglist,
65 int nents, enum dma_data_direction dir)
66{
67 struct scatterlist *sg;
68 int i;
69
70 for_each_sg(sglist, sg, nents, i)
71 sg->dma_address = dma_map_single(dev, sg_virt(sg), sg->length,
72 dir);
73
74 debug_dma_map_sg(dev, sglist, nents, nents, dir);
75
76 return nents;
77}
78EXPORT_SYMBOL(dma_map_sg);
79
80
81void dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
82 int nents, enum dma_data_direction dir)
83{
84 struct scatterlist *sg;
85 int i;
86
87 for_each_sg(sglist, sg, nents, i)
88 dma_unmap_single(dev, sg_dma_address(sg), sg->length, dir);
89
90 debug_dma_unmap_sg(dev, sglist, nents, dir);
91}
92EXPORT_SYMBOL(dma_unmap_sg);
93
94void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
95 size_t size, enum dma_data_direction dir)
96{
97 c6x_dma_sync(handle, size, dir);
98
99 debug_dma_sync_single_for_cpu(dev, handle, size, dir);
100}
101EXPORT_SYMBOL(dma_sync_single_for_cpu);
102
103
104void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
105 size_t size, enum dma_data_direction dir)
106{
107 c6x_dma_sync(handle, size, dir);
108
109 debug_dma_sync_single_for_device(dev, handle, size, dir);
110}
111EXPORT_SYMBOL(dma_sync_single_for_device);
112
113
114void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
115 int nents, enum dma_data_direction dir)
116{
117 struct scatterlist *sg;
118 int i;
119
120 for_each_sg(sglist, sg, nents, i)
121 dma_sync_single_for_cpu(dev, sg_dma_address(sg),
122 sg->length, dir);
123
124 debug_dma_sync_sg_for_cpu(dev, sglist, nents, dir);
125}
126EXPORT_SYMBOL(dma_sync_sg_for_cpu);
127
128
129void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
130 int nents, enum dma_data_direction dir)
131{
132 struct scatterlist *sg;
133 int i;
134
135 for_each_sg(sglist, sg, nents, i)
136 dma_sync_single_for_device(dev, sg_dma_address(sg),
137 sg->length, dir);
138
139 debug_dma_sync_sg_for_device(dev, sglist, nents, dir);
140}
141EXPORT_SYMBOL(dma_sync_sg_for_device);
142
143
144/* Number of entries preallocated for DMA-API debugging */
145#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
146
147static int __init dma_init(void)
148{
149 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
150
151 return 0;
152}
153fs_initcall(dma_init);