aboutsummaryrefslogtreecommitdiffstats
path: root/arch/c6x
diff options
context:
space:
mode:
authorAurelien Jacquiot <a-jacquiot@ti.com>2011-10-04 12:17:19 -0400
committerMark Salter <msalter@redhat.com>2011-10-06 19:47:37 -0400
commit14aa7e8bf6d84c9a42c48e7f93472d830f694b1e (patch)
tree6e7ee17817537ea8454d3e3793a37017139bfcf9 /arch/c6x
parent041cadca7008f08fb4785f2288c8127c16faa529 (diff)
C6X: memory management and DMA support
Original port to early 2.6 kernel using TI COFF toolchain. Brought up to date by Mark Salter <msalter@redhat.com>.

The C6X architecture currently lacks an MMU, so memory management is relatively simple. There is no bus snooping between L2 and main memory, but coherent DMA memory is supported by making regions of main memory uncached. If such a region is desired, it can be specified on the command line with a "memdma=" argument.

Signed-off-by: Aurelien Jacquiot <a-jacquiot@ti.com> Signed-off-by: Mark Salter <msalter@redhat.com> Acked-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/c6x')
-rw-r--r--arch/c6x/include/asm/dma-mapping.h91
-rw-r--r--arch/c6x/kernel/dma.c153
-rw-r--r--arch/c6x/mm/dma-coherent.c143
-rw-r--r--arch/c6x/mm/init.c113
4 files changed, 500 insertions, 0 deletions
diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h
new file mode 100644
index 00000000000..03579fd99db
--- /dev/null
+++ b/arch/c6x/include/asm/dma-mapping.h
@@ -0,0 +1,91 @@
1/*
2 * Port on Texas Instruments TMS320C6x architecture
3 *
4 * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
5 * Author: Aurelien Jacquiot <aurelien.jacquiot@ti.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 */
12#ifndef _ASM_C6X_DMA_MAPPING_H
13#define _ASM_C6X_DMA_MAPPING_H
14
15#include <linux/dma-debug.h>
16#include <asm-generic/dma-coherent.h>
17
18#define dma_supported(d, m) 1
19
20static inline int dma_set_mask(struct device *dev, u64 dma_mask)
21{
22 if (!dev->dma_mask || !dma_supported(dev, dma_mask))
23 return -EIO;
24
25 *dev->dma_mask = dma_mask;
26
27 return 0;
28}
29
30/*
31 * DMA errors are defined by all-bits-set in the DMA address.
32 */
33static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
34{
35 return dma_addr == ~0;
36}
37
38extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
39 size_t size, enum dma_data_direction dir);
40
41extern void dma_unmap_single(struct device *dev, dma_addr_t handle,
42 size_t size, enum dma_data_direction dir);
43
44extern int dma_map_sg(struct device *dev, struct scatterlist *sglist,
45 int nents, enum dma_data_direction direction);
46
47extern void dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
48 int nents, enum dma_data_direction direction);
49
50static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
51 unsigned long offset, size_t size,
52 enum dma_data_direction dir)
53{
54 dma_addr_t handle;
55
56 handle = dma_map_single(dev, page_address(page) + offset, size, dir);
57
58 debug_dma_map_page(dev, page, offset, size, dir, handle, false);
59
60 return handle;
61}
62
63static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
64 size_t size, enum dma_data_direction dir)
65{
66 dma_unmap_single(dev, handle, size, dir);
67
68 debug_dma_unmap_page(dev, handle, size, dir, false);
69}
70
71extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
72 size_t size, enum dma_data_direction dir);
73
74extern void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
75 size_t size,
76 enum dma_data_direction dir);
77
78extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
79 int nents, enum dma_data_direction dir);
80
81extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
82 int nents, enum dma_data_direction dir);
83
84extern void coherent_mem_init(u32 start, u32 size);
85extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
86extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
87
88#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
89#define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h))
90
91#endif /* _ASM_C6X_DMA_MAPPING_H */
diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c
new file mode 100644
index 00000000000..ab7b12de144
--- /dev/null
+++ b/arch/c6x/kernel/dma.c
@@ -0,0 +1,153 @@
1/*
2 * Copyright (C) 2011 Texas Instruments Incorporated
3 * Author: Mark Salter <msalter@redhat.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#include <linux/module.h>
10#include <linux/dma-mapping.h>
11#include <linux/mm.h>
12#include <linux/mm_types.h>
13#include <linux/scatterlist.h>
14
15#include <asm/cacheflush.h>
16
17static void c6x_dma_sync(dma_addr_t handle, size_t size,
18 enum dma_data_direction dir)
19{
20 unsigned long paddr = handle;
21
22 BUG_ON(!valid_dma_direction(dir));
23
24 switch (dir) {
25 case DMA_FROM_DEVICE:
26 L2_cache_block_invalidate(paddr, paddr + size);
27 break;
28 case DMA_TO_DEVICE:
29 L2_cache_block_writeback(paddr, paddr + size);
30 break;
31 case DMA_BIDIRECTIONAL:
32 L2_cache_block_writeback_invalidate(paddr, paddr + size);
33 break;
34 default:
35 break;
36 }
37}
38
39dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
40 enum dma_data_direction dir)
41{
42 dma_addr_t addr = virt_to_phys(ptr);
43
44 c6x_dma_sync(addr, size, dir);
45
46 debug_dma_map_page(dev, virt_to_page(ptr),
47 (unsigned long)ptr & ~PAGE_MASK, size,
48 dir, addr, true);
49 return addr;
50}
51EXPORT_SYMBOL(dma_map_single);
52
53
54void dma_unmap_single(struct device *dev, dma_addr_t handle,
55 size_t size, enum dma_data_direction dir)
56{
57 c6x_dma_sync(handle, size, dir);
58
59 debug_dma_unmap_page(dev, handle, size, dir, true);
60}
61EXPORT_SYMBOL(dma_unmap_single);
62
63
64int dma_map_sg(struct device *dev, struct scatterlist *sglist,
65 int nents, enum dma_data_direction dir)
66{
67 struct scatterlist *sg;
68 int i;
69
70 for_each_sg(sglist, sg, nents, i)
71 sg->dma_address = dma_map_single(dev, sg_virt(sg), sg->length,
72 dir);
73
74 debug_dma_map_sg(dev, sglist, nents, nents, dir);
75
76 return nents;
77}
78EXPORT_SYMBOL(dma_map_sg);
79
80
81void dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
82 int nents, enum dma_data_direction dir)
83{
84 struct scatterlist *sg;
85 int i;
86
87 for_each_sg(sglist, sg, nents, i)
88 dma_unmap_single(dev, sg_dma_address(sg), sg->length, dir);
89
90 debug_dma_unmap_sg(dev, sglist, nents, dir);
91}
92EXPORT_SYMBOL(dma_unmap_sg);
93
94void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
95 size_t size, enum dma_data_direction dir)
96{
97 c6x_dma_sync(handle, size, dir);
98
99 debug_dma_sync_single_for_cpu(dev, handle, size, dir);
100}
101EXPORT_SYMBOL(dma_sync_single_for_cpu);
102
103
104void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
105 size_t size, enum dma_data_direction dir)
106{
107 c6x_dma_sync(handle, size, dir);
108
109 debug_dma_sync_single_for_device(dev, handle, size, dir);
110}
111EXPORT_SYMBOL(dma_sync_single_for_device);
112
113
114void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
115 int nents, enum dma_data_direction dir)
116{
117 struct scatterlist *sg;
118 int i;
119
120 for_each_sg(sglist, sg, nents, i)
121 dma_sync_single_for_cpu(dev, sg_dma_address(sg),
122 sg->length, dir);
123
124 debug_dma_sync_sg_for_cpu(dev, sglist, nents, dir);
125}
126EXPORT_SYMBOL(dma_sync_sg_for_cpu);
127
128
129void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
130 int nents, enum dma_data_direction dir)
131{
132 struct scatterlist *sg;
133 int i;
134
135 for_each_sg(sglist, sg, nents, i)
136 dma_sync_single_for_device(dev, sg_dma_address(sg),
137 sg->length, dir);
138
139 debug_dma_sync_sg_for_device(dev, sglist, nents, dir);
140}
141EXPORT_SYMBOL(dma_sync_sg_for_device);
142
143
144/* Number of entries preallocated for DMA-API debugging */
145#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
146
147static int __init dma_init(void)
148{
149 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
150
151 return 0;
152}
153fs_initcall(dma_init);
diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c
new file mode 100644
index 00000000000..4187e518037
--- /dev/null
+++ b/arch/c6x/mm/dma-coherent.c
@@ -0,0 +1,143 @@
1/*
2 * Port on Texas Instruments TMS320C6x architecture
3 *
4 * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
5 * Author: Aurelien Jacquiot <aurelien.jacquiot@ti.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * DMA uncached mapping support.
12 *
13 * Using code pulled from ARM
14 * Copyright (C) 2000-2004 Russell King
15 *
16 */
17#include <linux/slab.h>
18#include <linux/bitmap.h>
19#include <linux/bitops.h>
20#include <linux/module.h>
21#include <linux/interrupt.h>
22#include <linux/dma-mapping.h>
23#include <linux/memblock.h>
24
25#include <asm/page.h>
26
27/*
28 * DMA coherent memory management, can be redefined using the memdma=
29 * kernel command line
30 */
31
32/* none by default */
33static phys_addr_t dma_base;
34static u32 dma_size;
35static u32 dma_pages;
36
37static unsigned long *dma_bitmap;
38
39/* bitmap lock */
40static DEFINE_SPINLOCK(dma_lock);
41
42/*
43 * Return a DMA coherent and contiguous memory chunk from the DMA memory
44 */
45static inline u32 __alloc_dma_pages(int order)
46{
47 unsigned long flags;
48 u32 pos;
49
50 spin_lock_irqsave(&dma_lock, flags);
51 pos = bitmap_find_free_region(dma_bitmap, dma_pages, order);
52 spin_unlock_irqrestore(&dma_lock, flags);
53
54 return dma_base + (pos << PAGE_SHIFT);
55}
56
57static void __free_dma_pages(u32 addr, int order)
58{
59 unsigned long flags;
60 u32 pos = (addr - dma_base) >> PAGE_SHIFT;
61
62 if (addr < dma_base || (pos + (1 << order)) >= dma_pages) {
63 printk(KERN_ERR "%s: freeing outside range.\n", __func__);
64 BUG();
65 }
66
67 spin_lock_irqsave(&dma_lock, flags);
68 bitmap_release_region(dma_bitmap, pos, order);
69 spin_unlock_irqrestore(&dma_lock, flags);
70}
71
72/*
73 * Allocate DMA coherent memory space and return both the kernel
74 * virtual and DMA address for that space.
75 */
76void *dma_alloc_coherent(struct device *dev, size_t size,
77 dma_addr_t *handle, gfp_t gfp)
78{
79 u32 paddr;
80 int order;
81
82 if (!dma_size || !size)
83 return NULL;
84
85 order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);
86
87 paddr = __alloc_dma_pages(order);
88
89 if (handle)
90 *handle = paddr;
91
92 if (!paddr)
93 return NULL;
94
95 return phys_to_virt(paddr);
96}
97EXPORT_SYMBOL(dma_alloc_coherent);
98
99/*
100 * Free DMA coherent memory as defined by the above mapping.
101 */
102void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
103 dma_addr_t dma_handle)
104{
105 int order;
106
107 if (!dma_size || !size)
108 return;
109
110 order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);
111
112 __free_dma_pages(virt_to_phys(vaddr), order);
113}
114EXPORT_SYMBOL(dma_free_coherent);
115
116/*
117 * Initialise the coherent DMA memory allocator using the given uncached region.
118 */
119void __init coherent_mem_init(phys_addr_t start, u32 size)
120{
121 phys_addr_t bitmap_phys;
122
123 if (!size)
124 return;
125
126 printk(KERN_INFO
127 "Coherent memory (DMA) region start=0x%x size=0x%x\n",
128 start, size);
129
130 dma_base = start;
131 dma_size = size;
132
133 /* allocate bitmap */
134 dma_pages = dma_size >> PAGE_SHIFT;
135 if (dma_size & (PAGE_SIZE - 1))
136 ++dma_pages;
137
138 bitmap_phys = memblock_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long),
139 sizeof(long));
140
141 dma_bitmap = phys_to_virt(bitmap_phys);
142 memset(dma_bitmap, 0, dma_pages * PAGE_SIZE);
143}
diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c
new file mode 100644
index 00000000000..89395f09648
--- /dev/null
+++ b/arch/c6x/mm/init.c
@@ -0,0 +1,113 @@
1/*
2 * Port on Texas Instruments TMS320C6x architecture
3 *
4 * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
5 * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/mm.h>
12#include <linux/swap.h>
13#include <linux/module.h>
14#include <linux/bootmem.h>
15#ifdef CONFIG_BLK_DEV_RAM
16#include <linux/blkdev.h>
17#endif
18#include <linux/initrd.h>
19
20#include <asm/sections.h>
21
22/*
23 * ZERO_PAGE is a special page that is used for zero-initialized
24 * data and COW.
25 */
26unsigned long empty_zero_page;
27EXPORT_SYMBOL(empty_zero_page);
28
29/*
30 * paging_init() continues the virtual memory environment setup which
31 * was begun by the code in arch/head.S.
32 * The parameters are pointers to where to stick the starting and ending
33 * addresses of available kernel virtual memory.
34 */
35void __init paging_init(void)
36{
37 struct pglist_data *pgdat = NODE_DATA(0);
38 unsigned long zones_size[MAX_NR_ZONES] = {0, };
39
40 empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
41 memset((void *)empty_zero_page, 0, PAGE_SIZE);
42
43 /*
44 * Set up user data space
45 */
46 set_fs(KERNEL_DS);
47
48 /*
49 * Define zones
50 */
51 zones_size[ZONE_NORMAL] = (memory_end - PAGE_OFFSET) >> PAGE_SHIFT;
52 pgdat->node_zones[ZONE_NORMAL].zone_start_pfn =
53 __pa(PAGE_OFFSET) >> PAGE_SHIFT;
54
55 free_area_init(zones_size);
56}
57
58void __init mem_init(void)
59{
60 int codek, datak;
61 unsigned long tmp;
62 unsigned long len = memory_end - memory_start;
63
64 high_memory = (void *)(memory_end & PAGE_MASK);
65
66 /* this will put all memory onto the freelists */
67 totalram_pages = free_all_bootmem();
68
69 codek = (_etext - _stext) >> 10;
70 datak = (_end - _sdata) >> 10;
71
72 tmp = nr_free_pages() << PAGE_SHIFT;
73 printk(KERN_INFO "Memory: %luk/%luk RAM (%dk kernel code, %dk data)\n",
74 tmp >> 10, len >> 10, codek, datak);
75}
76
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Return the pages holding the initial ramdisk image to the page
 * allocator once it is no longer needed.
 */
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long addr;
	int npages = 0;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page = virt_to_page(addr);

		ClearPageReserved(page);
		init_page_count(page);
		free_page(addr);
		totalram_pages++;
		npages++;
	}
	printk(KERN_INFO "Freeing initrd memory: %luk freed\n",
	       (npages * PAGE_SIZE) >> 10);
}
#endif
92
93void __init free_initmem(void)
94{
95 unsigned long addr;
96
97 /*
98 * The following code should be cool even if these sections
99 * are not page aligned.
100 */
101 addr = PAGE_ALIGN((unsigned long)(__init_begin));
102
103 /* next to check that the page we free is not a partial page */
104 for (; addr + PAGE_SIZE < (unsigned long)(__init_end);
105 addr += PAGE_SIZE) {
106 ClearPageReserved(virt_to_page(addr));
107 init_page_count(virt_to_page(addr));
108 free_page(addr);
109 totalram_pages++;
110 }
111 printk(KERN_INFO "Freeing unused kernel memory: %dK freed\n",
112 (int) ((addr - PAGE_ALIGN((long) &__init_begin)) >> 10));
113}