author    Paul Mundt <lethal@linux-sh.org>    2008-11-11 23:17:38 -0500
committer Paul Mundt <lethal@linux-sh.org>    2008-12-22 04:42:49 -0500
commit    4a4a9be3ebdbf17957d29e3521f328a1145f9431 (patch)
tree      274a36403bd64a7e15a4f17408ef97a9d0bc6c18
parent    3d44cc3e01ee1b40317f79ed54324e25c4f848df (diff)
sh: Move arch_get_unmapped_area() in to arch/sh/mm/mmap.c.
Now that arch/sh/mm/mmap.c exists, move arch_get_unmapped_area() there.
Follows the ARM change.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
-rw-r--r--  arch/sh/kernel/cpu/init.c |  2
-rw-r--r--  arch/sh/kernel/sys_sh.c   | 92
-rw-r--r--  arch/sh/mm/mmap.c         | 94
3 files changed, 96 insertions, 92 deletions
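The heart of the relocated code is the COLOUR_ALIGN() macro: round the address up to the aliasing boundary, then add the colour bits implied by the file offset, so every mapping of a given page lands in the same cache way. The following standalone sketch replicates that arithmetic with a made-up 16 KiB mask; the real shm_align_mask is derived from the D-cache way size at boot.

#include <stdio.h>

/* Assumed values for illustration only; the kernel derives shm_align_mask
 * from current_cpu_data.dcache.way_size at boot. */
#define PAGE_SHIFT     12
#define SHM_ALIGN_MASK 0x3fffUL  /* hypothetical 16 KiB way size - 1 */

/* Mirror of the kernel's COLOUR_ALIGN(): round addr up to the alignment
 * boundary, then add the colour bits implied by the file offset. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	return (((addr + SHM_ALIGN_MASK) & ~SHM_ALIGN_MASK) +
		((pgoff << PAGE_SHIFT) & SHM_ALIGN_MASK));
}

int main(void)
{
	/* Requests at different offsets into the same file end up at
	 * addresses whose low bits encode the offset's colour, so all
	 * mappings of a given page share one cache way. */
	printf("%#lx\n", colour_align(0x400123, 0)); /* 0x404000 */
	printf("%#lx\n", colour_align(0x400123, 1)); /* 0x405000 */
	return 0;
}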
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 75fb03d35670..d29e69c156f0 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -261,9 +261,11 @@ asmlinkage void __init sh_cpu_init(void)
 	cache_init();
 
 	if (raw_smp_processor_id() == 0) {
+#ifdef CONFIG_MMU
 		shm_align_mask = max_t(unsigned long,
 				       current_cpu_data.dcache.way_size - 1,
 				       PAGE_SIZE - 1);
+#endif
 
 		/* Boot CPU sets the cache shape */
 		detect_cache_shape();
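With the new #ifdef, nommu kernels no longer touch shm_align_mask at boot. On MMU kernels the max_t() picks the larger of the way size and the page size as the alignment unit; a quick illustration of the same computation with hypothetical SH-4 geometry (real values come from current_cpu_data):

#include <stdio.h>

/* Hypothetical geometry for illustration: a 32 KiB, 2-way D-cache has a
 * 16 KiB way size; PAGE_SIZE is 4 KiB. */
#define PAGE_SIZE       4096UL
#define DCACHE_WAY_SIZE 16384UL

int main(void)
{
	/* Equivalent of max_t(unsigned long, way_size - 1, PAGE_SIZE - 1):
	 * on an aliasing cache the way size wins; with way_size <= PAGE_SIZE
	 * the mask degrades to plain page alignment. */
	unsigned long shm_align_mask = DCACHE_WAY_SIZE - 1 > PAGE_SIZE - 1 ?
				       DCACHE_WAY_SIZE - 1 : PAGE_SIZE - 1;
	printf("shm_align_mask = %#lx\n", shm_align_mask); /* 0x3fff */
	return 0;
}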
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 38f098c9c72d..58dfc02c7af1 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -22,102 +22,10 @@
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/ipc.h>
-#include <asm/cacheflush.h>
 #include <asm/syscalls.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 
-unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
-EXPORT_SYMBOL(shm_align_mask);
-
-#ifdef CONFIG_MMU
-/*
- * To avoid cache aliases, we map the shared page with same color.
- */
-#define COLOUR_ALIGN(addr, pgoff)				\
-	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
-	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
-
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
-	unsigned long len, unsigned long pgoff, unsigned long flags)
-{
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long start_addr;
-	int do_colour_align;
-
-	if (flags & MAP_FIXED) {
-		/* We do not accept a shared mapping if it would violate
-		 * cache aliasing constraints.
-		 */
-		if ((flags & MAP_SHARED) && (addr & shm_align_mask))
-			return -EINVAL;
-		return addr;
-	}
-
-	if (unlikely(len > TASK_SIZE))
-		return -ENOMEM;
-
-	do_colour_align = 0;
-	if (filp || (flags & MAP_SHARED))
-		do_colour_align = 1;
-
-	if (addr) {
-		if (do_colour_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
-		else
-			addr = PAGE_ALIGN(addr);
-
-		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
-			return addr;
-	}
-
-	if (len > mm->cached_hole_size) {
-		start_addr = addr = mm->free_area_cache;
-	} else {
-		mm->cached_hole_size = 0;
-		start_addr = addr = TASK_UNMAPPED_BASE;
-	}
-
-full_search:
-	if (do_colour_align)
-		addr = COLOUR_ALIGN(addr, pgoff);
-	else
-		addr = PAGE_ALIGN(mm->free_area_cache);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (unlikely(TASK_SIZE - len < addr)) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (likely(!vma || addr + len <= vma->vm_start)) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		addr = vma->vm_end;
-		if (do_colour_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
-	}
-}
-#endif /* CONFIG_MMU */
-
 static inline long
 do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
 	unsigned long flags, int fd, unsigned long pgoff)
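What sys_sh.c loses here is the first-fit search over the VMA list: free_area_cache remembers where the last search ended, and cached_hole_size lets small requests restart from the base instead of walking past known-too-small holes. A toy userspace re-creation of that strategy follows; the addresses and the get_area() helper are invented for illustration, and the restart-from-base retry of the real code is omitted for brevity.

#include <stdio.h>

struct vma { unsigned long start, end; };

#define TASK_UNMAPPED_BASE 0x40000000UL
#define TASK_SIZE          0x7c000000UL

static unsigned long free_area_cache = TASK_UNMAPPED_BASE;
static unsigned long cached_hole_size;

static unsigned long get_area(struct vma *v, int n, unsigned long len)
{
	unsigned long addr;

	/* Resume from the cache only if the request is bigger than every
	 * hole already skipped; otherwise rescan from the base. */
	if (len > cached_hole_size) {
		addr = free_area_cache;
	} else {
		cached_hole_size = 0;
		addr = TASK_UNMAPPED_BASE;
	}

	for (int i = 0; i < n; i++) {
		if (v[i].end <= addr)
			continue;		/* already past this VMA */
		if (addr + len <= v[i].start)
			break;			/* hole is big enough */
		if (addr + cached_hole_size < v[i].start)
			cached_hole_size = v[i].start - addr;
		addr = v[i].end;		/* skip over the VMA */
	}
	if (addr + len > TASK_SIZE)
		return -1UL;			/* stand-in for -ENOMEM */
	free_area_cache = addr + len;
	return addr;
}

int main(void)
{
	struct vma v[] = { { 0x40000000, 0x40002000 },
			   { 0x40003000, 0x40010000 } };
	printf("%#lx\n", get_area(v, 2, 0x1000)); /* 0x40002000: first hole */
	printf("%#lx\n", get_area(v, 2, 0x1000)); /* 0x40010000: resumes from cache */
	return 0;
}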
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 8837d511710a..931f4d003fa0 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -9,7 +9,101 @@
  */
 #include <linux/io.h>
 #include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/module.h>
 #include <asm/page.h>
+#include <asm/processor.h>
+
+#ifdef CONFIG_MMU
+unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
+EXPORT_SYMBOL(shm_align_mask);
+
+/*
+ * To avoid cache aliases, we map the shared page with same color.
+ */
+#define COLOUR_ALIGN(addr, pgoff)				\
+	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
+	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+	unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long start_addr;
+	int do_colour_align;
+
+	if (flags & MAP_FIXED) {
+		/* We do not accept a shared mapping if it would violate
+		 * cache aliasing constraints.
+		 */
+		if ((flags & MAP_SHARED) && (addr & shm_align_mask))
+			return -EINVAL;
+		return addr;
+	}
+
+	if (unlikely(len > TASK_SIZE))
+		return -ENOMEM;
+
+	do_colour_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_colour_align = 1;
+
+	if (addr) {
+		if (do_colour_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	if (len > mm->cached_hole_size) {
+		start_addr = addr = mm->free_area_cache;
+	} else {
+		mm->cached_hole_size = 0;
+		start_addr = addr = TASK_UNMAPPED_BASE;
+	}
+
+full_search:
+	if (do_colour_align)
+		addr = COLOUR_ALIGN(addr, pgoff);
+	else
+		addr = PAGE_ALIGN(mm->free_area_cache);
+
+	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+		/* At this point:  (!vma || addr < vma->vm_end). */
+		if (unlikely(TASK_SIZE - len < addr)) {
+			/*
+			 * Start a new search - just in case we missed
+			 * some holes.
+			 */
+			if (start_addr != TASK_UNMAPPED_BASE) {
+				start_addr = addr = TASK_UNMAPPED_BASE;
+				mm->cached_hole_size = 0;
+				goto full_search;
+			}
+			return -ENOMEM;
+		}
+		if (likely(!vma || addr + len <= vma->vm_start)) {
+			/*
+			 * Remember the place where we stopped the search:
+			 */
+			mm->free_area_cache = addr + len;
+			return addr;
+		}
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
+
+		addr = vma->vm_end;
+		if (do_colour_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
+	}
+}
+#endif /* CONFIG_MMU */
 
 /*
  * You really shouldn't be using read() or write() on /dev/mem. This
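From userspace the effect of the (unchanged, merely relocated) policy is observable: two MAP_SHARED mappings of the same file offset get kernel-chosen addresses that agree in their colour bits. A minimal check, assuming a 16 KiB mask; the agreement is only guaranteed on an sh kernel with aliasing caches, while other architectures may match only to page granularity.

#include <stdio.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/colour-test", O_RDWR | O_CREAT, 0600);
	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;

	/* Let the kernel pick both addresses (addr == NULL), so
	 * arch_get_unmapped_area() applies colour alignment. */
	void *a = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	void *b = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (a == MAP_FAILED || b == MAP_FAILED)
		return 1;

	/* On an aliasing sh cache both addresses share the offset's colour
	 * (0x3fff is the assumed mask from the examples above). */
	printf("a=%p b=%p colour bits: %#lx vs %#lx\n", a, b,
	       (unsigned long)a & 0x3fffUL, (unsigned long)b & 0x3fffUL);
	return 0;
}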