| author | Paul Mundt <lethal@linux-sh.org> | 2006-09-27 05:36:17 -0400 |
|---|---|---|
| committer | Paul Mundt <lethal@linux-sh.org> | 2006-09-27 05:36:17 -0400 |
| commit | f3c2575818fab45f8609e4aef2e43ab02b3a142e (patch) | |
| tree | a4924d7dd8f8df229e36fab24ccccfe12437509b /arch/sh | |
| parent | 87b0ef91b6f27c07bf7dcce8584437481f473092 (diff) | |
sh: Calculate shm alignment at runtime.
Set the SHM alignment at runtime, based on the probed cache descriptor.
Optimize get_unmapped_area() to colour-align only shared mappings.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
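
To make the new alignment concrete before the diff: the mask is the larger of "one cache way minus one" and "one page minus one". A minimal userspace sketch of that computation, with the cache geometry assumed purely for illustration (the kernel reads the real values from the probed cache descriptor):

```c
#include <stdio.h>

/* Assumed geometry for illustration only: a two-way 32 KiB data
 * cache (16 KiB per way) and 4 KiB pages. */
#define DCACHE_WAY_BYTES	(16 * 1024)
#define PAGE_BYTES		(4 * 1024)

int main(void)
{
	unsigned long way_mask  = DCACHE_WAY_BYTES - 1;
	unsigned long page_mask = PAGE_BYTES - 1;

	/* Same idea as the max_t() in sh_cpu_init() below: never
	 * align to less than a page, even on parts whose way size
	 * is one page or smaller (i.e. caches with no aliasing). */
	unsigned long shm_align_mask =
		way_mask > page_mask ? way_mask : page_mask;

	printf("shm_align_mask = %#lx\n", shm_align_mask); /* 0x3fff */
	return 0;
}
```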
Diffstat (limited to 'arch/sh')
-rw-r--r-- | arch/sh/kernel/cpu/init.c | 5
-rw-r--r-- | arch/sh/kernel/sys_sh.c | 54

2 files changed, 39 insertions(+), 20 deletions(-)
```diff
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 731dd61419dd..bfb90eb0b7a6 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
+#include <asm/page.h>
 #include <asm/system.h>
 #include <asm/cacheflush.h>
 #include <asm/cache.h>
@@ -198,6 +199,10 @@ asmlinkage void __init sh_cpu_init(void)
 	/* Init the cache */
 	cache_init();
 
+	shm_align_mask = max_t(unsigned long,
+			       cpu_data->dcache.way_size - 1,
+			       PAGE_SIZE - 1);
+
 	/* Disable the FPU */
 	if (fpu_disabled) {
 		printk("FPU Disabled\n");
```
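
For reference, the way_size consulted above is the number of bytes one cache way spans; on sh it is derived from the probed set count and line size. A sketch of that relationship, with both values assumed for illustration:

```c
#include <stdio.h>

int main(void)
{
	/* Assumed example geometry, not probed values. */
	unsigned int sets = 512, line_bytes = 32;

	/* One way spans sets * line size bytes. Two virtual addresses
	 * that are equal modulo this span index the same cache lines,
	 * which is exactly the aliasing that colouring must respect. */
	unsigned int way_size = sets * line_bytes;

	printf("way_size = %u bytes\n", way_size); /* 16384 */
	return 0;
}
```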
```diff
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 0ee7bf4cb238..b68ff705f067 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -21,6 +21,7 @@
 #include <linux/mman.h>
 #include <linux/file.h>
 #include <linux/utsname.h>
+#include <linux/module.h>
 #include <asm/cacheflush.h>
 #include <asm/uaccess.h>
 #include <asm/ipc.h>
@@ -44,11 +45,16 @@ asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
 	return error;
 }
 
-#if defined(HAVE_ARCH_UNMAPPED_AREA) && defined(CONFIG_MMU)
+unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
+
+EXPORT_SYMBOL(shm_align_mask);
+
 /*
- * To avoid cache alias, we map the shard page with same color.
+ * To avoid cache aliases, we map the shared page with same color.
  */
-#define COLOUR_ALIGN(addr)	(((addr)+SHMLBA-1)&~(SHMLBA-1))
+#define COLOUR_ALIGN(addr, pgoff)				\
+	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
+	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
 
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	unsigned long len, unsigned long pgoff, unsigned long flags)
```
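
A standalone sketch of the new macro's arithmetic, with the mask and page shift assumed to match the illustration above: rounding the hint up to a colour boundary and then adding the file offset's colour bits puts every mapping of a given file page at the same offset within a cache way:

```c
#include <stdio.h>

/* Stand-ins for the kernel's shm_align_mask and PAGE_SHIFT;
 * values assumed for illustration. */
#define SHM_ALIGN_MASK	0x3fffUL	/* 16 KiB colour */
#define PAGE_SHIFT	12

#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + SHM_ALIGN_MASK) & ~SHM_ALIGN_MASK) +	\
	 (((pgoff) << PAGE_SHIFT) & SHM_ALIGN_MASK))

int main(void)
{
	/* Two arbitrary hints, same file page (pgoff 3): both come
	 * back with identical colour bits, so they alias cleanly. */
	unsigned long a = COLOUR_ALIGN(0x400123UL, 3UL);
	unsigned long b = COLOUR_ALIGN(0x987654UL, 3UL);

	printf("a = %#lx, b = %#lx\n", a, b);	/* 0x407000, 0x98b000 */
	printf("same colour: %s\n",
	       ((a ^ b) & SHM_ALIGN_MASK) == 0 ? "yes" : "no");
	return 0;
}
```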
```diff
@@ -56,43 +62,52 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long start_addr;
+	int do_colour_align;
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
 		 * cache aliasing constraints.
 		 */
-		if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
+		if ((flags & MAP_SHARED) && (addr & shm_align_mask))
 			return -EINVAL;
 		return addr;
 	}
 
-	if (len > TASK_SIZE)
+	if (unlikely(len > TASK_SIZE))
 		return -ENOMEM;
 
+	do_colour_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_colour_align = 1;
+
 	if (addr) {
-		if (flags & MAP_PRIVATE)
-			addr = PAGE_ALIGN(addr);
+		if (do_colour_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
 		else
-			addr = COLOUR_ALIGN(addr);
+			addr = PAGE_ALIGN(addr);
+
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
-	if (len <= mm->cached_hole_size) {
+
+	if (len > mm->cached_hole_size) {
+		start_addr = addr = mm->free_area_cache;
+	} else {
 		mm->cached_hole_size = 0;
-		mm->free_area_cache = TASK_UNMAPPED_BASE;
+		start_addr = addr = TASK_UNMAPPED_BASE;
 	}
-	if (flags & MAP_PRIVATE)
-		addr = PAGE_ALIGN(mm->free_area_cache);
-	else
-		addr = COLOUR_ALIGN(mm->free_area_cache);
-	start_addr = addr;
 
 full_search:
+	if (do_colour_align)
+		addr = COLOUR_ALIGN(addr, pgoff);
+	else
+		addr = PAGE_ALIGN(mm->free_area_cache);
+
 	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
 		/* At this point: (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr) {
+		if (unlikely(TASK_SIZE - len < addr)) {
 			/*
 			 * Start a new search - just in case we missed
 			 * some holes.
```
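
The hunk above keys colouring off a new do_colour_align flag: a mapping is colour-aligned when it is file-backed or shared, i.e. whenever more than one virtual view of the same data can exist, while purely anonymous private memory keeps plain page alignment. A hypothetical distillation of that predicate:

```c
#include <stdbool.h>
#include <stdio.h>
#include <sys/mman.h>

/* Hypothetical helper mirroring the do_colour_align test above:
 * anonymous private mappings have exactly one view, so page
 * alignment suffices; anything file-backed or shared can appear
 * at several virtual addresses and must share a cache colour. */
static bool needs_colour_align(bool file_backed, unsigned long flags)
{
	return file_backed || (flags & MAP_SHARED);
}

int main(void)
{
	printf("anon private: %d\n", needs_colour_align(false, MAP_PRIVATE));
	printf("anon shared : %d\n", needs_colour_align(false, MAP_SHARED));
	printf("file private: %d\n", needs_colour_align(true, MAP_PRIVATE));
	return 0;
}
```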
```diff
@@ -104,7 +119,7 @@ full_search:
 			}
 			return -ENOMEM;
 		}
-		if (!vma || addr + len <= vma->vm_start) {
+		if (likely(!vma || addr + len <= vma->vm_start)) {
 			/*
 			 * Remember the place where we stopped the search:
 			 */
@@ -115,11 +130,10 @@ full_search:
 		mm->cached_hole_size = vma->vm_start - addr;
 
 		addr = vma->vm_end;
-		if (!(flags & MAP_PRIVATE))
-			addr = COLOUR_ALIGN(addr);
+		if (do_colour_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
 	}
 }
-#endif
 
 static inline long
 do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
```
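
To close, a hedged userspace demonstration of the intended behaviour (not part of the patch; the file path and colour size are assumed, error handling is trimmed, and the result only holds on a kernel that colour-aligns shared mappings this way): two MAP_SHARED mappings of the same file offset should come back at addresses whose colour bits match:

```c
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	const unsigned long colour = 16 * 1024;	/* assumed colour size */
	int fd = open("/tmp/colour-demo", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;

	/* Same file, same pgoff: with pgoff-based colouring the two
	 * views share the low bits below the way size. */
	void *a = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	void *b = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	printf("a=%p b=%p same colour: %s\n", a, b,
	       (((unsigned long)a ^ (unsigned long)b) & (colour - 1)) == 0 ?
	       "yes" : "no");

	unlink("/tmp/colour-demo");
	return 0;
}
```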