about summary refs log tree commit diff stats
path: root/include
diff options
context:
space:
mode:
authorPaul Mundt <lethal@linux-sh.org>2006-09-27 05:36:17 -0400
committerPaul Mundt <lethal@linux-sh.org>2006-09-27 05:36:17 -0400
commitf3c2575818fab45f8609e4aef2e43ab02b3a142e (patch)
treea4924d7dd8f8df229e36fab24ccccfe12437509b /include
parent87b0ef91b6f27c07bf7dcce8584437481f473092 (diff)
sh: Calculate shm alignment at runtime.
Set the SHM alignment at runtime, based off of probed cache desc. Optimize get_unmapped_area() to only colour align shared mappings. Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'include')
-rw-r--r--include/asm-sh/cacheflush.h2
-rw-r--r--include/asm-sh/cpu-sh3/cacheflush.h8
-rw-r--r--include/asm-sh/cpu-sh4/cacheflush.h3
-rw-r--r--include/asm-sh/page.h2
-rw-r--r--include/asm-sh/pgtable.h2
5 files changed, 4 insertions, 13 deletions
diff --git a/include/asm-sh/cacheflush.h b/include/asm-sh/cacheflush.h
index 92930b4a40d4..07f62ec9ff0c 100644
--- a/include/asm-sh/cacheflush.h
+++ b/include/asm-sh/cacheflush.h
@@ -28,5 +28,7 @@ extern void __flush_invalidate_region(void *start, int size);
28 memcpy(dst, src, len); \ 28 memcpy(dst, src, len); \
29 } while (0) 29 } while (0)
30 30
31#define HAVE_ARCH_UNMAPPED_AREA
32
31#endif /* __KERNEL__ */ 33#endif /* __KERNEL__ */
32#endif /* __ASM_SH_CACHEFLUSH_H */ 34#endif /* __ASM_SH_CACHEFLUSH_H */
diff --git a/include/asm-sh/cpu-sh3/cacheflush.h b/include/asm-sh/cpu-sh3/cacheflush.h
index 97f5a64c2ab8..03fde97a7fd0 100644
--- a/include/asm-sh/cpu-sh3/cacheflush.h
+++ b/include/asm-sh/cpu-sh3/cacheflush.h
@@ -64,12 +64,4 @@ void flush_icache_page(struct vm_area_struct *vma, struct page *page);
64 64
65#define p3_cache_init() do { } while (0) 65#define p3_cache_init() do { } while (0)
66 66
67/*
68 * We provide our own get_unmapped_area to avoid cache aliasing issues
69 * on SH7705 with a 32KB cache, and to page align addresses in the
70 * non-aliasing case.
71 */
72#define HAVE_ARCH_UNMAPPED_AREA
73
74#endif /* __ASM_CPU_SH3_CACHEFLUSH_H */ 67#endif /* __ASM_CPU_SH3_CACHEFLUSH_H */
75
diff --git a/include/asm-sh/cpu-sh4/cacheflush.h b/include/asm-sh/cpu-sh4/cacheflush.h
index a95fc951aff6..515fd574267c 100644
--- a/include/asm-sh/cpu-sh4/cacheflush.h
+++ b/include/asm-sh/cpu-sh4/cacheflush.h
@@ -39,9 +39,6 @@ void p3_cache_init(void);
39 39
40#define PG_mapped PG_arch_1 40#define PG_mapped PG_arch_1
41 41
42/* We provide our own get_unmapped_area to avoid cache alias issue */
43#define HAVE_ARCH_UNMAPPED_AREA
44
45#ifdef CONFIG_MMU 42#ifdef CONFIG_MMU
46extern int remap_area_pages(unsigned long addr, unsigned long phys_addr, 43extern int remap_area_pages(unsigned long addr, unsigned long phys_addr,
47 unsigned long size, unsigned long flags); 44 unsigned long size, unsigned long flags);
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index 3d8dae31a6f6..ca8b26d90475 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -44,6 +44,8 @@
44extern void (*clear_page)(void *to); 44extern void (*clear_page)(void *to);
45extern void (*copy_page)(void *to, void *from); 45extern void (*copy_page)(void *to, void *from);
46 46
47extern unsigned long shm_align_mask;
48
47#ifdef CONFIG_MMU 49#ifdef CONFIG_MMU
48extern void clear_page_slow(void *to); 50extern void clear_page_slow(void *to);
49extern void copy_page_slow(void *to, void *from); 51extern void copy_page_slow(void *to, void *from);
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index 41c559d8ba87..2c8682ad1012 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -340,6 +340,4 @@ extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t
340#include <asm-generic/pgtable.h> 340#include <asm-generic/pgtable.h>
341 341
342#endif /* !__ASSEMBLY__ */ 342#endif /* !__ASSEMBLY__ */
343
344#endif /* __ASM_SH_PAGE_H */ 343#endif /* __ASM_SH_PAGE_H */
345