path: root/include
author    Chris Zankel <chris@zankel.net>  2007-08-22 13:14:51 -0400
committer Chris Zankel <chris@zankel.net>  2007-08-27 16:54:16 -0400
commit    6656920b0b50beacb6cb64cf55273cbb686e436e
tree      dab9fdb81821b455a29779de6ca3306dbdf05dbd /include
parent    ff6fd469885aafa5ec387babcb6537f3c00d6df0
[XTENSA] Add support for cache-aliasing
Add support for processors that have cache-aliasing issues, such as the
Stretch S5000 processor. Cache aliasing means that the size of the cache
(for one way) is larger than the page size, so a page can end up in several
places in the cache depending on the virtual-to-physical translation. The
method used here is to map a user page temporarily through auto-refill way 0
of the DTLB. We probably will want to revisit this issue and use a better
approach with kmap/kunmap.

Signed-off-by: Chris Zankel <chris@zankel.net>
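To make the aliasing arithmetic concrete, here is a small standalone C sketch (not part of the patch; the 16 KiB way and 4 KiB page are assumed example values). A virtual address's "color" is the part of the cache index that lies above the page offset; two mappings of the same physical page stay coherent only if their colors agree:

#include <stdio.h>

/* Hypothetical geometry: 16 KiB per way, 4 KiB pages (assumed values,
 * not taken from any particular Xtensa configuration). */
#define WAY_SIZE   (16 * 1024)
#define PAGE_SIZE_ (4 * 1024)
#define PAGE_MASK_ (~(PAGE_SIZE_ - 1))

/* Bits of the cache index that lie above the page offset: the "color". */
#define ALIAS_MASK (PAGE_MASK_ & (WAY_SIZE - 1))

static unsigned long color(unsigned long addr)
{
    return (addr & ALIAS_MASK) / PAGE_SIZE_;
}

int main(void)
{
    /* The same physical page mapped at two virtual addresses. */
    unsigned long va1 = 0x10001000;  /* color 1 */
    unsigned long va2 = 0x20003000;  /* color 3 */

    printf("color(va1)=%lu color(va2)=%lu -> %s\n",
           color(va1), color(va2),
           color(va1) == color(va2)
               ? "same cache lines, coherent"
               : "different cache lines: alias!");
    return 0;
}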
Diffstat (limited to 'include')
-rw-r--r--  include/asm-xtensa/cache.h       |   9
-rw-r--r--  include/asm-xtensa/cacheflush.h  |  81
-rw-r--r--  include/asm-xtensa/io.h          |   1
-rw-r--r--  include/asm-xtensa/page.h        |  56
-rw-r--r--  include/asm-xtensa/pgalloc.h     | 107
-rw-r--r--  include/asm-xtensa/pgtable.h     |  32
-rw-r--r--  include/asm-xtensa/tlb.h         |  30
7 files changed, 192 insertions, 124 deletions
diff --git a/include/asm-xtensa/cache.h b/include/asm-xtensa/cache.h
index 1c4a78f29ae2..3bba2a540cf0 100644
--- a/include/asm-xtensa/cache.h
+++ b/include/asm-xtensa/cache.h
@@ -19,6 +19,15 @@
 
 #define DCACHE_WAY_SIZE    (XCHAL_DCACHE_SIZE/XCHAL_DCACHE_WAYS)
 #define ICACHE_WAY_SIZE    (XCHAL_ICACHE_SIZE/XCHAL_ICACHE_WAYS)
+#define DCACHE_WAY_SHIFT   (XCHAL_DCACHE_SETWIDTH + XCHAL_DCACHE_LINEWIDTH)
+#define ICACHE_WAY_SHIFT   (XCHAL_ICACHE_SETWIDTH + XCHAL_ICACHE_LINEWIDTH)
+
+/* Maximum cache size per way. */
+#if DCACHE_WAY_SIZE >= ICACHE_WAY_SIZE
+# define CACHE_WAY_SIZE DCACHE_WAY_SIZE
+#else
+# define CACHE_WAY_SIZE ICACHE_WAY_SIZE
+#endif
 
 
 #endif	/* _XTENSA_CACHE_H */
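The new *_WAY_SHIFT constants express the same quantity as the *_WAY_SIZE divisions, since one way spans number-of-sets times line-size bytes. A standalone check with made-up XCHAL_* values (an 8 KiB, 2-way D-cache with 32-byte lines; real values come from the core's configuration headers):

#include <assert.h>
#include <stdio.h>

/* Made-up configuration values for illustration only; the real ones
 * come from the Xtensa core's configuration headers. */
#define XCHAL_DCACHE_SIZE      8192  /* total D-cache bytes */
#define XCHAL_DCACHE_WAYS      2
#define XCHAL_DCACHE_SETWIDTH  7     /* log2(sets) = 7 -> 128 sets */
#define XCHAL_DCACHE_LINEWIDTH 5     /* log2(line size) = 5 -> 32-byte lines */

#define DCACHE_WAY_SIZE  (XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS)
#define DCACHE_WAY_SHIFT (XCHAL_DCACHE_SETWIDTH + XCHAL_DCACHE_LINEWIDTH)

int main(void)
{
    /* One way = sets * line size = 1 << (SETWIDTH + LINEWIDTH). */
    assert(DCACHE_WAY_SIZE == (1 << DCACHE_WAY_SHIFT));
    printf("way size %d bytes: pages smaller than this can alias\n",
           DCACHE_WAY_SIZE);
    return 0;
}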
diff --git a/include/asm-xtensa/cacheflush.h b/include/asm-xtensa/cacheflush.h
index 22ef901b7845..b773c57e75a5 100644
--- a/include/asm-xtensa/cacheflush.h
+++ b/include/asm-xtensa/cacheflush.h
@@ -5,7 +5,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * (C) 2001 - 2006 Tensilica Inc.
+ * (C) 2001 - 2007 Tensilica Inc.
  */
10 10
11#ifndef _XTENSA_CACHEFLUSH_H 11#ifndef _XTENSA_CACHEFLUSH_H
@@ -18,10 +18,7 @@
 #include <asm/page.h>
 
 /*
- * flush and invalidate data cache, invalidate instruction cache:
- *
- * __flush_invalidate_cache_all()
- * __flush_invalidate_cache_range(from,sze)
+ * Lo-level routines for cache flushing.
  *
  * invalidate data or instruction cache:
  *
@@ -40,26 +37,39 @@
  * __flush_invalidate_dcache_all()
  * __flush_invalidate_dcache_page(adr)
  * __flush_invalidate_dcache_range(from,size)
+ *
+ * specials for cache aliasing:
+ *
+ * __flush_invalidate_dcache_page_alias(vaddr,paddr)
+ * __invalidate_icache_page_alias(vaddr,paddr)
  */
 
-extern void __flush_invalidate_cache_all(void);
-extern void __flush_invalidate_cache_range(unsigned long, unsigned long);
-extern void __flush_invalidate_dcache_all(void);
+extern void __invalidate_dcache_all(void);
 extern void __invalidate_icache_all(void);
-
 extern void __invalidate_dcache_page(unsigned long);
 extern void __invalidate_icache_page(unsigned long);
 extern void __invalidate_icache_range(unsigned long, unsigned long);
 extern void __invalidate_dcache_range(unsigned long, unsigned long);
 
+
 #if XCHAL_DCACHE_IS_WRITEBACK
+extern void __flush_invalidate_dcache_all(void);
 extern void __flush_dcache_page(unsigned long);
+extern void __flush_dcache_range(unsigned long, unsigned long);
 extern void __flush_invalidate_dcache_page(unsigned long);
 extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
 #else
-# define __flush_dcache_page(p)                do { } while(0)
-# define __flush_invalidate_dcache_page(p)     do { } while(0)
-# define __flush_invalidate_dcache_range(p,s)  do { } while(0)
+# define __flush_dcache_range(p,s)             do { } while(0)
+# define __flush_dcache_page(p)                do { } while(0)
+# define __flush_invalidate_dcache_page(p)     __invalidate_dcache_page(p)
+# define __flush_invalidate_dcache_range(p,s)  __invalidate_dcache_range(p,s)
+#endif
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
+#endif
+#if (ICACHE_WAY_SIZE > PAGE_SIZE)
+extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
 #endif
 
 /*
65/* 75/*
@@ -71,17 +81,21 @@ extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
  * (see also Documentation/cachetlb.txt)
  */
 
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
 
-#define flush_cache_all()               __flush_invalidate_cache_all();
-#define flush_cache_mm(mm)              __flush_invalidate_cache_all();
-#define flush_cache_dup_mm(mm)          __flush_invalidate_cache_all();
+#define flush_cache_all()                                       \
+	do {                                                    \
+		__flush_invalidate_dcache_all();                \
+		__invalidate_icache_all();                      \
+	} while (0)
 
-#define flush_cache_vmap(start,end)     __flush_invalidate_cache_all();
-#define flush_cache_vunmap(start,end)   __flush_invalidate_cache_all();
+#define flush_cache_mm(mm)              flush_cache_all()
+#define flush_cache_dup_mm(mm)          flush_cache_mm(mm)
 
-extern void flush_dcache_page(struct page*);
+#define flush_cache_vmap(start,end)     flush_cache_all()
+#define flush_cache_vunmap(start,end)   flush_cache_all()
 
+extern void flush_dcache_page(struct page*);
 extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
 extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long);
 
@@ -101,24 +115,39 @@ extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long);
 
 #endif
 
+/* Ensure consistency between data and instruction cache. */
 #define flush_icache_range(start,end)                           \
-	__invalidate_icache_range(start,(end)-(start))
+	do {                                                    \
+		__flush_dcache_range(start, (end) - (start));   \
+		__invalidate_icache_range(start,(end) - (start)); \
+	} while (0)
 
 /* This is not required, see Documentation/cachetlb.txt */
-
-#define flush_icache_page(vma,page)     do { } while(0)
+#define flush_icache_page(vma,page)     do { } while (0)
 
 #define flush_dcache_mmap_lock(mapping)     do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)   do { } while (0)
 
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
+extern void copy_to_user_page(struct vm_area_struct*, struct page*,
+		unsigned long, void*, const void*, unsigned long);
+extern void copy_from_user_page(struct vm_area_struct*, struct page*,
+		unsigned long, void*, const void*, unsigned long);
+
+#else
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len)      \
+	do {                                                    \
+		memcpy(dst, src, len);                          \
+		__flush_dcache_range((unsigned long) dst, len); \
+		__invalidate_icache_range((unsigned long) dst, len); \
+	} while (0)
 
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	memcpy(dst, src, len)
 
-#endif /* __KERNEL__ */
+#endif
 
+#endif /* __KERNEL__ */
 #endif /* _XTENSA_CACHEFLUSH_H */
-
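Note how the rewritten non-aliasing copy_to_user_page() orders its steps: write the bytes, write the dcache back, then invalidate the icache, so an icache refill cannot pick up stale memory. A standalone sketch of that ordering with stubbed cache primitives (the stubs are illustrative placeholders, not the kernel routines):

#include <stdio.h>
#include <string.h>

/* Stub cache primitives -- placeholders standing in for the kernel's
 * assembly routines, only to make the ordering visible. */
static void flush_dcache_range(unsigned long addr, unsigned long len)
{
    printf("dcache writeback   %#lx..%#lx\n", addr, addr + len);
}

static void invalidate_icache_range(unsigned long addr, unsigned long len)
{
    printf("icache invalidate  %#lx..%#lx\n", addr, addr + len);
}

/* Mirrors the non-aliasing copy_to_user_page(): copy, push the new
 * bytes out of the dcache, then drop any stale icache lines. */
static void copy_to_user_page_sketch(void *dst, const void *src, size_t len)
{
    memcpy(dst, src, len);
    flush_dcache_range((unsigned long)dst, len);
    invalidate_icache_range((unsigned long)dst, len);
}

int main(void)
{
    char page[64];
    copy_to_user_page_sketch(page, "breakpoint opcode", 18);
    return 0;
}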
diff --git a/include/asm-xtensa/io.h b/include/asm-xtensa/io.h
index 0faa614d9696..47c3616ea9ac 100644
--- a/include/asm-xtensa/io.h
+++ b/include/asm-xtensa/io.h
@@ -14,6 +14,7 @@
 #ifdef __KERNEL__
 #include <asm/byteorder.h>
 #include <asm/page.h>
+#include <linux/kernel.h>
 
 #include <linux/types.h>
 
diff --git a/include/asm-xtensa/page.h b/include/asm-xtensa/page.h
index 2d6ac21136cf..55ce2c9749a3 100644
--- a/include/asm-xtensa/page.h
+++ b/include/asm-xtensa/page.h
@@ -15,6 +15,7 @@
 
 #include <asm/processor.h>
 #include <asm/types.h>
+#include <asm/cache.h>
 
 /*
  * Fixed TLB translations in the processor.
@@ -39,6 +40,53 @@
 #define MAX_MEM_PFN    XCHAL_KSEG_SIZE
 #define PGTABLE_START  0x80000000
 
+/*
+ * Cache aliasing:
+ *
+ * If the cache size for one way is greater than the page size, we have to
+ * deal with cache aliasing. The cache index is wider than the page size:
+ *
+ *   |    |cache| cache index
+ *   | pfn  |off|   virtual address
+ *   |xxxx:X|zzz|
+ *   |    : |   |
+ *   | \  / |   |
+ *   |trans.|   |
+ *   | /  \ |   |
+ *   |yyyy:Y|zzz|   physical address
+ *
+ * When the page number is translated to the physical page address, the lowest
+ * bit(s) (X) that are part of the cache index are also translated (Y).
+ * If this translation changes bit(s) (X), the cache index is also affected,
+ * thus resulting in a different cache line than before.
+ * The kernel does not provide a mechanism to ensure that the page color
+ * (represented by this bit) remains the same when allocated or when pages
+ * are remapped. When user pages are mapped into kernel space, the color of
+ * the page might also change.
+ *
+ * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
+ * to temporarily map a page so we can match the color.
+ */
+
+#if DCACHE_WAY_SIZE > PAGE_SIZE
+# define DCACHE_ALIAS_ORDER    (DCACHE_WAY_SHIFT - PAGE_SHIFT)
+# define DCACHE_ALIAS_MASK     (PAGE_MASK & (DCACHE_WAY_SIZE - 1))
+# define DCACHE_ALIAS(a)       (((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
+# define DCACHE_ALIAS_EQ(a,b)  ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
+#else
+# define DCACHE_ALIAS_ORDER    0
+#endif
+
+#if ICACHE_WAY_SIZE > PAGE_SIZE
+# define ICACHE_ALIAS_ORDER    (ICACHE_WAY_SHIFT - PAGE_SHIFT)
+# define ICACHE_ALIAS_MASK     (PAGE_MASK & (ICACHE_WAY_SIZE - 1))
+# define ICACHE_ALIAS(a)       (((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
+# define ICACHE_ALIAS_EQ(a,b)  ((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0)
+#else
+# define ICACHE_ALIAS_ORDER    0
+#endif
+
+
 #ifdef __ASSEMBLY__
 
 #define __pgprot(x)    (x)
@@ -90,11 +138,11 @@ extern void copy_page(void *to, void *from);
  * some extra work
  */
 
-#if (DCACHE_WAY_SIZE > PAGE_SIZE)
-void clear_user_page(void *addr, unsigned long vaddr, struct page* page);
-void copy_user_page(void *to,void* from,unsigned long vaddr,struct page* page);
+#if DCACHE_WAY_SIZE > PAGE_SIZE
+extern void clear_user_page(void*, unsigned long, struct page*);
+extern void copy_user_page(void*, void*, unsigned long, struct page*);
 #else
-# define clear_user_page(page,vaddr,pg)	clear_page(page)
+# define clear_user_page(page, vaddr, pg)	clear_page(page)
 # define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
 #endif
 
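The DCACHE_ALIAS* macros added above reduce an address to its page color. The standalone sketch below (assumed 16 KiB way and 4 KiB pages; TLBTEMP_BASE_1 taken from the pgtable.h hunk later in this patch) shows how a temporary kernel mapping can be placed at a color matching the user address, which is the essence of the temporary-mapping approach. Whether the kernel computes the address exactly this way is outside this diff:

#include <stdio.h>

/* Assumed geometry for illustration: 16 KiB D-cache way, 4 KiB pages. */
#define PAGE_SHIFT_      12
#define PAGE_SIZE_       (1UL << PAGE_SHIFT_)
#define PAGE_MASK_       (~(PAGE_SIZE_ - 1))
#define DCACHE_WAY_SIZE_ (16UL * 1024)

/* Mirrors the macros this hunk adds. */
#define DCACHE_ALIAS_MASK    (PAGE_MASK_ & (DCACHE_WAY_SIZE_ - 1))
#define DCACHE_ALIAS(a)      (((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT_)
#define DCACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)

/* Value taken from the pgtable.h hunk below. */
#define TLBTEMP_BASE_1   0xC6FF0000UL

int main(void)
{
    unsigned long user_vaddr = 0x00403000;   /* some user mapping */

    /* Pick the kernel-side temporary address whose color matches the
     * user address, so both hit the same cache lines. */
    unsigned long temp = TLBTEMP_BASE_1 + (user_vaddr & DCACHE_ALIAS_MASK);

    printf("user color %lu, temp mapping %#lx, colors match: %d\n",
           DCACHE_ALIAS(user_vaddr), temp,
           DCACHE_ALIAS_EQ(user_vaddr, temp));
    return 0;
}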
diff --git a/include/asm-xtensa/pgalloc.h b/include/asm-xtensa/pgalloc.h
index d56ddf2055e1..3e5b56525102 100644
--- a/include/asm-xtensa/pgalloc.h
+++ b/include/asm-xtensa/pgalloc.h
@@ -1,11 +1,11 @@
 /*
- * linux/include/asm-xtensa/pgalloc.h
+ * include/asm-xtensa/pgalloc.h
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  *
- * Copyright (C) 2001-2005 Tensilica Inc.
+ * Copyright (C) 2001-2007 Tensilica Inc.
  */
 
 #ifndef _XTENSA_PGALLOC_H
@@ -13,103 +13,54 @@
 
 #ifdef __KERNEL__
 
-#include <linux/threads.h>
 #include <linux/highmem.h>
-#include <asm/processor.h>
-#include <asm/cacheflush.h>
-
-
-/* Cache aliasing:
- *
- * If the cache size for one way is greater than the page size, we have to
- * deal with cache aliasing. The cache index is wider than the page size:
- *
- * |cache |
- * |pgnum |page|    virtual address
- * |xxxxxX|zzzz|
- * |      |    |
- *   \  / |    |
- *  trans.|    |
- *   /  \ |    |
- * |yyyyyY|zzzz|    physical address
- *
- * When the page number is translated to the physical page address, the lowest
- * bit(s) (X) that are also part of the cache index are also translated (Y).
- * If this translation changes this bit (X), the cache index is also afected,
- * thus resulting in a different cache line than before.
- * The kernel does not provide a mechanism to ensure that the page color
- * (represented by this bit) remains the same when allocated or when pages
- * are remapped. When user pages are mapped into kernel space, the color of
- * the page might also change.
- *
- * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
- * to temporarily map a patch so we can match the color.
- */
-
-#if (DCACHE_WAY_SIZE > PAGE_SIZE)
-# define PAGE_COLOR_MASK    (PAGE_MASK & (DCACHE_WAY_SIZE-1))
-# define PAGE_COLOR(a)      \
-	(((unsigned long)(a)&PAGE_COLOR_MASK) >> PAGE_SHIFT)
-# define PAGE_COLOR_EQ(a,b) \
-	((((unsigned long)(a) ^ (unsigned long)(b)) & PAGE_COLOR_MASK) == 0)
-# define PAGE_COLOR_MAP0(v) \
-	(VMALLOC_END + ((unsigned long)(v) & PAGE_COLOR_MASK))
-# define PAGE_COLOR_MAP1(v) \
-	(VMALLOC_END + ((unsigned long)(v) & PAGE_COLOR_MASK) + DCACHE_WAY_SIZE)
-#endif
 
 /*
  * Allocating and freeing a pmd is trivial: the 1-entry pmd is
  * inside the pgd, so has no extra memory associated with it.
  */
 
-#define pgd_free(pgd)	free_page((unsigned long)(pgd))
-
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+#define pmd_populate_kernel(mm, pmdp, ptep) \
+	(pmd_val(*(pmdp)) = ((unsigned long)ptep))
+#define pmd_populate(mm, pmdp, page) \
+	(pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
 
-static inline void
-pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *pte)
+static inline pgd_t*
+pgd_alloc(struct mm_struct *mm)
 {
-	pmd_val(*(pmdp)) = (unsigned long)(pte);
-	__asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
+	return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER);
 }
 
-static inline void
-pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *page)
+static inline void pgd_free(pgd_t *pgd)
 {
-	pmd_val(*(pmdp)) = (unsigned long)page_to_virt(page);
-	__asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
+	free_page((unsigned long)pgd);
 }
 
+/* Use a slab cache for the pte pages (see also sparc64 implementation) */
 
+extern struct kmem_cache *pgtable_cache;
 
-#else
-
-# define pmd_populate_kernel(mm, pmdp, pte) \
-	(pmd_val(*(pmdp)) = (unsigned long)(pte))
-# define pmd_populate(mm, pmdp, page) \
-	(pmd_val(*(pmdp)) = (unsigned long)page_to_virt(page))
-
-#endif
-
-static inline pgd_t*
-pgd_alloc(struct mm_struct *mm)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+					  unsigned long address)
 {
-	pgd_t *pgd;
-
-	pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGD_ORDER);
-
-	if (likely(pgd != NULL))
-		__flush_dcache_page((unsigned long)pgd);
+	return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT);
+}
 
-	return pgd;
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+					 unsigned long addr)
+{
+	return virt_to_page(pte_alloc_one_kernel(mm, addr));
 }
 
-extern pte_t* pte_alloc_one_kernel(struct mm_struct* mm, unsigned long addr);
-extern struct page* pte_alloc_one(struct mm_struct* mm, unsigned long addr);
+static inline void pte_free_kernel(pte_t *pte)
+{
+	kmem_cache_free(pgtable_cache, pte);
+}
 
-#define pte_free_kernel(pte) free_page((unsigned long)pte)
-#define pte_free(pte) __free_page(pte)
+static inline void pte_free(struct page *page)
+{
+	kmem_cache_free(pgtable_cache, page_address(page));
+}
 
 #endif /* __KERNEL__ */
 #endif /* _XTENSA_PGALLOC_H */
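pgtable_cache is only declared extern here; its definition and pgtable_cache_init() live outside include/ (in arch/xtensa/mm) and are not part of this diffstat. A hedged sketch of what such a definition could look like, assuming the 2.6.23-era kmem_cache_create() constructor signature and a constructor that pre-clears the ptes so pages from the cache start out invalid:

/* Sketch only: an assumed pgtable_cache definition for arch/xtensa/mm,
 * not part of this include/ diff. Assumes the 2.6.23-era ctor signature
 * void (*)(void *, struct kmem_cache *, unsigned long). */
struct kmem_cache *pgtable_cache __read_mostly;

static void pte_ctor(void *addr, struct kmem_cache *cache,
		     unsigned long flags)
{
	pte_t *ptep = (pte_t *)addr;
	int i;

	/* Start every pte page out as "no mapping". */
	for (i = 0; i < PTRS_PER_PTE; i++)
		pte_clear(NULL, 0, ptep + i);
}

void __init pgtable_cache_init(void)
{
	/* One page per object, page-aligned, so a pte page never shares
	 * a slab page with anything else. */
	pgtable_cache = kmem_cache_create("pgtable_cache",
					  PAGE_SIZE, PAGE_SIZE,
					  SLAB_HWCACHE_ALIGN, pte_ctor);
}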
diff --git a/include/asm-xtensa/pgtable.h b/include/asm-xtensa/pgtable.h
index 667a6c46b5a1..c0fcc1c9660c 100644
--- a/include/asm-xtensa/pgtable.h
+++ b/include/asm-xtensa/pgtable.h
@@ -1,5 +1,5 @@
 /*
- * linux/include/asm-xtensa/pgtable.h
+ * include/asm-xtensa/pgtable.h
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -60,16 +60,20 @@
 #define FIRST_USER_ADDRESS	0
 #define FIRST_USER_PGD_NR	(FIRST_USER_ADDRESS >> PGDIR_SHIFT)
 
-/* virtual memory area. We keep a distance to other memory regions to be
+/*
+ * Virtual memory area. We keep a distance to other memory regions to be
  * on the safe side. We also use this area for cache aliasing.
  */
 
-// FIXME: virtual memory area must be configuration-dependent
-
 #define VMALLOC_START		0xC0000000
-#define VMALLOC_END		0xC7FF0000
+#define VMALLOC_END		0xC6FEFFFF
+#define TLBTEMP_BASE_1		0xC6FF0000
+#define TLBTEMP_BASE_2		0xC6FF8000
+#define MODULE_START		0xC7000000
+#define MODULE_END		0xC7FFFFFF
 
-/* Xtensa Linux config PTE layout (when present):
+/*
+ * Xtensa Linux config PTE layout (when present):
  *	31-12:	PPN
  *	11-6:	Software
  *	5-4:	RING
@@ -126,12 +130,13 @@
 #define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE)
 #define PAGE_SHARED_EXEC \
 	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
-#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT)
+#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
+#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)
 
 #if (DCACHE_WAY_SIZE > PAGE_SIZE)
-# define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED | _PAGE_HW_WRITE)
+# define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED)
 #else
-# define _PAGE_DIRECTORY (_PAGE_VALID|_PAGE_ACCESSED|_PAGE_HW_WRITE|_PAGE_CA_WB)
+# define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED | _PAGE_CA_WB)
 #endif
 
 #else /* no mmu */
@@ -244,6 +249,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 static inline void update_pte(pte_t *ptep, pte_t pteval)
 {
 	*ptep = pteval;
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+	__asm__ __volatile__ ("dhwb %0, 0" :: "a" (ptep));
+#endif
+
 }
 
 struct mm_struct;
@@ -383,13 +392,12 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
  * remap a physical page `pfn' of size `size' with page protection `prot'
  * into virtual address `from'
  */
+
 #define io_remap_pfn_range(vma,from,pfn,size,prot) \
 	remap_pfn_range(vma, from, pfn, size, prot)
 
 
-/* No page table caches to init */
-
-#define pgtable_cache_init() do { } while (0)
+extern void pgtable_cache_init(void);
 
 typedef pte_t *pte_addr_t;
 
diff --git a/include/asm-xtensa/tlb.h b/include/asm-xtensa/tlb.h
index 4562b2dcfbc0..4830232017af 100644
--- a/include/asm-xtensa/tlb.h
+++ b/include/asm-xtensa/tlb.h
@@ -11,14 +11,36 @@
 #ifndef _XTENSA_TLB_H
 #define _XTENSA_TLB_H
 
-#define tlb_start_vma(tlb,vma)                  do { } while (0)
-#define tlb_end_vma(tlb,vma)                    do { } while (0)
-#define __tlb_remove_tlb_entry(tlb,pte,addr)    do { } while (0)
+#include <asm/cache.h>
+#include <asm/page.h>
+
+#if (DCACHE_WAY_SIZE <= PAGE_SIZE)
+
+/* Note, read http://lkml.org/lkml/2004/1/15/6 */
+
+# define tlb_start_vma(tlb,vma)                 do { } while (0)
+# define tlb_end_vma(tlb,vma)                   do { } while (0)
+
+#else
 
+# define tlb_start_vma(tlb, vma)                                        \
+	do {                                                            \
+		if (!tlb->fullmm)                                       \
+			flush_cache_range(vma, vma->vm_start, vma->vm_end); \
+	} while(0)
+
+# define tlb_end_vma(tlb, vma)                                          \
+	do {                                                            \
+		if (!tlb->fullmm)                                       \
+			flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
+	} while(0)
+
+#endif
+
+#define __tlb_remove_tlb_entry(tlb,pte,addr)    do { } while (0)
 #define tlb_flush(tlb)                          flush_tlb_mm((tlb)->mm)
 
 #include <asm-generic/tlb.h>
-#include <asm/page.h>
 
 #define __pte_free_tlb(tlb,pte)                 pte_free(pte)
 