path: root/include/asm-xtensa
Diffstat (limited to 'include/asm-xtensa')
-rw-r--r--  include/asm-xtensa/bugs.h        |    6
-rw-r--r--  include/asm-xtensa/cache.h       |    9
-rw-r--r--  include/asm-xtensa/cacheflush.h  |   81
-rw-r--r--  include/asm-xtensa/elf.h         |   50
-rw-r--r--  include/asm-xtensa/io.h          |    1
-rw-r--r--  include/asm-xtensa/ioctls.h      |    4
-rw-r--r--  include/asm-xtensa/page.h        |  106
-rw-r--r--  include/asm-xtensa/pgalloc.h     |  107
-rw-r--r--  include/asm-xtensa/pgtable.h     |  235
-rw-r--r--  include/asm-xtensa/processor.h   |    2
-rw-r--r--  include/asm-xtensa/syscall.h     |   24
-rw-r--r--  include/asm-xtensa/termbits.h    |    5
-rw-r--r--  include/asm-xtensa/termios.h     |    6
-rw-r--r--  include/asm-xtensa/timex.h       |    4
-rw-r--r--  include/asm-xtensa/tlb.h         |   30
-rw-r--r--  include/asm-xtensa/types.h       |    9
-rw-r--r--  include/asm-xtensa/unistd.h      |  128
17 files changed, 530 insertions, 277 deletions
diff --git a/include/asm-xtensa/bugs.h b/include/asm-xtensa/bugs.h
index c42285320133..69b29d198249 100644
--- a/include/asm-xtensa/bugs.h
+++ b/include/asm-xtensa/bugs.h
@@ -13,10 +13,6 @@
13#ifndef _XTENSA_BUGS_H 13#ifndef _XTENSA_BUGS_H
14#define _XTENSA_BUGS_H 14#define _XTENSA_BUGS_H
15 15
16#include <asm/processor.h> 16static void check_bugs(void) { }
17
18static void __init check_bugs(void)
19{
20}
21 17
22#endif /* _XTENSA_BUGS_H */ 18#endif /* _XTENSA_BUGS_H */
diff --git a/include/asm-xtensa/cache.h b/include/asm-xtensa/cache.h
index 1c4a78f29ae2..3bba2a540cf0 100644
--- a/include/asm-xtensa/cache.h
+++ b/include/asm-xtensa/cache.h
@@ -19,6 +19,15 @@
19 19
20#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE/XCHAL_DCACHE_WAYS) 20#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE/XCHAL_DCACHE_WAYS)
21#define ICACHE_WAY_SIZE (XCHAL_ICACHE_SIZE/XCHAL_ICACHE_WAYS) 21#define ICACHE_WAY_SIZE (XCHAL_ICACHE_SIZE/XCHAL_ICACHE_WAYS)
22#define DCACHE_WAY_SHIFT (XCHAL_DCACHE_SETWIDTH + XCHAL_DCACHE_LINEWIDTH)
23#define ICACHE_WAY_SHIFT (XCHAL_ICACHE_SETWIDTH + XCHAL_ICACHE_LINEWIDTH)
24
25/* Maximum cache size per way. */
26#if DCACHE_WAY_SIZE >= ICACHE_WAY_SIZE
27# define CACHE_WAY_SIZE DCACHE_WAY_SIZE
28#else
29# define CACHE_WAY_SIZE ICACHE_WAY_SIZE
30#endif
22 31
23 32
24#endif /* _XTENSA_CACHE_H */ 33#endif /* _XTENSA_CACHE_H */
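The new WAY_SHIFT/WAY_SIZE macros above express the same quantity two ways: one cache way holds one line per set, so its size is sets * line size, i.e. 1 << (SETWIDTH + LINEWIDTH). A minimal sketch with a hypothetical 16 KB, 2-way, 32-byte-line data cache (the XCHAL_* values below are made up for illustration):

/* Hypothetical configuration, for illustration only. */
#define XCHAL_DCACHE_SIZE       16384   /* total D-cache bytes           */
#define XCHAL_DCACHE_WAYS       2
#define XCHAL_DCACHE_LINEWIDTH  5       /* 32-byte lines                 */
#define XCHAL_DCACHE_SETWIDTH   8       /* 16384 / (2 * 32) = 256 sets   */

#define DCACHE_WAY_SIZE  (XCHAL_DCACHE_SIZE/XCHAL_DCACHE_WAYS)            /* 8192 */
#define DCACHE_WAY_SHIFT (XCHAL_DCACHE_SETWIDTH + XCHAL_DCACHE_LINEWIDTH) /* 13   */

/* Both forms agree: 8192 == 1 << 13, and with 4 kB pages this cache has
 * DCACHE_WAY_SIZE > PAGE_SIZE, so the aliasing paths below are needed.   */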
diff --git a/include/asm-xtensa/cacheflush.h b/include/asm-xtensa/cacheflush.h
index 22ef901b7845..b773c57e75a5 100644
--- a/include/asm-xtensa/cacheflush.h
+++ b/include/asm-xtensa/cacheflush.h
@@ -5,7 +5,7 @@
5 * License. See the file "COPYING" in the main directory of this archive 5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details. 6 * for more details.
7 * 7 *
8 * (C) 2001 - 2006 Tensilica Inc. 8 * (C) 2001 - 2007 Tensilica Inc.
9 */ 9 */
10 10
11#ifndef _XTENSA_CACHEFLUSH_H 11#ifndef _XTENSA_CACHEFLUSH_H
@@ -18,10 +18,7 @@
18#include <asm/page.h> 18#include <asm/page.h>
19 19
20/* 20/*
21 * flush and invalidate data cache, invalidate instruction cache: 21 * Lo-level routines for cache flushing.
22 *
23 * __flush_invalidate_cache_all()
24 * __flush_invalidate_cache_range(from,sze)
25 * 22 *
26 * invalidate data or instruction cache: 23 * invalidate data or instruction cache:
27 * 24 *
@@ -40,26 +37,39 @@
40 * __flush_invalidate_dcache_all() 37 * __flush_invalidate_dcache_all()
41 * __flush_invalidate_dcache_page(adr) 38 * __flush_invalidate_dcache_page(adr)
42 * __flush_invalidate_dcache_range(from,size) 39 * __flush_invalidate_dcache_range(from,size)
40 *
41 * specials for cache aliasing:
42 *
43 * __flush_invalidate_dcache_page_alias(vaddr,paddr)
44 * __invalidate_icache_page_alias(vaddr,paddr)
43 */ 45 */
44 46
45extern void __flush_invalidate_cache_all(void); 47extern void __invalidate_dcache_all(void);
46extern void __flush_invalidate_cache_range(unsigned long, unsigned long);
47extern void __flush_invalidate_dcache_all(void);
48extern void __invalidate_icache_all(void); 48extern void __invalidate_icache_all(void);
49
50extern void __invalidate_dcache_page(unsigned long); 49extern void __invalidate_dcache_page(unsigned long);
51extern void __invalidate_icache_page(unsigned long); 50extern void __invalidate_icache_page(unsigned long);
52extern void __invalidate_icache_range(unsigned long, unsigned long); 51extern void __invalidate_icache_range(unsigned long, unsigned long);
53extern void __invalidate_dcache_range(unsigned long, unsigned long); 52extern void __invalidate_dcache_range(unsigned long, unsigned long);
54 53
54
55#if XCHAL_DCACHE_IS_WRITEBACK 55#if XCHAL_DCACHE_IS_WRITEBACK
56extern void __flush_invalidate_dcache_all(void);
56extern void __flush_dcache_page(unsigned long); 57extern void __flush_dcache_page(unsigned long);
58extern void __flush_dcache_range(unsigned long, unsigned long);
57extern void __flush_invalidate_dcache_page(unsigned long); 59extern void __flush_invalidate_dcache_page(unsigned long);
58extern void __flush_invalidate_dcache_range(unsigned long, unsigned long); 60extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
59#else 61#else
60# define __flush_dcache_page(p) do { } while(0) 62# define __flush_dcache_range(p,s) do { } while(0)
61# define __flush_invalidate_dcache_page(p) do { } while(0) 63# define __flush_dcache_page(p) do { } while(0)
62# define __flush_invalidate_dcache_range(p,s) do { } while(0) 64# define __flush_invalidate_dcache_page(p) __invalidate_dcache_page(p)
65# define __flush_invalidate_dcache_range(p,s) __invalidate_dcache_range(p,s)
66#endif
67
68#if (DCACHE_WAY_SIZE > PAGE_SIZE)
69extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
70#endif
71#if (ICACHE_WAY_SIZE > PAGE_SIZE)
72extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
63#endif 73#endif
64 74
65/* 75/*
@@ -71,17 +81,21 @@ extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
71 * (see also Documentation/cachetlb.txt) 81 * (see also Documentation/cachetlb.txt)
72 */ 82 */
73 83
74#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK 84#if (DCACHE_WAY_SIZE > PAGE_SIZE)
75 85
76#define flush_cache_all() __flush_invalidate_cache_all(); 86#define flush_cache_all() \
77#define flush_cache_mm(mm) __flush_invalidate_cache_all(); 87 do { \
78#define flush_cache_dup_mm(mm) __flush_invalidate_cache_all(); 88 __flush_invalidate_dcache_all(); \
89 __invalidate_icache_all(); \
90 } while (0)
79 91
80#define flush_cache_vmap(start,end) __flush_invalidate_cache_all(); 92#define flush_cache_mm(mm) flush_cache_all()
81#define flush_cache_vunmap(start,end) __flush_invalidate_cache_all(); 93#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
82 94
83extern void flush_dcache_page(struct page*); 95#define flush_cache_vmap(start,end) flush_cache_all()
96#define flush_cache_vunmap(start,end) flush_cache_all()
84 97
98extern void flush_dcache_page(struct page*);
85extern void flush_cache_range(struct vm_area_struct*, ulong, ulong); 99extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
86extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long); 100extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long);
87 101
@@ -101,24 +115,39 @@ extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned lon
101 115
102#endif 116#endif
103 117
118/* Ensure consistency between data and instruction cache. */
104#define flush_icache_range(start,end) \ 119#define flush_icache_range(start,end) \
105 __invalidate_icache_range(start,(end)-(start)) 120 do { \
121 __flush_dcache_range(start, (end) - (start)); \
122 __invalidate_icache_range(start,(end) - (start)); \
123 } while (0)
106 124
107/* This is not required, see Documentation/cachetlb.txt */ 125/* This is not required, see Documentation/cachetlb.txt */
108 126#define flush_icache_page(vma,page) do { } while (0)
109#define flush_icache_page(vma,page) do { } while(0)
110 127
111#define flush_dcache_mmap_lock(mapping) do { } while (0) 128#define flush_dcache_mmap_lock(mapping) do { } while (0)
112#define flush_dcache_mmap_unlock(mapping) do { } while (0) 129#define flush_dcache_mmap_unlock(mapping) do { } while (0)
113 130
131#if (DCACHE_WAY_SIZE > PAGE_SIZE)
114 132
115#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ 133extern void copy_to_user_page(struct vm_area_struct*, struct page*,
116 memcpy(dst, src, len) 134 unsigned long, void*, const void*, unsigned long);
135extern void copy_from_user_page(struct vm_area_struct*, struct page*,
136 unsigned long, void*, const void*, unsigned long);
137
138#else
139
140#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
141 do { \
142 memcpy(dst, src, len); \
143 __flush_dcache_range((unsigned long) dst, len); \
144 __invalidate_icache_range((unsigned long) dst, len); \
145 } while (0)
117 146
118#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 147#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
119 memcpy(dst, src, len) 148 memcpy(dst, src, len)
120 149
121#endif /* __KERNEL__ */ 150#endif
122 151
152#endif /* __KERNEL__ */
123#endif /* _XTENSA_CACHEFLUSH_H */ 153#endif /* _XTENSA_CACHEFLUSH_H */
124
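The reworked flush_icache_range() above now writes the range back from the D-cache before invalidating the I-cache, which is the usual sequence after code has just been stored through the data side. A minimal usage sketch; the function and variable names are made up, only the macro comes from this header:

/* Sketch: make instructions that were just written through the D-cache
 * visible to instruction fetch.  'buf' and 'len' are illustrative.     */
static void publish_code(void *buf, unsigned long len)
{
        unsigned long start = (unsigned long) buf;

        /* Expands to __flush_dcache_range() followed by
         * __invalidate_icache_range(), per the definition above.       */
        flush_icache_range(start, start + len);
}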
diff --git a/include/asm-xtensa/elf.h b/include/asm-xtensa/elf.h
index 1569b53cec91..7083d46766a8 100644
--- a/include/asm-xtensa/elf.h
+++ b/include/asm-xtensa/elf.h
@@ -20,6 +20,56 @@
20#define EM_XTENSA 94 20#define EM_XTENSA 94
21#define EM_XTENSA_OLD 0xABC7 21#define EM_XTENSA_OLD 0xABC7
22 22
23/* Xtensa relocations defined by the ABIs */
24
25#define R_XTENSA_NONE 0
26#define R_XTENSA_32 1
27#define R_XTENSA_RTLD 2
28#define R_XTENSA_GLOB_DAT 3
29#define R_XTENSA_JMP_SLOT 4
30#define R_XTENSA_RELATIVE 5
31#define R_XTENSA_PLT 6
32#define R_XTENSA_OP0 8
33#define R_XTENSA_OP1 9
34#define R_XTENSA_OP2 10
35#define R_XTENSA_ASM_EXPAND 11
36#define R_XTENSA_ASM_SIMPLIFY 12
37#define R_XTENSA_GNU_VTINHERIT 15
38#define R_XTENSA_GNU_VTENTRY 16
39#define R_XTENSA_DIFF8 17
40#define R_XTENSA_DIFF16 18
41#define R_XTENSA_DIFF32 19
42#define R_XTENSA_SLOT0_OP 20
43#define R_XTENSA_SLOT1_OP 21
44#define R_XTENSA_SLOT2_OP 22
45#define R_XTENSA_SLOT3_OP 23
46#define R_XTENSA_SLOT4_OP 24
47#define R_XTENSA_SLOT5_OP 25
48#define R_XTENSA_SLOT6_OP 26
49#define R_XTENSA_SLOT7_OP 27
50#define R_XTENSA_SLOT8_OP 28
51#define R_XTENSA_SLOT9_OP 29
52#define R_XTENSA_SLOT10_OP 30
53#define R_XTENSA_SLOT11_OP 31
54#define R_XTENSA_SLOT12_OP 32
55#define R_XTENSA_SLOT13_OP 33
56#define R_XTENSA_SLOT14_OP 34
57#define R_XTENSA_SLOT0_ALT 35
58#define R_XTENSA_SLOT1_ALT 36
59#define R_XTENSA_SLOT2_ALT 37
60#define R_XTENSA_SLOT3_ALT 38
61#define R_XTENSA_SLOT4_ALT 39
62#define R_XTENSA_SLOT5_ALT 40
63#define R_XTENSA_SLOT6_ALT 41
64#define R_XTENSA_SLOT7_ALT 42
65#define R_XTENSA_SLOT8_ALT 43
66#define R_XTENSA_SLOT9_ALT 44
67#define R_XTENSA_SLOT10_ALT 45
68#define R_XTENSA_SLOT11_ALT 46
69#define R_XTENSA_SLOT12_ALT 47
70#define R_XTENSA_SLOT13_ALT 48
71#define R_XTENSA_SLOT14_ALT 49
72
23/* ELF register definitions. This is needed for core dump support. */ 73/* ELF register definitions. This is needed for core dump support. */
24 74
25/* 75/*
diff --git a/include/asm-xtensa/io.h b/include/asm-xtensa/io.h
index 0faa614d9696..47c3616ea9ac 100644
--- a/include/asm-xtensa/io.h
+++ b/include/asm-xtensa/io.h
@@ -14,6 +14,7 @@
14#ifdef __KERNEL__ 14#ifdef __KERNEL__
15#include <asm/byteorder.h> 15#include <asm/byteorder.h>
16#include <asm/page.h> 16#include <asm/page.h>
17#include <linux/kernel.h>
17 18
18#include <linux/types.h> 19#include <linux/types.h>
19 20
diff --git a/include/asm-xtensa/ioctls.h b/include/asm-xtensa/ioctls.h
index 39e6f23921bb..0ffa942954b9 100644
--- a/include/asm-xtensa/ioctls.h
+++ b/include/asm-xtensa/ioctls.h
@@ -91,6 +91,10 @@
91#define TIOCSBRK _IO('T', 39) /* BSD compatibility */ 91#define TIOCSBRK _IO('T', 39) /* BSD compatibility */
92#define TIOCCBRK _IO('T', 40) /* BSD compatibility */ 92#define TIOCCBRK _IO('T', 40) /* BSD compatibility */
93#define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/ 93#define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/
94#define TCGETS2 _IOR('T', 42, struct termios2)
95#define TCSETS2 _IOW('T', 43, struct termios2)
96#define TCSETSW2 _IOW('T', 44, struct termios2)
97#define TCSETSF2 _IOW('T', 45, struct termios2)
94#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ 98#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
95#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ 99#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
96 100
diff --git a/include/asm-xtensa/page.h b/include/asm-xtensa/page.h
index 1213cde75438..55ce2c9749a3 100644
--- a/include/asm-xtensa/page.h
+++ b/include/asm-xtensa/page.h
@@ -1,11 +1,11 @@
1/* 1/*
2 * linux/include/asm-xtensa/page.h 2 * include/asm-xtensa/page.h
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version2 as 5 * it under the terms of the GNU General Public License version2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 * 7 *
8 * Copyright (C) 2001 - 2005 Tensilica Inc. 8 * Copyright (C) 2001 - 2007 Tensilica Inc.
9 */ 9 */
10 10
11#ifndef _XTENSA_PAGE_H 11#ifndef _XTENSA_PAGE_H
@@ -14,6 +14,12 @@
14#ifdef __KERNEL__ 14#ifdef __KERNEL__
15 15
16#include <asm/processor.h> 16#include <asm/processor.h>
17#include <asm/types.h>
18#include <asm/cache.h>
19
20/*
21 * Fixed TLB translations in the processor.
22 */
17 23
18#define XCHAL_KSEG_CACHED_VADDR 0xd0000000 24#define XCHAL_KSEG_CACHED_VADDR 0xd0000000
19#define XCHAL_KSEG_BYPASS_VADDR 0xd8000000 25#define XCHAL_KSEG_BYPASS_VADDR 0xd8000000
@@ -26,13 +32,60 @@
26 */ 32 */
27 33
28#define PAGE_SHIFT 12 34#define PAGE_SHIFT 12
29#define PAGE_SIZE (1 << PAGE_SHIFT) 35#define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT)
30#define PAGE_MASK (~(PAGE_SIZE-1)) 36#define PAGE_MASK (~(PAGE_SIZE-1))
31#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE - 1) & PAGE_MASK) 37#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE - 1) & PAGE_MASK)
32 38
33#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR 39#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
34#define MAX_MEM_PFN XCHAL_KSEG_SIZE 40#define MAX_MEM_PFN XCHAL_KSEG_SIZE
35#define PGTABLE_START 0x80000000 41#define PGTABLE_START 0x80000000
42
43/*
44 * Cache aliasing:
45 *
46 * If the cache size for one way is greater than the page size, we have to
47 * deal with cache aliasing. The cache index is wider than the page size:
48 *
49 * | |cache| cache index
50 * | pfn |off| virtual address
51 * |xxxx:X|zzz|
52 * | : | |
53 * | \ / | |
54 * |trans.| |
55 * | / \ | |
56 * |yyyy:Y|zzz| physical address
57 *
58 * When the page number is translated to the physical page address, the lowest
59 * bit(s) (X) that are part of the cache index are also translated (Y).
60 * If this translation changes bit(s) (X), the cache index is also afected,
61 * thus resulting in a different cache line than before.
62 * The kernel does not provide a mechanism to ensure that the page color
63 * (represented by this bit) remains the same when allocated or when pages
64 * are remapped. When user pages are mapped into kernel space, the color of
65 * the page might also change.
66 *
67 * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
68 * to temporarily map a patch so we can match the color.
69 */
70
71#if DCACHE_WAY_SIZE > PAGE_SIZE
72# define DCACHE_ALIAS_ORDER (DCACHE_WAY_SHIFT - PAGE_SHIFT)
73# define DCACHE_ALIAS_MASK (PAGE_MASK & (DCACHE_WAY_SIZE - 1))
74# define DCACHE_ALIAS(a) (((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
75# define DCACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
76#else
77# define DCACHE_ALIAS_ORDER 0
78#endif
79
80#if ICACHE_WAY_SIZE > PAGE_SIZE
81# define ICACHE_ALIAS_ORDER (ICACHE_WAY_SHIFT - PAGE_SHIFT)
82# define ICACHE_ALIAS_MASK (PAGE_MASK & (ICACHE_WAY_SIZE - 1))
83# define ICACHE_ALIAS(a) (((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
84# define ICACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0)
85#else
86# define ICACHE_ALIAS_ORDER 0
87#endif
88
36 89
37#ifdef __ASSEMBLY__ 90#ifdef __ASSEMBLY__
38 91
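To make the new alias macros concrete, assume the hypothetical configuration from the cache.h note above (DCACHE_WAY_SIZE = 8 kB, PAGE_SIZE = 4 kB); the numbers are illustrative only:

/* With DCACHE_WAY_SIZE = 0x2000 and PAGE_SIZE = 0x1000:
 *
 *   DCACHE_ALIAS_ORDER       = 13 - 12             = 1     (2 page colors)
 *   DCACHE_ALIAS_MASK        = 0xfffff000 & 0x1fff = 0x1000
 *   DCACHE_ALIAS(0xd0003000) = 0x1000 >> 12        = 1     (odd color)
 *   DCACHE_ALIAS(0xd0006000) = 0x0000 >> 12        = 0     (even color)
 *
 * Two mappings of the same physical page index the same cache lines only
 * if DCACHE_ALIAS_EQ(va1, va2) holds; otherwise the alias flush helpers
 * declared in cacheflush.h have to be used.
 */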
@@ -58,34 +111,23 @@ typedef struct { unsigned long pgprot; } pgprot_t;
58 111
59/* 112/*
60 * Pure 2^n version of get_order 113 * Pure 2^n version of get_order
114 * Use 'nsau' instructions if supported by the processor or the generic version.
61 */ 115 */
62 116
63static inline int get_order(unsigned long size) 117#if XCHAL_HAVE_NSA
118
119static inline __attribute_const__ int get_order(unsigned long size)
64{ 120{
65 int order; 121 int lz;
66#ifndef XCHAL_HAVE_NSU 122 asm ("nsau %0, %1" : "=r" (lz) : "r" ((size - 1) >> PAGE_SHIFT));
67 unsigned long x1, x2, x4, x8, x16; 123 return 32 - lz;
68
69 size = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
70 x1 = size & 0xAAAAAAAA;
71 x2 = size & 0xCCCCCCCC;
72 x4 = size & 0xF0F0F0F0;
73 x8 = size & 0xFF00FF00;
74 x16 = size & 0xFFFF0000;
75 order = x2 ? 2 : 0;
76 order += (x16 != 0) * 16;
77 order += (x8 != 0) * 8;
78 order += (x4 != 0) * 4;
79 order += (x1 != 0);
80
81 return order;
82#else
83 size = (size - 1) >> PAGE_SHIFT;
84 asm ("nsau %0, %1" : "=r" (order) : "r" (size));
85 return 32 - order;
86#endif
87} 124}
88 125
126#else
127
128# include <asm-generic/page.h>
129
130#endif
89 131
90struct page; 132struct page;
91extern void clear_page(void *page); 133extern void clear_page(void *page);
@@ -96,11 +138,11 @@ extern void copy_page(void *to, void *from);
96 * some extra work 138 * some extra work
97 */ 139 */
98 140
99#if (DCACHE_WAY_SIZE > PAGE_SIZE) 141#if DCACHE_WAY_SIZE > PAGE_SIZE
100void clear_user_page(void *addr, unsigned long vaddr, struct page* page); 142extern void clear_user_page(void*, unsigned long, struct page*);
101void copy_user_page(void *to,void* from,unsigned long vaddr,struct page* page); 143extern void copy_user_page(void*, void*, unsigned long, struct page*);
102#else 144#else
103# define clear_user_page(page,vaddr,pg) clear_page(page) 145# define clear_user_page(page, vaddr, pg) clear_page(page)
104# define copy_user_page(to, from, vaddr, pg) copy_page(to, from) 146# define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
105#endif 147#endif
106 148
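The nsau-based get_order() above relies on NSAU ("normalization shift amount unsigned") returning the number of leading zero bits, with an input of 0 yielding 32. A rough portable equivalent, written as a sketch rather than the kernel's generic fallback; the name is mine and a 32-bit word is assumed:

/* Sketch only: what the inline asm computes, for PAGE_SHIFT == 12. */
static inline int get_order_sketch(unsigned long size)
{
        unsigned int x = (size - 1) >> 12;
        int lz = x ? __builtin_clz(x) : 32;   /* mimic nsau: nsau(0) == 32 */

        return 32 - lz;                       /* 4096 -> 0, 8192 -> 1, ... */
}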
diff --git a/include/asm-xtensa/pgalloc.h b/include/asm-xtensa/pgalloc.h
index d56ddf2055e1..3e5b56525102 100644
--- a/include/asm-xtensa/pgalloc.h
+++ b/include/asm-xtensa/pgalloc.h
@@ -1,11 +1,11 @@
1/* 1/*
2 * linux/include/asm-xtensa/pgalloc.h 2 * include/asm-xtensa/pgalloc.h
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 * 7 *
8 * Copyright (C) 2001-2005 Tensilica Inc. 8 * Copyright (C) 2001-2007 Tensilica Inc.
9 */ 9 */
10 10
11#ifndef _XTENSA_PGALLOC_H 11#ifndef _XTENSA_PGALLOC_H
@@ -13,103 +13,54 @@
13 13
14#ifdef __KERNEL__ 14#ifdef __KERNEL__
15 15
16#include <linux/threads.h>
17#include <linux/highmem.h> 16#include <linux/highmem.h>
18#include <asm/processor.h>
19#include <asm/cacheflush.h>
20
21
22/* Cache aliasing:
23 *
24 * If the cache size for one way is greater than the page size, we have to
25 * deal with cache aliasing. The cache index is wider than the page size:
26 *
27 * |cache |
28 * |pgnum |page| virtual address
29 * |xxxxxX|zzzz|
30 * | | |
31 * \ / | |
32 * trans.| |
33 * / \ | |
34 * |yyyyyY|zzzz| physical address
35 *
36 * When the page number is translated to the physical page address, the lowest
37 * bit(s) (X) that are also part of the cache index are also translated (Y).
38 * If this translation changes this bit (X), the cache index is also afected,
39 * thus resulting in a different cache line than before.
40 * The kernel does not provide a mechanism to ensure that the page color
41 * (represented by this bit) remains the same when allocated or when pages
42 * are remapped. When user pages are mapped into kernel space, the color of
43 * the page might also change.
44 *
45 * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
46 * to temporarily map a patch so we can match the color.
47 */
48
49#if (DCACHE_WAY_SIZE > PAGE_SIZE)
50# define PAGE_COLOR_MASK (PAGE_MASK & (DCACHE_WAY_SIZE-1))
51# define PAGE_COLOR(a) \
52 (((unsigned long)(a)&PAGE_COLOR_MASK) >> PAGE_SHIFT)
53# define PAGE_COLOR_EQ(a,b) \
54 ((((unsigned long)(a) ^ (unsigned long)(b)) & PAGE_COLOR_MASK) == 0)
55# define PAGE_COLOR_MAP0(v) \
56 (VMALLOC_END + ((unsigned long)(v) & PAGE_COLOR_MASK))
57# define PAGE_COLOR_MAP1(v) \
58 (VMALLOC_END + ((unsigned long)(v) & PAGE_COLOR_MASK) + DCACHE_WAY_SIZE)
59#endif
60 17
61/* 18/*
62 * Allocating and freeing a pmd is trivial: the 1-entry pmd is 19 * Allocating and freeing a pmd is trivial: the 1-entry pmd is
63 * inside the pgd, so has no extra memory associated with it. 20 * inside the pgd, so has no extra memory associated with it.
64 */ 21 */
65 22
66#define pgd_free(pgd) free_page((unsigned long)(pgd)) 23#define pmd_populate_kernel(mm, pmdp, ptep) \
67 24 (pmd_val(*(pmdp)) = ((unsigned long)ptep))
68#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK 25#define pmd_populate(mm, pmdp, page) \
26 (pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
69 27
70static inline void 28static inline pgd_t*
71pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *pte) 29pgd_alloc(struct mm_struct *mm)
72{ 30{
73 pmd_val(*(pmdp)) = (unsigned long)(pte); 31 return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER);
74 __asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
75} 32}
76 33
77static inline void 34static inline void pgd_free(pgd_t *pgd)
78pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *page)
79{ 35{
80 pmd_val(*(pmdp)) = (unsigned long)page_to_virt(page); 36 free_page((unsigned long)pgd);
81 __asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
82} 37}
83 38
39/* Use a slab cache for the pte pages (see also sparc64 implementation) */
84 40
41extern struct kmem_cache *pgtable_cache;
85 42
86#else 43static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
87 44 unsigned long address)
88# define pmd_populate_kernel(mm, pmdp, pte) \
89 (pmd_val(*(pmdp)) = (unsigned long)(pte))
90# define pmd_populate(mm, pmdp, page) \
91 (pmd_val(*(pmdp)) = (unsigned long)page_to_virt(page))
92
93#endif
94
95static inline pgd_t*
96pgd_alloc(struct mm_struct *mm)
97{ 45{
98 pgd_t *pgd; 46 return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT);
99 47}
100 pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGD_ORDER);
101
102 if (likely(pgd != NULL))
103 __flush_dcache_page((unsigned long)pgd);
104 48
105 return pgd; 49static inline struct page *pte_alloc_one(struct mm_struct *mm,
50 unsigned long addr)
51{
52 return virt_to_page(pte_alloc_one_kernel(mm, addr));
106} 53}
107 54
108extern pte_t* pte_alloc_one_kernel(struct mm_struct* mm, unsigned long addr); 55static inline void pte_free_kernel(pte_t *pte)
109extern struct page* pte_alloc_one(struct mm_struct* mm, unsigned long addr); 56{
57 kmem_cache_free(pgtable_cache, pte);
58}
110 59
111#define pte_free_kernel(pte) free_page((unsigned long)pte) 60static inline void pte_free(struct page *page)
112#define pte_free(pte) __free_page(pte) 61{
62 kmem_cache_free(pgtable_cache, page_address(page));
63}
113 64
114#endif /* __KERNEL__ */ 65#endif /* __KERNEL__ */
115#endif /* _XTENSA_PGALLOC_H */ 66#endif /* _XTENSA_PGALLOC_H */
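The new allocator above assumes a kmem_cache named pgtable_cache that hands out page-sized, naturally aligned pte tables; it would be set up from pgtable_cache_init(), which pgtable.h now declares. A hedged sketch of such an init, assuming the usual kmem_cache_create(name, size, align, flags, ctor) prototype; the real code presumably also pre-initializes every entry to an invalid pte, which is omitted here:

/* Sketch, not the actual arch/xtensa implementation. */
#include <linux/slab.h>

struct kmem_cache *pgtable_cache;

void __init pgtable_cache_init(void)
{
        /* One object per pte table: PTRS_PER_PTE entries of sizeof(pte_t)
         * bytes each, i.e. exactly one page, aligned to a page boundary. */
        pgtable_cache = kmem_cache_create("pgtable_cache",
                                          PTRS_PER_PTE * sizeof(pte_t),
                                          PTRS_PER_PTE * sizeof(pte_t),
                                          SLAB_HWCACHE_ALIGN,
                                          NULL /* ctor: see note above */);
}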
diff --git a/include/asm-xtensa/pgtable.h b/include/asm-xtensa/pgtable.h
index 06850f3b26a7..c0fcc1c9660c 100644
--- a/include/asm-xtensa/pgtable.h
+++ b/include/asm-xtensa/pgtable.h
@@ -1,11 +1,11 @@
1/* 1/*
2 * linux/include/asm-xtensa/pgtable.h 2 * include/asm-xtensa/pgtable.h
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 * 7 *
8 * Copyright (C) 2001 - 2005 Tensilica Inc. 8 * Copyright (C) 2001 - 2007 Tensilica Inc.
9 */ 9 */
10 10
11#ifndef _XTENSA_PGTABLE_H 11#ifndef _XTENSA_PGTABLE_H
@@ -23,7 +23,7 @@
23 23
24/* 24/*
25 * The Xtensa architecture port of Linux has a two-level page table system, 25 * The Xtensa architecture port of Linux has a two-level page table system,
26 * i.e. the logical three-level Linux page table layout are folded. 26 * i.e. the logical three-level Linux page table layout is folded.
27 * Each task has the following memory page tables: 27 * Each task has the following memory page tables:
28 * 28 *
29 * PGD table (page directory), ie. 3rd-level page table: 29 * PGD table (page directory), ie. 3rd-level page table:
@@ -43,6 +43,7 @@
43 * 43 *
44 * The individual pages are 4 kB big with special pages for the empty_zero_page. 44 * The individual pages are 4 kB big with special pages for the empty_zero_page.
45 */ 45 */
46
46#define PGDIR_SHIFT 22 47#define PGDIR_SHIFT 22
47#define PGDIR_SIZE (1UL << PGDIR_SHIFT) 48#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
48#define PGDIR_MASK (~(PGDIR_SIZE-1)) 49#define PGDIR_MASK (~(PGDIR_SIZE-1))
@@ -53,24 +54,26 @@
53 */ 54 */
54#define PTRS_PER_PTE 1024 55#define PTRS_PER_PTE 1024
55#define PTRS_PER_PTE_SHIFT 10 56#define PTRS_PER_PTE_SHIFT 10
56#define PTRS_PER_PMD 1
57#define PTRS_PER_PGD 1024 57#define PTRS_PER_PGD 1024
58#define PGD_ORDER 0 58#define PGD_ORDER 0
59#define PMD_ORDER 0
60#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) 59#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
61#define FIRST_USER_ADDRESS 0 60#define FIRST_USER_ADDRESS 0
62#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT) 61#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
63 62
64/* virtual memory area. We keep a distance to other memory regions to be 63/*
64 * Virtual memory area. We keep a distance to other memory regions to be
65 * on the safe side. We also use this area for cache aliasing. 65 * on the safe side. We also use this area for cache aliasing.
66 */ 66 */
67 67
68// FIXME: virtual memory area must be configuration-dependent
69
70#define VMALLOC_START 0xC0000000 68#define VMALLOC_START 0xC0000000
71#define VMALLOC_END 0xC7FF0000 69#define VMALLOC_END 0xC6FEFFFF
70#define TLBTEMP_BASE_1 0xC6FF0000
71#define TLBTEMP_BASE_2 0xC6FF8000
72#define MODULE_START 0xC7000000
73#define MODULE_END 0xC7FFFFFF
72 74
73/* Xtensa Linux config PTE layout (when present): 75/*
76 * Xtensa Linux config PTE layout (when present):
74 * 31-12: PPN 77 * 31-12: PPN
75 * 11-6: Software 78 * 11-6: Software
76 * 5-4: RING 79 * 5-4: RING
@@ -86,47 +89,55 @@
86 * See further below for PTE layout for swapped-out pages. 89 * See further below for PTE layout for swapped-out pages.
87 */ 90 */
88 91
89#define _PAGE_VALID (1<<0) /* hardware: page is accessible */ 92#define _PAGE_HW_EXEC (1<<0) /* hardware: page is executable */
90#define _PAGE_WRENABLE (1<<1) /* hardware: page is writable */ 93#define _PAGE_HW_WRITE (1<<1) /* hardware: page is writable */
94
95#define _PAGE_FILE (1<<1) /* non-linear mapping, if !present */
96#define _PAGE_PROTNONE (3<<0) /* special case for VM_PROT_NONE */
91 97
92/* None of these cache modes include MP coherency: */ 98/* None of these cache modes include MP coherency: */
93#define _PAGE_NO_CACHE (0<<2) /* bypass, non-speculative */ 99#define _PAGE_CA_BYPASS (0<<2) /* bypass, non-speculative */
94#if XCHAL_DCACHE_IS_WRITEBACK 100#define _PAGE_CA_WB (1<<2) /* write-back */
95# define _PAGE_WRITEBACK (1<<2) /* write back */ 101#define _PAGE_CA_WT (2<<2) /* write-through */
96# define _PAGE_WRITETHRU (2<<2) /* write through */ 102#define _PAGE_CA_MASK (3<<2)
97#else 103#define _PAGE_INVALID (3<<2)
98# define _PAGE_WRITEBACK (1<<2) /* assume write through */
99# define _PAGE_WRITETHRU (1<<2)
100#endif
101#define _PAGE_NOALLOC (3<<2) /* don't allocate cache,if not cached */
102#define _CACHE_MASK (3<<2)
103 104
104#define _PAGE_USER (1<<4) /* user access (ring=1) */ 105#define _PAGE_USER (1<<4) /* user access (ring=1) */
105#define _PAGE_KERNEL (0<<4) /* kernel access (ring=0) */
106 106
107/* Software */ 107/* Software */
108#define _PAGE_RW (1<<6) /* software: page writable */ 108#define _PAGE_WRITABLE_BIT 6
109#define _PAGE_WRITABLE (1<<6) /* software: page writable */
109#define _PAGE_DIRTY (1<<7) /* software: page dirty */ 110#define _PAGE_DIRTY (1<<7) /* software: page dirty */
110#define _PAGE_ACCESSED (1<<8) /* software: page accessed (read) */ 111#define _PAGE_ACCESSED (1<<8) /* software: page accessed (read) */
111#define _PAGE_FILE (1<<9) /* nonlinear file mapping*/
112 112
113#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _CACHE_MASK | _PAGE_DIRTY) 113/* On older HW revisions, we always have to set bit 0 */
114#define _PAGE_PRESENT ( _PAGE_VALID | _PAGE_WRITEBACK | _PAGE_ACCESSED) 114#if XCHAL_HW_VERSION_MAJOR < 2000
115# define _PAGE_VALID (1<<0)
116#else
117# define _PAGE_VALID 0
118#endif
115 119
116#ifdef CONFIG_MMU 120#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
121#define _PAGE_PRESENT (_PAGE_VALID | _PAGE_CA_WB | _PAGE_ACCESSED)
117 122
118# define PAGE_NONE __pgprot(_PAGE_PRESENT) 123#ifdef CONFIG_MMU
119# define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_RW)
120# define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER)
121# define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER)
122# define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_KERNEL | _PAGE_WRENABLE)
123# define PAGE_INVALID __pgprot(_PAGE_USER)
124 124
125# if (DCACHE_WAY_SIZE > PAGE_SIZE) 125#define PAGE_NONE __pgprot(_PAGE_INVALID | _PAGE_USER | _PAGE_PROTNONE)
126# define PAGE_DIRECTORY __pgprot(_PAGE_VALID | _PAGE_ACCESSED | _PAGE_KERNEL) 126#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER)
127# else 127#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
128# define PAGE_DIRECTORY __pgprot(_PAGE_PRESENT | _PAGE_KERNEL) 128#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER)
129# endif 129#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
130#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE)
131#define PAGE_SHARED_EXEC \
132 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
133#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
134#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)
135
136#if (DCACHE_WAY_SIZE > PAGE_SIZE)
137# define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED)
138#else
139# define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED | _PAGE_CA_WB)
140#endif
130 141
131#else /* no mmu */ 142#else /* no mmu */
132 143
@@ -145,23 +156,23 @@
145 * What follows is the closest we can get by reasonable means.. 156 * What follows is the closest we can get by reasonable means..
146 * See linux/mm/mmap.c for protection_map[] array that uses these definitions. 157 * See linux/mm/mmap.c for protection_map[] array that uses these definitions.
147 */ 158 */
148#define __P000 PAGE_NONE /* private --- */ 159#define __P000 PAGE_NONE /* private --- */
149#define __P001 PAGE_READONLY /* private --r */ 160#define __P001 PAGE_READONLY /* private --r */
150#define __P010 PAGE_COPY /* private -w- */ 161#define __P010 PAGE_COPY /* private -w- */
151#define __P011 PAGE_COPY /* private -wr */ 162#define __P011 PAGE_COPY /* private -wr */
152#define __P100 PAGE_READONLY /* private x-- */ 163#define __P100 PAGE_READONLY_EXEC /* private x-- */
153#define __P101 PAGE_READONLY /* private x-r */ 164#define __P101 PAGE_READONLY_EXEC /* private x-r */
154#define __P110 PAGE_COPY /* private xw- */ 165#define __P110 PAGE_COPY_EXEC /* private xw- */
155#define __P111 PAGE_COPY /* private xwr */ 166#define __P111 PAGE_COPY_EXEC /* private xwr */
156 167
157#define __S000 PAGE_NONE /* shared --- */ 168#define __S000 PAGE_NONE /* shared --- */
158#define __S001 PAGE_READONLY /* shared --r */ 169#define __S001 PAGE_READONLY /* shared --r */
159#define __S010 PAGE_SHARED /* shared -w- */ 170#define __S010 PAGE_SHARED /* shared -w- */
160#define __S011 PAGE_SHARED /* shared -wr */ 171#define __S011 PAGE_SHARED /* shared -wr */
161#define __S100 PAGE_READONLY /* shared x-- */ 172#define __S100 PAGE_READONLY_EXEC /* shared x-- */
162#define __S101 PAGE_READONLY /* shared x-r */ 173#define __S101 PAGE_READONLY_EXEC /* shared x-r */
163#define __S110 PAGE_SHARED /* shared xw- */ 174#define __S110 PAGE_SHARED_EXEC /* shared xw- */
164#define __S111 PAGE_SHARED /* shared xwr */ 175#define __S111 PAGE_SHARED_EXEC /* shared xwr */
165 176
166#ifndef __ASSEMBLY__ 177#ifndef __ASSEMBLY__
167 178
@@ -183,35 +194,42 @@ extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
183#define pmd_page(pmd) virt_to_page(pmd_val(pmd)) 194#define pmd_page(pmd) virt_to_page(pmd_val(pmd))
184 195
185/* 196/*
186 * The following only work if pte_present() is true. 197 * pte status.
187 */ 198 */
188#define pte_none(pte) (!(pte_val(pte) ^ _PAGE_USER)) 199#define pte_none(pte) (pte_val(pte) == _PAGE_INVALID)
189#define pte_present(pte) (pte_val(pte) & _PAGE_VALID) 200#define pte_present(pte) \
201 (((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_INVALID) \
202 || ((pte_val(pte) & _PAGE_PROTNONE) == _PAGE_PROTNONE))
190#define pte_clear(mm,addr,ptep) \ 203#define pte_clear(mm,addr,ptep) \
191 do { update_pte(ptep, __pte(_PAGE_USER)); } while(0) 204 do { update_pte(ptep, __pte(_PAGE_INVALID)); } while(0)
192 205
193#define pmd_none(pmd) (!pmd_val(pmd)) 206#define pmd_none(pmd) (!pmd_val(pmd))
194#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK) 207#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK)
195#define pmd_clear(pmdp) do { set_pmd(pmdp, __pmd(0)); } while (0)
196#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK) 208#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
209#define pmd_clear(pmdp) do { set_pmd(pmdp, __pmd(0)); } while (0)
197 210
198/* Note: We use the _PAGE_USER bit to indicate write-protect kernel memory */ 211static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
199
200static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
201static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } 212static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
202static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } 213static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
203static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } 214static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
204static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~(_PAGE_RW | _PAGE_WRENABLE); return pte; } 215static inline pte_t pte_wrprotect(pte_t pte)
205static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; } 216 { pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
206static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } 217static inline pte_t pte_mkclean(pte_t pte)
207static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; } 218 { pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HW_WRITE); return pte; }
208static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; } 219static inline pte_t pte_mkold(pte_t pte)
209static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_RW; return pte; } 220 { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
221static inline pte_t pte_mkdirty(pte_t pte)
222 { pte_val(pte) |= _PAGE_DIRTY; return pte; }
223static inline pte_t pte_mkyoung(pte_t pte)
224 { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
225static inline pte_t pte_mkwrite(pte_t pte)
226 { pte_val(pte) |= _PAGE_WRITABLE; return pte; }
210 227
211/* 228/*
212 * Conversion functions: convert a page and protection to a page entry, 229 * Conversion functions: convert a page and protection to a page entry,
213 * and a page entry and page directory to the page they refer to. 230 * and a page entry and page directory to the page they refer to.
214 */ 231 */
232
215#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) 233#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
216#define pte_same(a,b) (pte_val(a) == pte_val(b)) 234#define pte_same(a,b) (pte_val(a) == pte_val(b))
217#define pte_page(x) pfn_to_page(pte_pfn(x)) 235#define pte_page(x) pfn_to_page(pte_pfn(x))
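The presence test above no longer keys on a single valid bit: a pte is present when its cache-attribute field is anything other than _PAGE_INVALID, or when the PROTNONE pattern sits in bits 0-1. Plugging in the bit values from this hunk (a worked example, not additional kernel code):

/*
 * PAGE_NONE   = _PAGE_INVALID | _PAGE_USER | _PAGE_PROTNONE
 *             = 0x0c | 0x10 | 0x03 = 0x1f
 *      -> CA field == _PAGE_INVALID, but bits 0-1 == 11, so
 *         pte_present() is true while the hardware allows no access.
 *
 * Cleared pte = _PAGE_INVALID = 0x0c
 *      -> pte_none() is true, pte_present() is false.
 *
 * Swap and file ptes (see the swap-entry hunk below) keep CA == _PAGE_INVALID
 * with bits 0-1 != 11, so they are neither none nor present.
 */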
@@ -232,8 +250,9 @@ static inline void update_pte(pte_t *ptep, pte_t pteval)
232{ 250{
233 *ptep = pteval; 251 *ptep = pteval;
234#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK 252#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
235 __asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (ptep)); 253 __asm__ __volatile__ ("dhwb %0, 0" :: "a" (ptep));
236#endif 254#endif
255
237} 256}
238 257
239struct mm_struct; 258struct mm_struct;
@@ -249,9 +268,6 @@ static inline void
249set_pmd(pmd_t *pmdp, pmd_t pmdval) 268set_pmd(pmd_t *pmdp, pmd_t pmdval)
250{ 269{
251 *pmdp = pmdval; 270 *pmdp = pmdval;
252#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
253 __asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
254#endif
255} 271}
256 272
257struct vm_area_struct; 273struct vm_area_struct;
@@ -306,52 +322,34 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
306 322
307/* 323/*
308 * Encode and decode a swap entry. 324 * Encode and decode a swap entry.
309 * Each PTE in a process VM's page table is either:
310 * "present" -- valid and not swapped out, protection bits are meaningful;
311 * "not present" -- which further subdivides in these two cases:
312 * "none" -- no mapping at all; identified by pte_none(), set by pte_clear(
313 * "swapped out" -- the page is swapped out, and the SWP macros below
314 * are used to store swap file info in the PTE itself.
315 * 325 *
316 * In the Xtensa processor MMU, any PTE entries in user space (or anywhere 326 * Format of swap pte:
317 * in virtual memory that can map differently across address spaces) 327 * bit 0 MBZ
318 * must have a correct ring value that represents the RASID field that 328 * bit 1 page-file (must be zero)
319 * is changed when switching address spaces. Eg. such PTE entries cannot 329 * bits 2 - 3 page hw access mode (must be 11: _PAGE_INVALID)
320 * be set to ring zero, because that can cause a (global) kernel ASID 330 * bits 4 - 5 ring protection (must be 01: _PAGE_USER)
321 * entry to be created in the TLBs (even with invalid cache attribute), 331 * bits 6 - 10 swap type (5 bits -> 32 types)
322 * potentially causing a multihit exception when going back to another 332 * bits 11 - 31 swap offset / PAGE_SIZE (21 bits -> 8GB)
323 * address space that mapped the same virtual address at another ring. 333
324 * 334 * Format of file pte:
325 * SO: we avoid using ring bits (_PAGE_RING_MASK) in "not present" PTEs. 335 * bit 0 MBZ
326 * We also avoid using the _PAGE_VALID bit which must be zero for non-present 336 * bit 1 page-file (must be one: _PAGE_FILE)
327 * pages. 337 * bits 2 - 3 page hw access mode (must be 11: _PAGE_INVALID)
328 * 338 * bits 4 - 5 ring protection (must be 01: _PAGE_USER)
329 * We end up with the following available bits: 1..3 and 7..31. 339 * bits 6 - 31 file offset / PAGE_SIZE
330 * We don't bother with 1..3 for now (we can use them later if needed),
331 * and chose to allocate 6 bits for SWP_TYPE and the remaining 19 bits
332 * for SWP_OFFSET. At least 5 bits are needed for SWP_TYPE, because it
333 * is currently implemented as an index into swap_info[MAX_SWAPFILES]
334 * and MAX_SWAPFILES is currently defined as 32 in <linux/swap.h>.
335 * However, for some reason all other architectures in the 2.4 kernel
336 * reserve either 6, 7, or 8 bits so I'll not detract from that for now. :)
337 * SWP_OFFSET is an offset into the swap file in page-size units, so
338 * with 4 kB pages, 19 bits supports a maximum swap file size of 2 GB.
339 *
340 * FIXME: 2 GB isn't very big. Other bits can be used to allow
341 * larger swap sizes. In the meantime, it appears relatively easy to get
342 * around the 2 GB limitation by simply using multiple swap files.
343 */ 340 */
344 341
345#define __swp_type(entry) (((entry).val >> 7) & 0x3f) 342#define __swp_type(entry) (((entry).val >> 6) & 0x1f)
346#define __swp_offset(entry) ((entry).val >> 13) 343#define __swp_offset(entry) ((entry).val >> 11)
347#define __swp_entry(type,offs) ((swp_entry_t) {((type) << 7) | ((offs) << 13)}) 344#define __swp_entry(type,offs) \
345 ((swp_entry_t) {((type) << 6) | ((offs) << 11) | _PAGE_INVALID})
348#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 346#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
349#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 347#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
350 348
351#define PTE_FILE_MAX_BITS 29 349#define PTE_FILE_MAX_BITS 28
352#define pte_to_pgoff(pte) (pte_val(pte) >> 3) 350#define pte_to_pgoff(pte) (pte_val(pte) >> 4)
353#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE }) 351#define pgoff_to_pte(off) \
354 352 ((pte_t) { ((off) << 4) | _PAGE_INVALID | _PAGE_FILE })
355 353
356#endif /* !defined (__ASSEMBLY__) */ 354#endif /* !defined (__ASSEMBLY__) */
357 355
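The new swap encoding keeps the cache-attribute field at _PAGE_INVALID, so a swapped-out pte can never be mistaken for a present one. A short worked example with arbitrary values (type 2, page offset 0x1234):

/*
 * __swp_entry(2, 0x1234)
 *     = (2 << 6) | (0x1234 << 11) | _PAGE_INVALID
 *     = 0x80 | 0x91a000 | 0x0c = 0x91a08c
 *
 * __swp_type(entry)   = (0x91a08c >> 6) & 0x1f = 2
 * __swp_offset(entry) =  0x91a08c >> 11        = 0x1234
 *
 * As the new comment states, 5 type bits give 32 swap types and
 * 21 offset bits give 2^21 pages * 4 kB = 8 GB per swap area.
 */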
@@ -394,13 +392,12 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
394 * remap a physical page `pfn' of size `size' with page protection `prot' 392 * remap a physical page `pfn' of size `size' with page protection `prot'
395 * into virtual address `from' 393 * into virtual address `from'
396 */ 394 */
395
397#define io_remap_pfn_range(vma,from,pfn,size,prot) \ 396#define io_remap_pfn_range(vma,from,pfn,size,prot) \
398 remap_pfn_range(vma, from, pfn, size, prot) 397 remap_pfn_range(vma, from, pfn, size, prot)
399 398
400 399
401/* No page table caches to init */ 400extern void pgtable_cache_init(void);
402
403#define pgtable_cache_init() do { } while (0)
404 401
405typedef pte_t *pte_addr_t; 402typedef pte_t *pte_addr_t;
406 403
diff --git a/include/asm-xtensa/processor.h b/include/asm-xtensa/processor.h
index 4feb9f7f35a6..35145bcd96eb 100644
--- a/include/asm-xtensa/processor.h
+++ b/include/asm-xtensa/processor.h
@@ -33,7 +33,7 @@
33 * the 1 GB requirement applies to the stack as well. 33 * the 1 GB requirement applies to the stack as well.
34 */ 34 */
35 35
36#define TASK_SIZE 0x40000000 36#define TASK_SIZE __XTENSA_UL_CONST(0x40000000)
37 37
38/* 38/*
39 * General exception cause assigned to debug exceptions. Debug exceptions go 39 * General exception cause assigned to debug exceptions. Debug exceptions go
diff --git a/include/asm-xtensa/syscall.h b/include/asm-xtensa/syscall.h
index 6cb0d42f11c8..05cebf8f62b1 100644
--- a/include/asm-xtensa/syscall.h
+++ b/include/asm-xtensa/syscall.h
@@ -1,3 +1,13 @@
1/*
2 * include/asm-xtensa/syscall.h
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2001 - 2007 Tensilica Inc.
9 */
10
1struct pt_regs; 11struct pt_regs;
2struct sigaction; 12struct sigaction;
3asmlinkage long xtensa_execve(char*, char**, char**, struct pt_regs*); 13asmlinkage long xtensa_execve(char*, char**, char**, struct pt_regs*);
@@ -17,4 +27,16 @@ asmlinkage long sys_rt_sigaction(int,
17 const struct sigaction __user *, 27 const struct sigaction __user *,
18 struct sigaction __user *, 28 struct sigaction __user *,
19 size_t); 29 size_t);
20asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg); 30asmlinkage long xtensa_shmat(int, char __user *, int);
31asmlinkage long xtensa_fadvise64_64(int, int,
32 unsigned long long, unsigned long long);
33
34/* Should probably move to linux/syscalls.h */
35struct pollfd;
36asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
37 fd_set __user *exp, struct timespec __user *tsp, void __user *sig);
38asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
39 struct timespec __user *tsp, const sigset_t __user *sigmask,
40 size_t sigsetsize);
41
42
diff --git a/include/asm-xtensa/termbits.h b/include/asm-xtensa/termbits.h
index 9972c25ec86f..85aa6a3c0b6e 100644
--- a/include/asm-xtensa/termbits.h
+++ b/include/asm-xtensa/termbits.h
@@ -157,6 +157,7 @@ struct ktermios {
157#define HUPCL 0002000 157#define HUPCL 0002000
158#define CLOCAL 0004000 158#define CLOCAL 0004000
159#define CBAUDEX 0010000 159#define CBAUDEX 0010000
160#define BOTHER 0010000
160#define B57600 0010001 161#define B57600 0010001
161#define B115200 0010002 162#define B115200 0010002
162#define B230400 0010003 163#define B230400 0010003
@@ -172,10 +173,12 @@ struct ktermios {
172#define B3000000 0010015 173#define B3000000 0010015
173#define B3500000 0010016 174#define B3500000 0010016
174#define B4000000 0010017 175#define B4000000 0010017
175#define CIBAUD 002003600000 /* input baud rate (not used) */ 176#define CIBAUD 002003600000 /* input baud rate */
176#define CMSPAR 010000000000 /* mark or space (stick) parity */ 177#define CMSPAR 010000000000 /* mark or space (stick) parity */
177#define CRTSCTS 020000000000 /* flow control */ 178#define CRTSCTS 020000000000 /* flow control */
178 179
180#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
181
179/* c_lflag bits */ 182/* c_lflag bits */
180 183
181#define ISIG 0000001 184#define ISIG 0000001
diff --git a/include/asm-xtensa/termios.h b/include/asm-xtensa/termios.h
index f14b42c8dac0..4673f42f88a7 100644
--- a/include/asm-xtensa/termios.h
+++ b/include/asm-xtensa/termios.h
@@ -95,8 +95,10 @@ struct termio {
95 copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ 95 copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
96}) 96})
97 97
98#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) 98#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
99#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) 99#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
100#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
101#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
100 102
101#endif /* __KERNEL__ */ 103#endif /* __KERNEL__ */
102 104
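TCGETS2/TCSETS2 (added in ioctls.h above), together with BOTHER and IBSHIFT from termbits.h, let user space request arbitrary baud rates through struct termios2. A user-space sketch, not part of the patch; the helper name and the example rate are made up:

/* User-space sketch: request a non-standard baud rate. */
#include <sys/ioctl.h>
#include <asm/ioctls.h>    /* TCGETS2, TCSETS2                        */
#include <asm/termbits.h>  /* struct termios2, CBAUD, BOTHER, IBSHIFT */

static int set_custom_baud(int fd, int baud)   /* e.g. baud = 12345 */
{
        struct termios2 tio;

        if (ioctl(fd, TCGETS2, &tio) < 0)
                return -1;

        tio.c_cflag &= ~(CBAUD | (CBAUD << IBSHIFT)); /* drop Bxxx codes      */
        tio.c_cflag |= BOTHER | (BOTHER << IBSHIFT);  /* use c_?speed instead */
        tio.c_ospeed = baud;
        tio.c_ispeed = baud;

        return ioctl(fd, TCSETS2, &tio);
}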
diff --git a/include/asm-xtensa/timex.h b/include/asm-xtensa/timex.h
index 28c7985a4000..a5fca59fba9e 100644
--- a/include/asm-xtensa/timex.h
+++ b/include/asm-xtensa/timex.h
@@ -41,10 +41,10 @@
41extern unsigned long ccount_per_jiffy; 41extern unsigned long ccount_per_jiffy;
42extern unsigned long ccount_nsec; 42extern unsigned long ccount_nsec;
43#define CCOUNT_PER_JIFFY ccount_per_jiffy 43#define CCOUNT_PER_JIFFY ccount_per_jiffy
44#define CCOUNT_NSEC ccount_nsec 44#define NSEC_PER_CCOUNT ccount_nsec
45#else 45#else
46#define CCOUNT_PER_JIFFY (CONFIG_XTENSA_CPU_CLOCK*(1000000UL/HZ)) 46#define CCOUNT_PER_JIFFY (CONFIG_XTENSA_CPU_CLOCK*(1000000UL/HZ))
47#define CCOUNT_NSEC (1000000000UL / CONFIG_XTENSA_CPU_CLOCK) 47#define NSEC_PER_CCOUNT (1000UL / CONFIG_XTENSA_CPU_CLOCK)
48#endif 48#endif
49 49
50 50
diff --git a/include/asm-xtensa/tlb.h b/include/asm-xtensa/tlb.h
index 4562b2dcfbc0..4830232017af 100644
--- a/include/asm-xtensa/tlb.h
+++ b/include/asm-xtensa/tlb.h
@@ -11,14 +11,36 @@
11#ifndef _XTENSA_TLB_H 11#ifndef _XTENSA_TLB_H
12#define _XTENSA_TLB_H 12#define _XTENSA_TLB_H
13 13
14#define tlb_start_vma(tlb,vma) do { } while (0) 14#include <asm/cache.h>
15#define tlb_end_vma(tlb,vma) do { } while (0) 15#include <asm/page.h>
16#define __tlb_remove_tlb_entry(tlb,pte,addr) do { } while (0) 16
17#if (DCACHE_WAY_SIZE <= PAGE_SIZE)
18
19/* Note, read http://lkml.org/lkml/2004/1/15/6 */
20
21# define tlb_start_vma(tlb,vma) do { } while (0)
22# define tlb_end_vma(tlb,vma) do { } while (0)
23
24#else
17 25
26# define tlb_start_vma(tlb, vma) \
27 do { \
28 if (!tlb->fullmm) \
29 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
30 } while(0)
31
32# define tlb_end_vma(tlb, vma) \
33 do { \
34 if (!tlb->fullmm) \
35 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
36 } while(0)
37
38#endif
39
40#define __tlb_remove_tlb_entry(tlb,pte,addr) do { } while (0)
18#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) 41#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
19 42
20#include <asm-generic/tlb.h> 43#include <asm-generic/tlb.h>
21#include <asm/page.h>
22 44
23#define __pte_free_tlb(tlb,pte) pte_free(pte) 45#define __pte_free_tlb(tlb,pte) pte_free(pte)
24 46
diff --git a/include/asm-xtensa/types.h b/include/asm-xtensa/types.h
index 9d99a8e9e337..f1e84526f999 100644
--- a/include/asm-xtensa/types.h
+++ b/include/asm-xtensa/types.h
@@ -11,6 +11,15 @@
11#ifndef _XTENSA_TYPES_H 11#ifndef _XTENSA_TYPES_H
12#define _XTENSA_TYPES_H 12#define _XTENSA_TYPES_H
13 13
14
15#ifdef __ASSEMBLY__
16# define __XTENSA_UL(x) (x)
17# define __XTENSA_UL_CONST(x) x
18#else
19# define __XTENSA_UL(x) ((unsigned long)(x))
20# define __XTENSA_UL_CONST(x) x##UL
21#endif
22
14#ifndef __ASSEMBLY__ 23#ifndef __ASSEMBLY__
15 24
16typedef unsigned short umode_t; 25typedef unsigned short umode_t;
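The __XTENSA_UL_CONST() wrapper added above lets one definition serve both C and assembler sources: C code gets a properly typed unsigned long constant, while .S files get a bare number the assembler accepts. Illustration only, mirroring the page.h and processor.h changes:

#define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT)
/*  in C:        ((1UL) << 12)  -- unsigned long, safe in address arithmetic */
/*  in assembly: ((1)   << 12)  -- plain expression, no 'UL' suffix          */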
diff --git a/include/asm-xtensa/unistd.h b/include/asm-xtensa/unistd.h
index 9bd34024431c..92968aabe34e 100644
--- a/include/asm-xtensa/unistd.h
+++ b/include/asm-xtensa/unistd.h
@@ -151,7 +151,7 @@ __SYSCALL( 61, sys_fcntl64, 3)
151#define __NR_available62 62 151#define __NR_available62 62
152__SYSCALL( 62, sys_ni_syscall, 0) 152__SYSCALL( 62, sys_ni_syscall, 0)
153#define __NR_fadvise64_64 63 153#define __NR_fadvise64_64 63
154__SYSCALL( 63, sys_fadvise64_64, 6) 154__SYSCALL( 63, xtensa_fadvise64_64, 6)
155#define __NR_utime 64 /* glibc 2.3.3 ?? */ 155#define __NR_utime 64 /* glibc 2.3.3 ?? */
156__SYSCALL( 64, sys_utime, 2) 156__SYSCALL( 64, sys_utime, 2)
157#define __NR_utimes 65 157#define __NR_utimes 65
@@ -339,8 +339,8 @@ __SYSCALL(148, sys_setpgid, 2)
339__SYSCALL(149, sys_getpgid, 1) 339__SYSCALL(149, sys_getpgid, 1)
340#define __NR_getppid 150 340#define __NR_getppid 150
341__SYSCALL(150, sys_getppid, 0) 341__SYSCALL(150, sys_getppid, 0)
342#define __NR_available151 151 342#define __NR_getpgrp 151
343__SYSCALL(151, sys_ni_syscall, 0) 343__SYSCALL(151, sys_getpgrp, 0)
344 344
345#define __NR_reserved152 152 /* set_thread_area */ 345#define __NR_reserved152 152 /* set_thread_area */
346__SYSCALL(152, sys_ni_syscall, 0) 346__SYSCALL(152, sys_ni_syscall, 0)
@@ -577,7 +577,112 @@ __SYSCALL(258, sys_keyctl, 5)
577#define __NR_available259 259 577#define __NR_available259 259
578__SYSCALL(259, sys_ni_syscall, 0) 578__SYSCALL(259, sys_ni_syscall, 0)
579 579
580#define __NR_syscall_count 261 580
581#define __NR_readahead 260
582__SYSCALL(260, sys_readahead, 5)
583#define __NR_remap_file_pages 261
584__SYSCALL(261, sys_remap_file_pages, 5)
585#define __NR_migrate_pages 262
586__SYSCALL(262, sys_migrate_pages, 0)
587#define __NR_mbind 263
588__SYSCALL(263, sys_mbind, 6)
589#define __NR_get_mempolicy 264
590__SYSCALL(264, sys_get_mempolicy, 5)
591#define __NR_set_mempolicy 265
592__SYSCALL(265, sys_set_mempolicy, 3)
593#define __NR_unshare 266
594__SYSCALL(266, sys_unshare, 1)
595#define __NR_move_pages 267
596__SYSCALL(267, sys_move_pages, 0)
597#define __NR_splice 268
598__SYSCALL(268, sys_splice, 0)
599#define __NR_tee 269
600__SYSCALL(269, sys_tee, 0)
601#define __NR_vmsplice 270
602__SYSCALL(270, sys_vmsplice, 0)
603#define __NR_available271 271
604__SYSCALL(271, sys_ni_syscall, 0)
605
606#define __NR_pselect6 272
607__SYSCALL(272, sys_pselect6, 0)
608#define __NR_ppoll 273
609__SYSCALL(273, sys_ppoll, 0)
610#define __NR_epoll_pwait 274
611__SYSCALL(274, sys_epoll_pwait, 0)
612#define __NR_available275 275
613__SYSCALL(275, sys_ni_syscall, 0)
614
615#define __NR_inotify_init 276
616__SYSCALL(276, sys_inotify_init, 0)
617#define __NR_inotify_add_watch 277
618__SYSCALL(277, sys_inotify_add_watch, 3)
619#define __NR_inotify_rm_watch 278
620__SYSCALL(278, sys_inotify_rm_watch, 2)
621#define __NR_available279 279
622__SYSCALL(279, sys_ni_syscall, 0)
623
624#define __NR_getcpu 280
625__SYSCALL(280, sys_getcpu, 0)
626#define __NR_kexec_load 281
627__SYSCALL(281, sys_ni_syscall, 0)
628
629#define __NR_ioprio_set 282
630__SYSCALL(282, sys_ioprio_set, 2)
631#define __NR_ioprio_get 283
632__SYSCALL(283, sys_ioprio_get, 3)
633
634#define __NR_set_robust_list 284
635__SYSCALL(284, sys_set_robust_list, 3)
636#define __NR_get_robust_list 285
637__SYSCALL(285, sys_get_robust_list, 3)
638#define __NR_reserved286 286 /* sync_file_rangeX */
639__SYSCALL(286, sys_ni_syscall, 3)
640#define __NR_available287 287
641__SYSCALL(287, sys_faccessat, 0)
642
643/* Relative File Operations */
644
645#define __NR_openat 288
646__SYSCALL(288, sys_openat, 4)
647#define __NR_mkdirat 289
648__SYSCALL(289, sys_mkdirat, 3)
649#define __NR_mknodat 290
650__SYSCALL(290, sys_mknodat, 4)
651#define __NR_unlinkat 291
652__SYSCALL(291, sys_unlinkat, 3)
653#define __NR_renameat 292
654__SYSCALL(292, sys_renameat, 4)
655#define __NR_linkat 293
656__SYSCALL(293, sys_linkat, 5)
657#define __NR_symlinkat 294
658__SYSCALL(294, sys_symlinkat, 3)
659#define __NR_readlinkat 295
660__SYSCALL(295, sys_readlinkat, 4)
661#define __NR_utimensat 296
662__SYSCALL(296, sys_utimensat, 0)
663#define __NR_fchownat 297
664__SYSCALL(297, sys_fchownat, 5)
665#define __NR_futimesat 298
666__SYSCALL(298, sys_futimesat, 4)
667#define __NR_fstatat64 299
668__SYSCALL(299, sys_fstatat64, 0)
669#define __NR_fchmodat 300
670__SYSCALL(300, sys_fchmodat, 4)
671#define __NR_faccessat 301
672__SYSCALL(301, sys_faccessat, 4)
673#define __NR_available302 302
674__SYSCALL(302, sys_ni_syscall, 0)
675#define __NR_available303 303
676__SYSCALL(303, sys_ni_syscall, 0)
677
678#define __NR_signalfd 304
679__SYSCALL(304, sys_signalfd, 3)
680#define __NR_timerfd 305
681__SYSCALL(305, sys_timerfd, 4)
682#define __NR_eventfd 306
683__SYSCALL(306, sys_eventfd, 1)
684
685#define __NR_syscall_count 307
581 686
582/* 687/*
583 * sysxtensa syscall handler 688 * sysxtensa syscall handler
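The __SYSCALL() entries above only take effect once some translation unit defines the macro and includes this header to emit a dispatch table. A hedged sketch of that common pattern, assuming the header supplies an empty default __SYSCALL() and a conventional include guard; the typedef, macro body, and table name are illustrative, not necessarily what arch/xtensa uses:

/* Hedged sketch of a dispatch-table builder; sys_ni_syscall and friends
 * would come from <linux/syscalls.h> in a real kernel source file.      */
#include <asm/unistd.h>                 /* first pass: only the __NR_* values */

typedef long (*syscall_t)(long, long, long, long, long, long);

syscall_t sys_call_table[__NR_syscall_count] = {
        /* Default every slot, then let the list below override known ones. */
        [0 ... __NR_syscall_count - 1] = (syscall_t) &sys_ni_syscall,

#undef  __SYSCALL
#define __SYSCALL(nr, entry, nargs)     [nr] = (syscall_t) &entry,
#undef  _XTENSA_UNISTD_H                /* allow a second pass past the guard */
#include <asm/unistd.h>                 /* second pass: emit the initializers */
};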
@@ -612,8 +717,19 @@ __SYSCALL(259, sys_ni_syscall, 0)
612#define __ARCH_WANT_SYS_LLSEEK 717#define __ARCH_WANT_SYS_LLSEEK
613#define __ARCH_WANT_SYS_RT_SIGACTION 718#define __ARCH_WANT_SYS_RT_SIGACTION
614#define __ARCH_WANT_SYS_RT_SIGSUSPEND 719#define __ARCH_WANT_SYS_RT_SIGSUSPEND
720#define __ARCH_WANT_SYS_GETPGRP
615 721
616#endif /* __KERNEL__ */ 722/*
723 * Ignore legacy system calls in the checksyscalls.sh script
724 */
617 725
618#endif /* _XTENSA_UNISTD_H */ 726#define __IGNORE_fork /* use clone */
727#define __IGNORE_time
728#define __IGNORE_alarm /* use setitimer */
729#define __IGNORE_pause
730#define __IGNORE_mmap /* use mmap2 */
731#define __IGNORE_vfork /* use clone */
732#define __IGNORE_fadvise64 /* use fadvise64_64 */
619 733
734#endif /* __KERNEL__ */
735#endif /* _XTENSA_UNISTD_H */