Diffstat (limited to 'include')
46 files changed, 4003 insertions, 716 deletions
diff --git a/include/asm-sparc/idprom.h b/include/asm-sparc/idprom.h
index d856e640acd3..59083ed85232 100644
--- a/include/asm-sparc/idprom.h
+++ b/include/asm-sparc/idprom.h
@@ -7,27 +7,19 @@
 #ifndef _SPARC_IDPROM_H
 #define _SPARC_IDPROM_H
 
-/* Offset into the EEPROM where the id PROM is located on the 4c */
-#define IDPROM_OFFSET 0x7d8
+#include <linux/types.h>
 
-/* On sun4m; physical. */
-/* MicroSPARC(-II) does not decode 31rd bit, but it works. */
-#define IDPROM_OFFSET_M 0xfd8
-
-struct idprom
-{
-	unsigned char	id_format;	/* Format identifier (always 0x01) */
-	unsigned char	id_machtype;	/* Machine type */
-	unsigned char	id_ethaddr[6];	/* Hardware ethernet address */
-	long		id_date;	/* Date of manufacture */
-	unsigned int	id_sernum:24;	/* Unique serial number */
-	unsigned char	id_cksum;	/* Checksum - xor of the data bytes */
-	unsigned char	reserved[16];
+struct idprom {
+	u8		id_format;	/* Format identifier (always 0x01) */
+	u8		id_machtype;	/* Machine type */
+	u8		id_ethaddr[6];	/* Hardware ethernet address */
+	s32		id_date;	/* Date of manufacture */
+	u32		id_sernum:24;	/* Unique serial number */
+	u8		id_cksum;	/* Checksum - xor of the data bytes */
+	u8		reserved[16];
 };
 
 extern struct idprom *idprom;
 extern void idprom_init(void);
 
-#define IDPROM_SIZE  (sizeof(struct idprom))
-
 #endif /* !(_SPARC_IDPROM_H) */
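
A minimal sketch (not part of the patch) of how the id_cksum field can be
validated, per the "xor of the data bytes" rule in the struct comment. The
helper name is hypothetical, and it assumes the compiler packs id_cksum into
the final byte of the id_sernum word, as the PROM layout requires:

	/* XOR the 16 idprom header bytes: format(1) + machtype(1) +
	 * ethaddr(6) + date(4) + sernum(3) + cksum(1).  Since id_cksum
	 * is the XOR of the preceding data bytes, a valid PROM image
	 * XORs to zero overall. */
	static int idprom_checksum_ok(const struct idprom *id)
	{
		const unsigned char *p = (const unsigned char *)id;
		unsigned char sum = 0;
		int i;

		for (i = 0; i < 16; i++)
			sum ^= p[i];
		return sum == 0;
	}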
diff --git a/include/asm-sparc/oplib.h b/include/asm-sparc/oplib.h
index d0d76b30eb4c..f283f8aaf6a9 100644
--- a/include/asm-sparc/oplib.h
+++ b/include/asm-sparc/oplib.h
@@ -165,6 +165,7 @@ enum prom_input_device {
 	PROMDEV_ITTYA,			/* input from ttya */
 	PROMDEV_ITTYB,			/* input from ttyb */
 	PROMDEV_IRSC,			/* input from rsc */
+	PROMDEV_IVCONS,			/* input from virtual-console */
 	PROMDEV_I_UNK,
 };
 
@@ -177,6 +178,7 @@ enum prom_output_device {
 	PROMDEV_OTTYA,			/* to ttya */
 	PROMDEV_OTTYB,			/* to ttyb */
 	PROMDEV_ORSC,			/* to rsc */
+	PROMDEV_OVCONS,			/* to virtual-console */
 	PROMDEV_O_UNK,
 };
 
diff --git a/include/asm-sparc/uaccess.h b/include/asm-sparc/uaccess.h
index f8f1ec1f06e6..3cf132e1aa25 100644
--- a/include/asm-sparc/uaccess.h
+++ b/include/asm-sparc/uaccess.h
@@ -120,17 +120,6 @@ case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
 default: __pu_ret = __put_user_bad(); break; \
 } } else { __pu_ret = -EFAULT; } __pu_ret; })
 
-#define __put_user_check_ret(x,addr,size,retval) ({ \
-register int __foo __asm__ ("l1"); \
-if (__access_ok(addr,size)) { \
-switch (size) { \
-case 1: __put_user_asm_ret(x,b,addr,retval,__foo); break; \
-case 2: __put_user_asm_ret(x,h,addr,retval,__foo); break; \
-case 4: __put_user_asm_ret(x,,addr,retval,__foo); break; \
-case 8: __put_user_asm_ret(x,d,addr,retval,__foo); break; \
-default: if (__put_user_bad()) return retval; break; \
-} } else return retval; })
-
 #define __put_user_nocheck(x,addr,size) ({ \
 register int __pu_ret; \
 switch (size) { \
@@ -141,16 +130,6 @@ case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
 default: __pu_ret = __put_user_bad(); break; \
 } __pu_ret; })
 
-#define __put_user_nocheck_ret(x,addr,size,retval) ({ \
-register int __foo __asm__ ("l1"); \
-switch (size) { \
-case 1: __put_user_asm_ret(x,b,addr,retval,__foo); break; \
-case 2: __put_user_asm_ret(x,h,addr,retval,__foo); break; \
-case 4: __put_user_asm_ret(x,,addr,retval,__foo); break; \
-case 8: __put_user_asm_ret(x,d,addr,retval,__foo); break; \
-default: if (__put_user_bad()) return retval; break; \
-} })
-
 #define __put_user_asm(x,size,addr,ret) \
 __asm__ __volatile__( \
 	"/* Put user asm, inline. */\n" \
@@ -170,32 +149,6 @@ __asm__ __volatile__( \
 : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \
   "i" (-EFAULT))
 
-#define __put_user_asm_ret(x,size,addr,ret,foo) \
-if (__builtin_constant_p(ret) && ret == -EFAULT) \
-__asm__ __volatile__( \
-	"/* Put user asm ret, inline. */\n" \
-"1:\t"	"st"#size " %1, %2\n\n\t" \
-	".section __ex_table,#alloc\n\t" \
-	".align	4\n\t" \
-	".word	1b, __ret_efault\n\n\t" \
-	".previous\n\n\t" \
-	: "=r" (foo) : "r" (x), "m" (*__m(addr))); \
-else \
-__asm__ __volatile( \
-	"/* Put user asm ret, inline. */\n" \
-"1:\t"	"st"#size " %1, %2\n\n\t" \
-	".section .fixup,#alloc,#execinstr\n\t" \
-	".align	4\n" \
-"3:\n\t" \
-	"ret\n\t" \
-	" restore %%g0, %3, %%o0\n\t" \
-	".previous\n\n\t" \
-	".section __ex_table,#alloc\n\t" \
-	".align	4\n\t" \
-	".word	1b, 3b\n\n\t" \
-	".previous\n\n\t" \
-	: "=r" (foo) : "r" (x), "m" (*__m(addr)), "i" (ret))
-
 extern int __put_user_bad(void);
 
 #define __get_user_check(x,addr,size,type) ({ \
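
For callers nothing changes here: the surviving checked variants back the
standard put_user()/get_user() API, which returns 0 or -EFAULT via the
__ex_table fixup machinery shown above. A minimal, hypothetical usage sketch:

	/* Copy one int out to a userspace pointer. */
	static int put_result(int __user *uptr, int val)
	{
		return put_user(val, uptr); /* 0 on success, -EFAULT on fault */
	}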
diff --git a/include/asm-sparc64/a.out.h b/include/asm-sparc64/a.out.h
index 02af289e3f46..35cb5c9e0c92 100644
--- a/include/asm-sparc64/a.out.h
+++ b/include/asm-sparc64/a.out.h
@@ -95,7 +95,11 @@ struct relocation_info /* used when header.a_machtype == M_SPARC */
 
 #ifdef __KERNEL__
 
-#define STACK_TOP (test_thread_flag(TIF_32BIT) ? 0xf0000000 : 0x80000000000L)
+#define STACK_TOP32	((1UL << 32UL) - PAGE_SIZE)
+#define STACK_TOP64	(0x0000080000000000UL - (1UL << 32UL))
+
+#define STACK_TOP	(test_thread_flag(TIF_32BIT) ? \
+			 STACK_TOP32 : STACK_TOP64)
 
 #endif
 
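Worked example of the new limits (assuming the usual sparc64 8K base page
size, i.e. PAGE_SIZE == 0x2000):

	STACK_TOP32 = (1UL << 32) - 0x2000               = 0x00000000ffffe000
	STACK_TOP64 = 0x0000080000000000UL - (1UL << 32) = 0x000007ff00000000

so 32-bit tasks keep their stack just below 4GB, while 64-bit tasks now sit
4GB below the 0x0000080000000000 boundary rather than right at the old
hardcoded 0x80000000000L value.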
diff --git a/include/asm-sparc64/asi.h b/include/asm-sparc64/asi.h
index 534855660f2a..662a21107ae6 100644
--- a/include/asm-sparc64/asi.h
+++ b/include/asm-sparc64/asi.h
@@ -25,14 +25,27 @@
 
 /* SpitFire and later extended ASIs.  The "(III)" marker designates
  * UltraSparc-III and later specific ASIs.  The "(CMT)" marker designates
- * Chip Multi Threading specific ASIs.
+ * Chip Multi Threading specific ASIs.  "(NG)" designates Niagara specific
+ * ASIs, "(4V)" designates SUN4V specific ASIs.
  */
 #define ASI_PHYS_USE_EC		0x14 /* PADDR, E-cachable		*/
 #define ASI_PHYS_BYPASS_EC_E	0x15 /* PADDR, E-bit			*/
+#define ASI_BLK_AIUP_4V		0x16 /* (4V) Prim, user, block ld/st	*/
+#define ASI_BLK_AIUS_4V		0x17 /* (4V) Sec, user, block ld/st	*/
 #define ASI_PHYS_USE_EC_L	0x1c /* PADDR, E-cachable, little endian*/
 #define ASI_PHYS_BYPASS_EC_E_L	0x1d /* PADDR, E-bit, little endian	*/
+#define ASI_BLK_AIUP_L_4V	0x1e /* (4V) Prim, user, block, l-endian*/
+#define ASI_BLK_AIUS_L_4V	0x1f /* (4V) Sec, user, block, l-endian	*/
+#define ASI_SCRATCHPAD		0x20 /* (4V) Scratch Pad Registers	*/
+#define ASI_MMU			0x21 /* (4V) MMU Context Registers	*/
+#define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 /* (NG) init-store, twin load,
+					 * secondary, user
+					 */
 #define ASI_NUCLEUS_QUAD_LDD	0x24 /* Cachable, qword load		*/
+#define ASI_QUEUE		0x25 /* (4V) Interrupt Queue Registers	*/
+#define ASI_QUAD_LDD_PHYS_4V	0x26 /* (4V) Physical, qword load	*/
 #define ASI_NUCLEUS_QUAD_LDD_L	0x2c /* Cachable, qword load, l-endian	*/
+#define ASI_QUAD_LDD_PHYS_L_4V	0x2e /* (4V) Phys, qword load, l-endian	*/
 #define ASI_PCACHE_DATA_STATUS	0x30 /* (III) PCache data stat RAM diag	*/
 #define ASI_PCACHE_DATA		0x31 /* (III) PCache data RAM diag	*/
 #define ASI_PCACHE_TAG		0x32 /* (III) PCache tag RAM diag	*/
@@ -137,6 +150,9 @@
 #define ASI_FL16_SL		0xdb /* Secondary, 1 16-bit, fpu ld/st,L*/
 #define ASI_BLK_COMMIT_P	0xe0 /* Primary, blk store commit	*/
 #define ASI_BLK_COMMIT_S	0xe1 /* Secondary, blk store commit	*/
+#define ASI_BLK_INIT_QUAD_LDD_P	0xe2 /* (NG) init-store, twin load,
+					 * primary, implicit
+					 */
 #define ASI_BLK_P		0xf0 /* Primary, blk ld/st		*/
 #define ASI_BLK_S		0xf1 /* Secondary, blk ld/st		*/
 #define ASI_BLK_PL		0xf8 /* Primary, blk ld/st, little	*/
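
A minimal sketch (not part of the patch) of how an alternate-space load uses
one of these ASI values: here, reading the sun4v per-cpu scratchpad register 0
(the CPU ID) via ASI_SCRATCHPAD, mirroring the sun4v variant of __GET_CPUID in
cpudata.h below. The scratchpad register offset 0x0 is an assumption based on
SCRATCHPAD_CPUID in scratchpad.h:

	static inline unsigned long sun4v_read_cpuid(void)
	{
		unsigned long id;

		/* ldxa: 64-bit load from [address] using an explicit ASI */
		__asm__ __volatile__("ldxa	[%1] %2, %0"
				     : "=r" (id)
				     : "r" (0x0UL),	/* SCRATCHPAD_CPUID (assumed 0x0) */
				       "i" (0x20)	/* ASI_SCRATCHPAD */);
		return id;
	}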
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 74de79dca915..c66a81bbc84d 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -1,41 +1,224 @@
 /* cpudata.h: Per-cpu parameters.
  *
- * Copyright (C) 2003, 2005 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2003, 2005, 2006 David S. Miller (davem@davemloft.net)
  */
 
 #ifndef _SPARC64_CPUDATA_H
 #define _SPARC64_CPUDATA_H
 
+#include <asm/hypervisor.h>
+#include <asm/asi.h>
+
+#ifndef __ASSEMBLY__
+
 #include <linux/percpu.h>
+#include <linux/threads.h>
 
 typedef struct {
 	/* Dcache line 1 */
 	unsigned int	__softirq_pending; /* must be 1st, see rtrap.S */
 	unsigned int	multiplier;
 	unsigned int	counter;
-	unsigned int	idle_volume;
+	unsigned int	__pad1;
 	unsigned long	clock_tick;	/* %tick's per second */
 	unsigned long	udelay_val;
 
-	/* Dcache line 2 */
-	unsigned int	pgcache_size;
-	unsigned int	__pad1;
-	unsigned long	*pte_cache[2];
-	unsigned long	*pgd_cache;
-
-	/* Dcache line 3, rarely used */
+	/* Dcache line 2, rarely used */
 	unsigned int	dcache_size;
 	unsigned int	dcache_line_size;
 	unsigned int	icache_size;
 	unsigned int	icache_line_size;
 	unsigned int	ecache_size;
 	unsigned int	ecache_line_size;
-	unsigned int	__pad2;
 	unsigned int	__pad3;
+	unsigned int	__pad4;
 } cpuinfo_sparc;
 
 DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
 #define cpu_data(__cpu)		per_cpu(__cpu_data, (__cpu))
 #define local_cpu_data()	__get_cpu_var(__cpu_data)
 
+/* Trap handling code needs to get at a few critical values upon
+ * trap entry and to process TSB misses.  These cannot be in the
+ * per_cpu() area as we really need to lock them into the TLB and
+ * thus make them part of the main kernel image.  As a result we
+ * try to make this as small as possible.
+ *
+ * This is padded out and aligned to 64-bytes to avoid false sharing
+ * on SMP.
+ */
+
+/* If you modify the size of this structure, please update
+ * TRAP_BLOCK_SZ_SHIFT below.
+ */
+struct thread_info;
+struct trap_per_cpu {
+/* D-cache line 1: Basic thread information, cpu and device mondo queues */
+	struct thread_info	*thread;
+	unsigned long		pgd_paddr;
+	unsigned long		cpu_mondo_pa;
+	unsigned long		dev_mondo_pa;
+
+/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
+	unsigned long		resum_mondo_pa;
+	unsigned long		resum_kernel_buf_pa;
+	unsigned long		nonresum_mondo_pa;
+	unsigned long		nonresum_kernel_buf_pa;
+
+/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
+	struct hv_fault_status	fault_info;
+
+/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */
+	unsigned long		cpu_mondo_block_pa;
+	unsigned long		cpu_list_pa;
+	unsigned long		__pad1[2];
+
+/* Dcache line 8: Unused, needed to keep trap_block a power-of-2 in size. */
+	unsigned long		__pad2[4];
+} __attribute__((aligned(64)));
+extern struct trap_per_cpu trap_block[NR_CPUS];
+extern void init_cur_cpu_trap(struct thread_info *);
+extern void setup_tba(void);
+
+struct cpuid_patch_entry {
+	unsigned int	addr;
+	unsigned int	cheetah_safari[4];
+	unsigned int	cheetah_jbus[4];
+	unsigned int	starfire[4];
+	unsigned int	sun4v[4];
+};
+extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
+
+struct sun4v_1insn_patch_entry {
+	unsigned int	addr;
+	unsigned int	insn;
+};
+extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
+	__sun4v_1insn_patch_end;
+
+struct sun4v_2insn_patch_entry {
+	unsigned int	addr;
+	unsigned int	insns[2];
+};
+extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
+	__sun4v_2insn_patch_end;
+
+#endif /* !(__ASSEMBLY__) */
+
+#define TRAP_PER_CPU_THREAD		0x00
+#define TRAP_PER_CPU_PGD_PADDR		0x08
+#define TRAP_PER_CPU_CPU_MONDO_PA	0x10
+#define TRAP_PER_CPU_DEV_MONDO_PA	0x18
+#define TRAP_PER_CPU_RESUM_MONDO_PA	0x20
+#define TRAP_PER_CPU_RESUM_KBUF_PA	0x28
+#define TRAP_PER_CPU_NONRESUM_MONDO_PA	0x30
+#define TRAP_PER_CPU_NONRESUM_KBUF_PA	0x38
+#define TRAP_PER_CPU_FAULT_INFO		0x40
+#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA	0xc0
+#define TRAP_PER_CPU_CPU_LIST_PA	0xc8
+
+#define TRAP_BLOCK_SZ_SHIFT		8
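
These offsets mirror the struct layout by hand, so a sanity-check sketch (not
in the patch) can pin them at compile time. Note fault_info is 16 unsigned
longs (128 bytes), which is why the next field lands at 0xc0, and the two pad
arrays round the whole block up to 256 bytes, i.e. 1 << TRAP_BLOCK_SZ_SHIFT:

	#include <linux/kernel.h>	/* BUILD_BUG_ON() */
	#include <linux/stddef.h>	/* offsetof() */

	static inline void trap_block_layout_checks(void)
	{
		BUILD_BUG_ON(offsetof(struct trap_per_cpu, thread) !=
			     TRAP_PER_CPU_THREAD);
		BUILD_BUG_ON(offsetof(struct trap_per_cpu, fault_info) !=
			     TRAP_PER_CPU_FAULT_INFO);
		BUILD_BUG_ON(offsetof(struct trap_per_cpu, cpu_mondo_block_pa) !=
			     TRAP_PER_CPU_CPU_MONDO_BLOCK_PA);
		BUILD_BUG_ON(sizeof(struct trap_per_cpu) !=
			     (1 << TRAP_BLOCK_SZ_SHIFT));
	}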
+
+#include <asm/scratchpad.h>
+
+#define __GET_CPUID(REG)				\
+	/* Spitfire implementation (default). */	\
+661:	ldxa		[%g0] ASI_UPA_CONFIG, REG;	\
+	srlx		REG, 17, REG;			\
+	and		REG, 0x1f, REG;			\
+	nop;						\
+	.section	.cpuid_patch, "ax";		\
+	/* Instruction location. */			\
+	.word		661b;				\
+	/* Cheetah Safari implementation. */		\
+	ldxa		[%g0] ASI_SAFARI_CONFIG, REG;	\
+	srlx		REG, 17, REG;			\
+	and		REG, 0x3ff, REG;		\
+	nop;						\
+	/* Cheetah JBUS implementation. */		\
+	ldxa		[%g0] ASI_JBUS_CONFIG, REG;	\
+	srlx		REG, 17, REG;			\
+	and		REG, 0x1f, REG;			\
+	nop;						\
+	/* Starfire implementation. */			\
+	sethi		%hi(0x1fff40000d0 >> 9), REG;	\
+	sllx		REG, 9, REG;			\
+	or		REG, 0xd0, REG;			\
+	lduwa		[REG] ASI_PHYS_BYPASS_EC_E, REG;\
+	/* sun4v implementation. */			\
+	mov		SCRATCHPAD_CPUID, REG;		\
+	ldxa		[REG] ASI_SCRATCHPAD, REG;	\
+	nop;						\
+	nop;						\
+	.previous;
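
A sketch (not part of the patch) of how such a patch table is typically
consumed at boot: walk the entries, pick the four replacement instructions for
the detected CPU type, write them over the default __GET_CPUID sequence at
entry->addr, and flush the I-cache. The structs come from above; the selector
variables (tlb_type/hypervisor from elf.h below, this_is_starfire) are assumed
to be in scope, and the safari/jbus distinction is collapsed for brevity:

	static void __init apply_cpuid_patches(void)
	{
		struct cpuid_patch_entry *p = &__cpuid_patch;

		while (p < &__cpuid_patch_end) {
			unsigned int *insns, *target;
			int i;

			/* Pick the variant for this CPU (assumed selectors). */
			if (tlb_type == hypervisor)
				insns = &p->sun4v[0];
			else if (this_is_starfire)
				insns = &p->starfire[0];
			else
				insns = &p->cheetah_safari[0];

			target = (unsigned int *)(unsigned long)p->addr;
			for (i = 0; i < 4; i++) {
				target[i] = insns[i];
				/* flush: push the patched word out of the I-cache */
				__asm__ __volatile__("flush	%0"
						     : : "r" (&target[i]));
			}
			p++;
		}
	}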
+
+#ifdef CONFIG_SMP
+
+#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	__GET_CPUID(TMP)			\
+	sethi	%hi(trap_block), DEST;		\
+	sllx	TMP, TRAP_BLOCK_SZ_SHIFT, TMP;	\
+	or	DEST, %lo(trap_block), DEST;	\
+	add	DEST, TMP, DEST;		\
+
+/* Clobbers TMP, current address space PGD phys address into DEST. */
+#define TRAP_LOAD_PGD_PHYS(DEST, TMP)		\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
+
+/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
+#define TRAP_LOAD_IRQ_WORK(DEST, TMP)		\
+	__GET_CPUID(TMP)			\
+	sethi	%hi(__irq_work), DEST;		\
+	sllx	TMP, 6, TMP;			\
+	or	DEST, %lo(__irq_work), DEST;	\
+	add	DEST, TMP, DEST;
+
+/* Clobbers TMP, loads DEST with current thread info pointer. */
+#define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;
+
+/* Given the current thread info pointer in THR, load the per-cpu
+ * area base of the current processor into DEST.  REG1, REG2, and REG3 are
+ * clobbered.
+ *
+ * You absolutely cannot use DEST as a temporary in this code.  The
+ * reason is that traps can happen during execution, and return from
+ * trap will load the fully resolved DEST per-cpu base.  This can corrupt
+ * the calculations done by the macro mid-stream.
+ */
+#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)	\
+	ldub	[THR + TI_CPU], REG1;			\
+	sethi	%hi(__per_cpu_shift), REG3;		\
+	sethi	%hi(__per_cpu_base), REG2;		\
+	ldx	[REG3 + %lo(__per_cpu_shift)], REG3;	\
+	ldx	[REG2 + %lo(__per_cpu_base)], REG2;	\
+	sllx	REG1, REG3, REG3;			\
+	add	REG3, REG2, DEST;
+
+#else
+
+#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	sethi	%hi(trap_block), DEST;		\
+	or	DEST, %lo(trap_block), DEST;	\
+
+/* Uniprocessor versions, we know the cpuid is zero. */
+#define TRAP_LOAD_PGD_PHYS(DEST, TMP)		\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
+
+#define TRAP_LOAD_IRQ_WORK(DEST, TMP)		\
+	sethi	%hi(__irq_work), DEST;		\
+	or	DEST, %lo(__irq_work), DEST;
+
+#define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;
+
+/* No per-cpu areas on uniprocessor, so no need to load DEST. */
+#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
+
+#endif /* !(CONFIG_SMP) */
+
 #endif /* _SPARC64_CPUDATA_H */
diff --git a/include/asm-sparc64/elf.h b/include/asm-sparc64/elf.h
index 69539a8ab833..303d85e2f82e 100644
--- a/include/asm-sparc64/elf.h
+++ b/include/asm-sparc64/elf.h
@@ -10,6 +10,7 @@
 #ifdef __KERNEL__
 #include <asm/processor.h>
 #include <asm/uaccess.h>
+#include <asm/spitfire.h>
 #endif
 
 /*
@@ -68,6 +69,7 @@
 #define HWCAP_SPARC_MULDIV      8
 #define HWCAP_SPARC_V9		16
 #define HWCAP_SPARC_ULTRA3	32
+#define HWCAP_SPARC_BLKINIT	64
 
 /*
  * These are used to set parameters in the core dumps.
@@ -145,11 +147,21 @@ typedef struct {
    instruction set this cpu supports. */
 
 /* On Ultra, we support all of the v8 capabilities. */
-#define ELF_HWCAP	((HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | \
-			  HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV | \
-			  HWCAP_SPARC_V9) | \
-			 ((tlb_type == cheetah || tlb_type == cheetah_plus) ? \
-			  HWCAP_SPARC_ULTRA3 : 0))
+static inline unsigned int sparc64_elf_hwcap(void)
+{
+	unsigned int cap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
+			    HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV |
+			    HWCAP_SPARC_V9);
+
+	if (tlb_type == cheetah || tlb_type == cheetah_plus)
+		cap |= HWCAP_SPARC_ULTRA3;
+	else if (tlb_type == hypervisor)
+		cap |= HWCAP_SPARC_BLKINIT;
+
+	return cap;
+}
+
+#define ELF_HWCAP	sparc64_elf_hwcap()
 
 /* This yields a string that ld.so will use to load implementation
    specific libraries for optimization.  This is more specific in
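
Userspace sees the result of sparc64_elf_hwcap() through the ELF auxiliary
vector. A hedged userspace sketch (getauxval() requires glibc 2.16+, which
postdates this patch; the local HWCAP define simply mirrors the kernel value
above):

	#include <stdio.h>
	#include <sys/auxv.h>

	#define HWCAP_SPARC_BLKINIT	64	/* matches the kernel define */

	int main(void)
	{
		unsigned long hwcap = getauxval(AT_HWCAP);

		if (hwcap & HWCAP_SPARC_BLKINIT)
			printf("Niagara block-init store instructions available\n");
		return 0;
	}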
diff --git a/include/asm-sparc64/head.h b/include/asm-sparc64/head.h
index 0abd3a674e8f..67960a751f4d 100644
--- a/include/asm-sparc64/head.h
+++ b/include/asm-sparc64/head.h
@@ -4,12 +4,21 @@
 
 #include <asm/pstate.h>
 
+	/* wrpr	%g0, val, %gl */
+#define SET_GL(val)	\
+	.word	0xa1902000 | val
+
+	/* rdpr	%gl, %gN */
+#define GET_GL_GLOBAL(N)	\
+	.word	0x81540000 | (N << 25)
+
 #define KERNBASE	0x400000
 
 #define PTREGS_OFF	(STACK_BIAS + STACKFRAME_SZ)
 
 #define __CHEETAH_ID	0x003e0014
 #define __JALAPENO_ID	0x003e0016
+#define __SERRANO_ID	0x003e0022
 
 #define CHEETAH_MANUF		0x003e
 #define CHEETAH_IMPL		0x0014 /* Ultra-III */
@@ -19,6 +28,12 @@
 #define PANTHER_IMPL		0x0019 /* Ultra-IV+ */
 #define SERRANO_IMPL		0x0022 /* Ultra-IIIi+ */
 
+#define BRANCH_IF_SUN4V(tmp1,label)		\
+	sethi	%hi(is_sun4v), %tmp1;		\
+	lduw	[%tmp1 + %lo(is_sun4v)], %tmp1;	\
+	brnz,pn	%tmp1, label;			\
+	nop
+
 #define BRANCH_IF_CHEETAH_BASE(tmp1,tmp2,label)	\
 	rdpr	%ver, %tmp1;			\
 	sethi	%hi(__CHEETAH_ID), %tmp2;	\
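
The hand-assembled .word encodings exist presumably because contemporary
assemblers did not yet accept the sun4v-only %gl (global-level) register.
Worked example: SET_GL(1) emits 0xa1902000 | 1 == 0xa1902001, which decodes as
"wrpr %g0, 1, %gl" (op3 0x32, rd = privileged register 16, immediate 1);
GET_GL_GLOBAL(1) emits 0x81540000 | (1 << 25) == 0x83540000, i.e.
"rdpr %gl, %g1" (op3 0x2a, rs1 = privileged register 16, rd = %g1).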
diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h
new file mode 100644
index 000000000000..612bf319753f
--- /dev/null
+++ b/include/asm-sparc64/hypervisor.h
@@ -0,0 +1,2128 @@
+#ifndef _SPARC64_HYPERVISOR_H
+#define _SPARC64_HYPERVISOR_H
+
+/* Sun4v hypervisor interfaces and defines.
+ *
+ * Hypervisor calls are made via traps to software trap numbers 0x80
+ * and above.  Registers %o0 to %o5 serve as argument, status, and
+ * return value registers.
+ *
+ * There are two kinds of these traps.  First there are the normal
+ * "fast traps" which use software trap 0x80 and encode the function
+ * to invoke by number in register %o5.  Argument and return value
+ * handling is as follows:
+ *
+ *	-----------------------------------------------
+ *	|  %o5  | function number |     undefined     |
+ *	|  %o0  |   argument 0    |   return status   |
+ *	|  %o1  |   argument 1    |   return value 1  |
+ *	|  %o2  |   argument 2    |   return value 2  |
+ *	|  %o3  |   argument 3    |   return value 3  |
+ *	|  %o4  |   argument 4    |   return value 4  |
+ *	-----------------------------------------------
+ *
+ * The second type are "hyper-fast traps" which encode the function
+ * number in the software trap number itself.  So these use trap
+ * numbers > 0x80.  The register usage for hyper-fast traps is as
+ * follows:
+ *
+ *	-----------------------------------------------
+ *	|  %o0  |   argument 0    |   return status   |
+ *	|  %o1  |   argument 1    |   return value 1  |
+ *	|  %o2  |   argument 2    |   return value 2  |
+ *	|  %o3  |   argument 3    |   return value 3  |
+ *	|  %o4  |   argument 4    |   return value 4  |
+ *	-----------------------------------------------
+ *
+ * Registers providing explicit arguments to the hypervisor calls
+ * are volatile across the call.  Upon return their values are
+ * undefined unless explicitly specified as containing a particular
+ * return value by the specific call.  The return status is always
+ * returned in register %o0, zero indicates a successful execution of
+ * the hypervisor call and other values indicate an error status as
+ * defined below.  So, for example, if a hyper-fast trap takes
+ * arguments 0, 1, and 2, then %o0, %o1, and %o2 are volatile across
+ * the call and %o3, %o4, and %o5 would be preserved.
+ *
+ * If the hypervisor trap is invalid, or the fast trap function number
+ * is invalid, HV_EBADTRAP will be returned in %o0.  Also, all 64-bits
+ * of the argument and return values are significant.
+ */
+
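A minimal sketch (not part of the patch) of the fast-trap convention just
described, for a call taking no arguments: the function number goes in %o5,
"ta 0x80" (HV_FAST_TRAP, defined just below) enters the hypervisor, and the
status comes back in %o0. The helper name is hypothetical; e.g. the
sun4v_cpu_yield() prototype below could be realized as
hv_fast_trap0(HV_FAST_CPU_YIELD):

	static inline unsigned long hv_fast_trap0(unsigned long func)
	{
		register unsigned long o5 __asm__("o5") = func;
		register unsigned long o0 __asm__("o0");

		__asm__ __volatile__("ta	0x80"	/* HV_FAST_TRAP */
				     : "=r" (o0), "+r" (o5)
				     :
				     : "o1", "o2", "o3", "o4", "memory");
		return o0;	/* HV_EOK (zero) on success */
	}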
+/* Trap numbers. */
+#define HV_FAST_TRAP		0x80
+#define HV_MMU_MAP_ADDR_TRAP	0x83
+#define HV_MMU_UNMAP_ADDR_TRAP	0x84
+#define HV_TTRACE_ADDENTRY_TRAP	0x85
+#define HV_CORE_TRAP		0xff
+
+/* Error codes. */
+#define HV_EOK				0  /* Successful return            */
+#define HV_ENOCPU			1  /* Invalid CPU id               */
+#define HV_ENORADDR			2  /* Invalid real address         */
+#define HV_ENOINTR			3  /* Invalid interrupt id         */
+#define HV_EBADPGSZ			4  /* Invalid pagesize encoding    */
+#define HV_EBADTSB			5  /* Invalid TSB description      */
+#define HV_EINVAL			6  /* Invalid argument             */
+#define HV_EBADTRAP			7  /* Invalid function number      */
+#define HV_EBADALIGN			8  /* Invalid address alignment    */
+#define HV_EWOULDBLOCK			9  /* Cannot complete w/o blocking */
+#define HV_ENOACCESS			10 /* No access to resource        */
+#define HV_EIO				11 /* I/O error                    */
+#define HV_ECPUERROR			12 /* CPU in error state           */
+#define HV_ENOTSUPPORTED		13 /* Function not supported       */
+#define HV_ENOMAP			14 /* No mapping found             */
+#define HV_ETOOMANY			15 /* Too many items specified     */
+
+/* mach_exit()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MACH_EXIT
+ * ARG0:	exit code
+ * ERRORS:	This service does not return.
+ *
+ * Stop all CPUs in the virtual domain and place them into the stopped
+ * state.  The 64-bit exit code may be passed to a service entity as
+ * the domain's exit status.  On systems without a service entity, the
+ * domain will undergo a reset, and the boot firmware will be
+ * reloaded.
+ *
+ * This function will never return to the guest that invokes it.
+ *
+ * Note: By convention an exit code of zero denotes a successful exit by
+ *       the guest code.  A non-zero exit code denotes a guest specific
+ *       error indication.
+ *
+ */
+#define HV_FAST_MACH_EXIT		0x00
+
+/* Domain services. */
+
+/* mach_desc()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MACH_DESC
+ * ARG0:	buffer
+ * ARG1:	length
+ * RET0:	status
+ * RET1:	length
+ * ERRORS:	HV_EBADALIGN	Buffer is badly aligned
+ *		HV_ENORADDR	Buffer is at an illegal real address
+ *		HV_EINVAL	Buffer length is too small for complete
+ *				machine description.
+ *
+ * Copy the most current machine description into the buffer indicated
+ * by the real address in ARG0.  The buffer provided must be 16 byte
+ * aligned.  Upon success or HV_EINVAL, this service returns the
+ * actual size of the machine description in the RET1 return value.
+ *
+ * Note: A method of determining the appropriate buffer size for the
+ *       machine description is to first call this service with a buffer
+ *       length of 0 bytes.
+ */
+#define HV_FAST_MACH_DESC		0x01
+
+/* mach_sir()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MACH_SIR
+ * ERRORS:	This service does not return.
+ *
+ * Perform a software initiated reset of the virtual machine domain.
+ * All CPUs are captured as soon as possible, all hardware devices are
+ * returned to the entry default state, and the domain is restarted at
+ * the SIR (trap type 0x04) real trap table (RTBA) entry point on one
+ * of the CPUs.  The single CPU restarted is selected as determined by
+ * platform specific policy.  Memory is preserved across this
+ * operation.
+ */
+#define HV_FAST_MACH_SIR		0x02
+
+/* mach_set_soft_state()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MACH_SET_SOFT_STATE
+ * ARG0:	software state
+ * ARG1:	software state description pointer
+ * RET0:	status
+ * ERRORS:	EINVAL		software state not valid or software state
+ *				description is not NULL terminated
+ *		ENORADDR	software state description pointer is not a
+ *				valid real address
+ *		EBADALIGN	software state description is not correctly
+ *				aligned
+ *
+ * This allows the guest to report its soft state to the hypervisor.  There
+ * are two primary components to this state.  The first part states whether
+ * the guest software is running or not.  The second contains optional
+ * details specific to the software.
+ *
+ * The software state argument is defined below in HV_SOFT_STATE_*, and
+ * indicates whether the guest is operating normally or in a transitional
+ * state.
+ *
+ * The software state description argument is a real address of a data buffer
+ * of size 32 bytes aligned on a 32-byte boundary.  It is treated as a NULL
+ * terminated 7-bit ASCII string of up to 31 characters not including the
+ * NULL termination.
+ */
+#define HV_FAST_MACH_SET_SOFT_STATE	0x03
+#define  HV_SOFT_STATE_NORMAL		0x01
+#define  HV_SOFT_STATE_TRANSITION	0x02
+
+/* mach_get_soft_state()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MACH_GET_SOFT_STATE
+ * ARG0:	software state description pointer
+ * RET0:	status
+ * RET1:	software state
+ * ERRORS:	ENORADDR	software state description pointer is not a
+ *				valid real address
+ *		EBADALIGN	software state description is not correctly
+ *				aligned
+ *
+ * Retrieve the current value of the guest's software state.  The rules
+ * for the software state pointer are the same as for mach_set_soft_state()
+ * above.
+ */
+#define HV_FAST_MACH_GET_SOFT_STATE	0x04
+
+/* CPU services.
+ *
+ * CPUs represent devices that can execute software threads.  A single
+ * chip that contains multiple cores or strands is represented as
+ * multiple CPUs with unique CPU identifiers.  CPUs are exported to
+ * OBP via the machine description (and to the OS via the OBP device
+ * tree).  CPUs are always in one of three states: stopped, running,
+ * or error.
+ *
+ * A CPU ID is a pre-assigned 16-bit value that uniquely identifies a
+ * CPU within a logical domain.  Operations that are to be performed
+ * on multiple CPUs specify them via a CPU list.  A CPU list is an
+ * array in real memory, of which each 16-bit word is a CPU ID.  CPU
+ * lists are passed through the API as two arguments.  The first is
+ * the number of entries (16-bit words) in the CPU list, and the
+ * second is the (real address) pointer to the CPU ID list.
+ */
+
+/* cpu_start()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_CPU_START
+ * ARG0:	CPU ID
+ * ARG1:	PC
+ * ARG2:	RTBA
+ * ARG3:	target ARG0
+ * RET0:	status
+ * ERRORS:	ENOCPU		Invalid CPU ID
+ *		EINVAL		Target CPU ID is not in the stopped state
+ *		ENORADDR	Invalid PC or RTBA real address
+ *		EBADALIGN	Unaligned PC or unaligned RTBA
+ *		EWOULDBLOCK	Starting resources are not available
+ *
+ * Start CPU with given CPU ID with PC in %pc and with a real trap
+ * base address value of RTBA.  The indicated CPU must be in the
+ * stopped state.  The supplied RTBA must be aligned on a 256 byte
+ * boundary.  On successful completion, the specified CPU will be in
+ * the running state and will be supplied with "target ARG0" in %o0
+ * and RTBA in %tba.
+ */
+#define HV_FAST_CPU_START		0x10
+
+/* cpu_stop()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_CPU_STOP
+ * ARG0:	CPU ID
+ * RET0:	status
+ * ERRORS:	ENOCPU		Invalid CPU ID
+ *		EINVAL		Target CPU ID is the current cpu
+ *		EINVAL		Target CPU ID is not in the running state
+ *		EWOULDBLOCK	Stopping resources are not available
+ *		ENOTSUPPORTED	Not supported on this platform
+ *
+ * The specified CPU is stopped.  The indicated CPU must be in the
+ * running state.  On completion, it will be in the stopped state.  It
+ * is not legal to stop the current CPU.
+ *
+ * Note: As this service cannot be used to stop the current cpu, this service
+ *       may not be used to stop the last running CPU in a domain.  To stop
+ *       and exit a running domain, a guest must use the mach_exit() service.
+ */
+#define HV_FAST_CPU_STOP		0x11
+
+/* cpu_yield()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_CPU_YIELD
+ * RET0:	status
+ * ERRORS:	No possible error.
+ *
+ * Suspend execution on the current CPU.  Execution will resume when
+ * an interrupt (device, %stick_compare, or cross-call) is targeted to
+ * the CPU.  On some CPUs, this API may be used by the hypervisor to
+ * save power by disabling hardware strands.
+ */
+#define HV_FAST_CPU_YIELD		0x12
+
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_cpu_yield(void);
+#endif
+
+/* cpu_qconf()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_CPU_QCONF
+ * ARG0:	queue
+ * ARG1:	base real address
+ * ARG2:	number of entries
+ * RET0:	status
+ * ERRORS:	ENORADDR	Invalid base real address
+ *		EINVAL		Invalid queue or number of entries is less
+ *				than 2 or too large.
+ *		EBADALIGN	Base real address is not correctly aligned
+ *				for size.
+ *
+ * Configure the given queue to be placed at the given base real
+ * address, with the given number of entries.  The number of entries
+ * must be a power of 2.  The base real address must be aligned
+ * exactly to match the queue size.  Each queue entry is 64 bytes
+ * long, so for example a 32 entry queue must be aligned on a 2048
+ * byte real address boundary.
+ *
+ * The specified queue is unconfigured if the number of entries is given
+ * as zero.
+ *
+ * For the current version of this API service, the argument queue is defined
+ * as follows:
+ *
+ *	queue		description
+ *	-----		-------------------------
+ *	0x3c		cpu mondo queue
+ *	0x3d		device mondo queue
+ *	0x3e		resumable error queue
+ *	0x3f		non-resumable error queue
+ *
+ * Note: The maximum number of entries for each queue for a specific cpu may
+ *       be determined from the machine description.
+ */
+#define HV_FAST_CPU_QCONF		0x14
+#define  HV_CPU_QUEUE_CPU_MONDO		0x3c
+#define  HV_CPU_QUEUE_DEVICE_MONDO	0x3d
+#define  HV_CPU_QUEUE_RES_ERROR		0x3e
+#define  HV_CPU_QUEUE_NONRES_ERROR	0x3f
+
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_cpu_qconf(unsigned long type,
+				     unsigned long queue_paddr,
+				     unsigned long num_queue_entries);
+#endif
+
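A sketch (not from the patch) of registering a CPU mondo queue per the rules
above, using the sun4v_cpu_qconf() prototype just declared. For a hypothetical
128-entry queue, the queue occupies 128 * 64 = 8192 bytes and its base real
address must therefore be 8192-byte aligned; queue_pa is an assumed,
suitably-aligned physical address:

	static void example_register_cpu_mondo_queue(unsigned long queue_pa)
	{
		unsigned long status;

		status = sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO, queue_pa, 128);
		if (status != HV_EOK)
			/* e.g. HV_EBADALIGN if queue_pa is not 8K-aligned */
			prom_printf("cpu_qconf failed: %lu\n", status);
	}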
+/* cpu_qinfo()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_CPU_QINFO
+ * ARG0:	queue
+ * RET0:	status
+ * RET1:	base real address
+ * RET2:	number of entries
+ * ERRORS:	EINVAL		Invalid queue
+ *
+ * Return the configuration info for the given queue.  The base real
+ * address and number of entries of the defined queue are returned.
+ * The queue argument values are the same as for cpu_qconf() above.
+ *
+ * If the specified queue is a valid queue number, but no queue has
+ * been defined, the number of entries will be set to zero and the
+ * base real address returned is undefined.
+ */
+#define HV_FAST_CPU_QINFO		0x15
+
+/* cpu_mondo_send()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_CPU_MONDO_SEND
+ * ARG0-1:	CPU list
+ * ARG2:	data real address
+ * RET0:	status
+ * ERRORS:	EBADALIGN	Mondo data is not 64-byte aligned or CPU list
+ *				is not 2-byte aligned.
+ *		ENORADDR	Invalid data mondo address, or invalid cpu list
+ *				address.
+ *		ENOCPU		Invalid cpu in CPU list
+ *		EWOULDBLOCK	Some or all of the listed CPUs did not receive
+ *				the mondo
+ *		ECPUERROR	One or more of the listed CPUs are in error
+ *				state, use HV_FAST_CPU_STATE to see which ones
+ *		EINVAL		CPU list includes caller's CPU ID
+ *
+ * Send a mondo interrupt to the CPUs in the given CPU list with the
+ * 64-bytes at the given data real address.  The data must be 64-byte
+ * aligned.  The mondo data will be delivered to the cpu_mondo queues
+ * of the recipient CPUs.
+ *
+ * In all cases, error or not, the CPUs in the CPU list to which the
+ * mondo has been successfully delivered will be indicated by having
+ * their entry in CPU list updated with the value 0xffff.
+ */
+#define HV_FAST_CPU_MONDO_SEND		0x42
+
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count,
+					  unsigned long cpu_list_pa,
+					  unsigned long mondo_block_pa);
+#endif
+
+/* cpu_myid()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_CPU_MYID
+ * RET0:	status
+ * RET1:	CPU ID
+ * ERRORS:	No errors defined.
+ *
+ * Return the hypervisor ID handle for the current CPU.  Used by a
+ * virtual CPU to discover its own identity.
+ */
+#define HV_FAST_CPU_MYID		0x16
+
+/* cpu_state()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_CPU_STATE
+ * ARG0:	CPU ID
+ * RET0:	status
+ * RET1:	state
+ * ERRORS:	ENOCPU		Invalid CPU ID
+ *
+ * Retrieve the current state of the CPU with the given CPU ID.
+ */
+#define HV_FAST_CPU_STATE		0x17
+#define  HV_CPU_STATE_STOPPED		0x01
+#define  HV_CPU_STATE_RUNNING		0x02
+#define  HV_CPU_STATE_ERROR		0x03
+
+#ifndef __ASSEMBLY__
+extern long sun4v_cpu_state(unsigned long cpuid);
+#endif
+
+/* cpu_set_rtba()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_CPU_SET_RTBA
+ * ARG0:	RTBA
+ * RET0:	status
+ * RET1:	previous RTBA
+ * ERRORS:	ENORADDR	Invalid RTBA real address
+ *		EBADALIGN	RTBA is incorrectly aligned for a trap table
+ *
+ * Set the real trap base address of the local cpu to the given RTBA.
+ * The supplied RTBA must be aligned on a 256 byte boundary.  Upon
+ * success the previous value of the RTBA is returned in RET1.
+ *
+ * Note: This service does not affect %tba
+ */
+#define HV_FAST_CPU_SET_RTBA		0x18
+
+/* cpu_get_rtba()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_CPU_GET_RTBA
+ * RET0:	status
+ * RET1:	current RTBA
+ * ERRORS:	No possible error.
+ *
+ * Returns the current value of RTBA in RET1.
+ */
+#define HV_FAST_CPU_GET_RTBA		0x19
+
+/* MMU services.
+ *
+ * Layout of a TSB description for mmu_tsb_ctx{,non}0() calls.
+ */
+#ifndef __ASSEMBLY__
+struct hv_tsb_descr {
+	unsigned short		pgsz_idx;
+	unsigned short		assoc;
+	unsigned int		num_ttes;	/* in TTEs */
+	unsigned int		ctx_idx;
+	unsigned int		pgsz_mask;
+	unsigned long		tsb_base;
+	unsigned long		resv;
+};
+#endif
+#define HV_TSB_DESCR_PGSZ_IDX_OFFSET	0x00
+#define HV_TSB_DESCR_ASSOC_OFFSET	0x02
+#define HV_TSB_DESCR_NUM_TTES_OFFSET	0x04
+#define HV_TSB_DESCR_CTX_IDX_OFFSET	0x08
+#define HV_TSB_DESCR_PGSZ_MASK_OFFSET	0x0c
+#define HV_TSB_DESCR_TSB_BASE_OFFSET	0x10
+#define HV_TSB_DESCR_RESV_OFFSET	0x18
+
+/* Page size bitmask. */
+#define HV_PGSZ_MASK_8K			(1 << 0)
+#define HV_PGSZ_MASK_64K		(1 << 1)
+#define HV_PGSZ_MASK_512K		(1 << 2)
+#define HV_PGSZ_MASK_4MB		(1 << 3)
+#define HV_PGSZ_MASK_32MB		(1 << 4)
+#define HV_PGSZ_MASK_256MB		(1 << 5)
+#define HV_PGSZ_MASK_2GB		(1 << 6)
+#define HV_PGSZ_MASK_16GB		(1 << 7)
+
+/* Page size index.  The value given in the TSB descriptor must correspond
+ * to the smallest page size specified in the pgsz_mask page size bitmask.
+ */
+#define HV_PGSZ_IDX_8K			0
+#define HV_PGSZ_IDX_64K			1
+#define HV_PGSZ_IDX_512K		2
+#define HV_PGSZ_IDX_4MB			3
+#define HV_PGSZ_IDX_32MB		4
+#define HV_PGSZ_IDX_256MB		5
+#define HV_PGSZ_IDX_2GB			6
+#define HV_PGSZ_IDX_16GB		7
+
+/* MMU fault status area.
+ *
+ * MMU related faults have their status and fault address information
+ * placed into a memory region made available by privileged code.  Each
+ * virtual processor must make a mmu_fault_area_conf() call to tell the
+ * hypervisor where that processor's fault status should be stored.
+ *
+ * The fault status block is a multiple of 64 bytes and must be aligned
+ * on a 64-byte boundary.
+ */
+#ifndef __ASSEMBLY__
+struct hv_fault_status {
+	unsigned long		i_fault_type;
+	unsigned long		i_fault_addr;
+	unsigned long		i_fault_ctx;
+	unsigned long		i_reserved[5];
+	unsigned long		d_fault_type;
+	unsigned long		d_fault_addr;
+	unsigned long		d_fault_ctx;
+	unsigned long		d_reserved[5];
+};
+#endif
+#define HV_FAULT_I_TYPE_OFFSET	0x00
+#define HV_FAULT_I_ADDR_OFFSET	0x08
+#define HV_FAULT_I_CTX_OFFSET	0x10
+#define HV_FAULT_D_TYPE_OFFSET	0x40
+#define HV_FAULT_D_ADDR_OFFSET	0x48
+#define HV_FAULT_D_CTX_OFFSET	0x50
+
+#define HV_FAULT_TYPE_FAST_MISS	1
+#define HV_FAULT_TYPE_FAST_PROT	2
+#define HV_FAULT_TYPE_MMU_MISS	3
+#define HV_FAULT_TYPE_INV_RA	4
+#define HV_FAULT_TYPE_PRIV_VIOL	5
+#define HV_FAULT_TYPE_PROT_VIOL	6
+#define HV_FAULT_TYPE_NFO	7
+#define HV_FAULT_TYPE_NFO_SEFF	8
+#define HV_FAULT_TYPE_INV_VA	9
+#define HV_FAULT_TYPE_INV_ASI	10
+#define HV_FAULT_TYPE_NC_ATOMIC	11
+#define HV_FAULT_TYPE_PRIV_ACT	12
+#define HV_FAULT_TYPE_RESV1	13
+#define HV_FAULT_TYPE_UNALIGNED	14
+#define HV_FAULT_TYPE_INV_PGSZ	15
+/* Values 16 --> -2 are reserved. */
+#define HV_FAULT_TYPE_MULTIPLE	-1
+
+/* Flags argument for mmu_{map,unmap}_addr(), mmu_demap_{page,context,all}(),
+ * and mmu_{map,unmap}_perm_addr().
+ */
+#define HV_MMU_DMMU			0x01
+#define HV_MMU_IMMU			0x02
+#define HV_MMU_ALL			(HV_MMU_DMMU | HV_MMU_IMMU)
+
+/* mmu_map_addr()
+ * TRAP:	HV_MMU_MAP_ADDR_TRAP
+ * ARG0:	virtual address
+ * ARG1:	mmu context
+ * ARG2:	TTE
+ * ARG3:	flags (HV_MMU_{IMMU,DMMU})
+ * ERRORS:	EINVAL		Invalid virtual address, mmu context, or flags
+ *		EBADPGSZ	Invalid page size value
+ *		ENORADDR	Invalid real address in TTE
+ *
+ * Create a non-permanent mapping using the given TTE, virtual
+ * address, and mmu context.  The flags argument determines which
+ * (data, or instruction, or both) TLB the mapping gets loaded into.
+ *
+ * The behavior is undefined if the valid bit is clear in the TTE.
+ *
+ * Note: This API call is for privileged code to specify temporary translation
+ *       mappings without the need to create and manage a TSB.
+ */
+
+/* mmu_unmap_addr()
+ * TRAP:	HV_MMU_UNMAP_ADDR_TRAP
+ * ARG0:	virtual address
+ * ARG1:	mmu context
+ * ARG2:	flags (HV_MMU_{IMMU,DMMU})
+ * ERRORS:	EINVAL		Invalid virtual address, mmu context, or flags
+ *
+ * Demaps the given virtual address in the given mmu context on this
+ * CPU.  This function is intended to be used to demap pages mapped
+ * with mmu_map_addr.  This service is equivalent to invoking
+ * mmu_demap_page() with only the current CPU in the CPU list.  The
+ * flags argument determines which (data, or instruction, or both) TLB
+ * the mapping gets unmapped from.
+ *
+ * Attempting to perform an unmap operation for a previously defined
+ * permanent mapping will have undefined results.
+ */
+
+/* mmu_tsb_ctx0()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MMU_TSB_CTX0
+ * ARG0:	number of TSB descriptions
+ * ARG1:	TSB descriptions pointer
+ * RET0:	status
+ * ERRORS:	ENORADDR		Invalid TSB descriptions pointer or
+ *					TSB base within a descriptor
+ *		EBADALIGN		TSB descriptions pointer is not aligned
+ *					to an 8-byte boundary, or TSB base
+ *					within a descriptor is not aligned for
+ *					the given TSB size
+ *		EBADPGSZ		Invalid page size in a TSB descriptor
+ *		EBADTSB			Invalid associativity or size in a TSB
+ *					descriptor
+ *		EINVAL			Invalid number of TSB descriptions, or
+ *					invalid context index in a TSB
+ *					descriptor, or index page size not
+ *					equal to smallest page size in page
+ *					size bitmask field.
+ *
+ * Configures the TSBs for the current CPU for virtual addresses with
+ * context zero.  The TSB descriptions pointer is a pointer to an
+ * array of the given number of TSB descriptions.
+ *
+ * Note: The maximum number of TSBs available to a virtual CPU is given by the
+ *       mmu-max-#tsbs property of the cpu's corresponding "cpu" node in the
+ *       machine description.
+ */
+#define HV_FAST_MMU_TSB_CTX0		0x20
+
+/* mmu_tsb_ctxnon0()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MMU_TSB_CTXNON0
+ * ARG0:	number of TSB descriptions
+ * ARG1:	TSB descriptions pointer
+ * RET0:	status
+ * ERRORS:	Same as for mmu_tsb_ctx0() above.
+ *
+ * Configures the TSBs for the current CPU for virtual addresses with
+ * non-zero contexts.  The TSB descriptions pointer is a pointer to an
+ * array of the given number of TSB descriptions.
+ *
+ * Note: A maximum of 16 TSBs may be specified in the TSB description list.
+ */
+#define HV_FAST_MMU_TSB_CTXNON0		0x21
+
+/* mmu_demap_page()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MMU_DEMAP_PAGE
+ * ARG0:	reserved, must be zero
+ * ARG1:	reserved, must be zero
+ * ARG2:	virtual address
+ * ARG3:	mmu context
+ * ARG4:	flags (HV_MMU_{IMMU,DMMU})
+ * RET0:	status
+ * ERRORS:	EINVAL			Invalid virtual address, context, or
+ *					flags value
+ *		ENOTSUPPORTED		ARG0 or ARG1 is non-zero
+ *
+ * Demaps any page mapping of the given virtual address in the given
+ * mmu context for the current virtual CPU.  Any virtually tagged
+ * caches are guaranteed to be kept consistent.  The flags argument
+ * determines which TLB (instruction, or data, or both) participate in
+ * the operation.
+ *
+ * ARG0 and ARG1 are both reserved and must be set to zero.
+ */
+#define HV_FAST_MMU_DEMAP_PAGE		0x22
+
+/* mmu_demap_ctx()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MMU_DEMAP_CTX
+ * ARG0:	reserved, must be zero
+ * ARG1:	reserved, must be zero
+ * ARG2:	mmu context
+ * ARG3:	flags (HV_MMU_{IMMU,DMMU})
+ * RET0:	status
+ * ERRORS:	EINVAL			Invalid context or flags value
+ *		ENOTSUPPORTED		ARG0 or ARG1 is non-zero
+ *
+ * Demaps all non-permanent virtual page mappings previously specified
+ * for the given context for the current virtual CPU.  Any virtual
+ * tagged caches are guaranteed to be kept consistent.  The flags
+ * argument determines which TLB (instruction, or data, or both)
+ * participate in the operation.
+ *
+ * ARG0 and ARG1 are both reserved and must be set to zero.
+ */
+#define HV_FAST_MMU_DEMAP_CTX		0x23
+
+/* mmu_demap_all()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MMU_DEMAP_ALL
+ * ARG0:	reserved, must be zero
+ * ARG1:	reserved, must be zero
+ * ARG2:	flags (HV_MMU_{IMMU,DMMU})
+ * RET0:	status
+ * ERRORS:	EINVAL			Invalid flags value
+ *		ENOTSUPPORTED		ARG0 or ARG1 is non-zero
+ *
+ * Demaps all non-permanent virtual page mappings previously specified
+ * for the current virtual CPU.  Any virtual tagged caches are
+ * guaranteed to be kept consistent.  The flags argument determines
+ * which TLB (instruction, or data, or both) participate in the
+ * operation.
+ *
+ * ARG0 and ARG1 are both reserved and must be set to zero.
+ */
+#define HV_FAST_MMU_DEMAP_ALL		0x24
+
+/* mmu_map_perm_addr()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MMU_MAP_PERM_ADDR
+ * ARG0:	virtual address
+ * ARG1:	reserved, must be zero
+ * ARG2:	TTE
+ * ARG3:	flags (HV_MMU_{IMMU,DMMU})
+ * RET0:	status
+ * ERRORS:	EINVAL			Invalid virtual address or flags value
+ *		EBADPGSZ		Invalid page size value
+ *		ENORADDR		Invalid real address in TTE
+ *		ETOOMANY		Too many mappings (max of 8 reached)
+ *
+ * Create a permanent mapping using the given TTE and virtual address
+ * for context 0 on the calling virtual CPU.  A maximum of 8 such
+ * permanent mappings may be specified by privileged code.  Mappings
+ * may be removed with mmu_unmap_perm_addr().
+ *
+ * The behavior is undefined if a TTE with the valid bit clear is given.
+ *
+ * Note: This call is used to specify address space mappings for which
+ *       privileged code does not expect to receive misses.  For example,
+ *       this mechanism can be used to map kernel nucleus code and data.
+ */
+#define HV_FAST_MMU_MAP_PERM_ADDR	0x25
+
+/* mmu_fault_area_conf()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_MMU_FAULT_AREA_CONF
+ * ARG0:	real address
+ * RET0:	status
+ * RET1:	previous mmu fault area real address
+ * ERRORS:	ENORADDR		Invalid real address
+ *		EBADALIGN		Invalid alignment for fault area
+ *
+ * Configure the MMU fault status area for the calling CPU.  A 64-byte
+ * aligned real address specifies where MMU fault status information
+ * is placed.  The return value is the previously specified area, or 0
+ * for the first invocation.  Specifying a fault area at real address
+ * 0 is not allowed.
+ */
+#define HV_FAST_MMU_FAULT_AREA_CONF	0x26
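
A sketch (not from the patch) of per-cpu boot-time setup tying this service to
struct trap_per_cpu from cpudata.h above: each CPU hands the hypervisor the
physical address of its trap block's embedded fault_info, which the 64-byte
alignment of trap_block guarantees is suitably aligned. hv_mmu_fault_area_conf()
is a hypothetical C wrapper issuing HV_FAST_TRAP with function number
HV_FAST_MMU_FAULT_AREA_CONF:

	static void example_register_fault_area(int cpu)
	{
		struct trap_per_cpu *tb = &trap_block[cpu];
		unsigned long pa = __pa(&tb->fault_info);	/* 64-byte aligned */
		unsigned long prev, status;

		status = hv_mmu_fault_area_conf(pa, &prev);
		if (status != HV_EOK)
			prom_printf("fault_area_conf failed: %lu\n", status);
	}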
713 | |||
714 | /* mmu_enable() | ||
715 | * TRAP: HV_FAST_TRAP | ||
716 | * FUNCTION: HV_FAST_MMU_ENABLE | ||
717 | * ARG0: enable flag | ||
718 | * ARG1: return target address | ||
719 | * RET0: status | ||
720 | * ERRORS: ENORADDR Invalid real address when disabling | ||
721 | * translation. | ||
722 | * EBADALIGN The return target address is not | ||
723 | * aligned to an instruction. | ||
724 | * EINVAL The enable flag requests the current | ||
725 | * operating mode (e.g. disable if already | ||
726 | * disabled) | ||
727 | * | ||
728 | * Enable or disable virtual address translation for the calling CPU | ||
729 | * within the virtual machine domain. If the enable flag is zero, | ||
730 | * translation is disabled; any non-zero value will enable | ||
731 | * translation. | ||
732 | * | ||
733 | * When this function returns, the newly selected translation mode | ||
734 | * will be active. If the mmu is being enabled, then the return | ||
735 | * target address is a virtual address; otherwise it is a real address. | ||
736 | * | ||
737 | * Upon successful completion, control will be returned to the given | ||
738 | * return target address (i.e. the cpu will jump to that address). On | ||
739 | * failure, the previous mmu mode remains and the trap simply returns | ||
740 | * as normal with the appropriate error code in RET0. | ||
741 | */ | ||
742 | #define HV_FAST_MMU_ENABLE 0x27 | ||
743 | |||
744 | /* mmu_unmap_perm_addr() | ||
745 | * TRAP: HV_FAST_TRAP | ||
746 | * FUNCTION: HV_FAST_MMU_UNMAP_PERM_ADDR | ||
747 | * ARG0: virtual address | ||
748 | * ARG1: reserved, must be zero | ||
749 | * ARG2: flags (HV_MMU_{IMMU,DMMU}) | ||
750 | * RET0: status | ||
751 | * ERRORS: EINVAL Invalid virtual address or flags value | ||
752 | * ENOMAP Specified mapping was not found | ||
753 | * | ||
754 | * Demaps any permanent page mapping (established via | ||
755 | * mmu_map_perm_addr()) at the given virtual address for context 0 on | ||
756 | * the current virtual CPU. Any virtual tagged caches are guaranteed | ||
757 | * to be kept consistent. | ||
758 | */ | ||
759 | #define HV_FAST_MMU_UNMAP_PERM_ADDR 0x28 | ||
760 | |||
761 | /* mmu_tsb_ctx0_info() | ||
762 | * TRAP: HV_FAST_TRAP | ||
763 | * FUNCTION: HV_FAST_MMU_TSB_CTX0_INFO | ||
764 | * ARG0: max TSBs | ||
765 | * ARG1: buffer pointer | ||
766 | * RET0: status | ||
767 | * RET1: number of TSBs | ||
768 | * ERRORS: EINVAL Supplied buffer is too small | ||
769 | * EBADALIGN The buffer pointer is badly aligned | ||
770 | * ENORADDR Invalid real address for buffer pointer | ||
771 | * | ||
772 | * Return the TSB configuration as previously defined by mmu_tsb_ctx0() | ||
773 | * into the provided buffer. The size of the buffer is given in ARG0 | ||
774 | * in terms of the number of TSB description entries. | ||
775 | * | ||
776 | * Upon return, RET1 always contains the number of TSB descriptions | ||
777 | * previously configured. If zero TSBs were configured, EOK is | ||
778 | * returned with RET1 containing 0. | ||
779 | */ | ||
780 | #define HV_FAST_MMU_TSB_CTX0_INFO 0x29 | ||
781 | |||
782 | /* mmu_tsb_ctxnon0_info() | ||
783 | * TRAP: HV_FAST_TRAP | ||
784 | * FUNCTION: HV_FAST_MMU_TSB_CTXNON0_INFO | ||
785 | * ARG0: max TSBs | ||
786 | * ARG1: buffer pointer | ||
787 | * RET0: status | ||
788 | * RET1: number of TSBs | ||
789 | * ERRORS: EINVAL Supplied buffer is too small | ||
790 | * EBADALIGN The buffer pointer is badly aligned | ||
791 | * ENORADDR Invalid real address for buffer pointer | ||
792 | * | ||
793 | * Return the TSB configuration as previously defined by | ||
794 | * mmu_tsb_ctxnon0() into the provided buffer. The size of the buffer | ||
795 | * is given in ARG0 in terms of the number of TSB description entries. | ||
796 | * | ||
797 | * Upon return, RET1 always contains the number of TSB descriptions | ||
798 | * previously configured. If zero TSBs were configured, EOK is | ||
799 | * returned with RET1 containing 0. | ||
800 | */ | ||
801 | #define HV_FAST_MMU_TSB_CTXNON0_INFO 0x2a | ||
802 | |||
803 | /* mmu_fault_area_info() | ||
804 | * TRAP: HV_FAST_TRAP | ||
805 | * FUNCTION: HV_FAST_MMU_FAULT_AREA_INFO | ||
806 | * RET0: status | ||
807 | * RET1: fault area real address | ||
808 | * ERRORS: No errors defined. | ||
809 | * | ||
810 | * Return the currently defined MMU fault status area for the current | ||
811 | * CPU. The real address of the fault status area is returned in | ||
812 | * RET1, or 0 is returned in RET1 if no fault status area is defined. | ||
813 | * | ||
814 | * Note: mmu_fault_area_conf() may be called with the return value (RET1) | ||
815 | * from this service if there is a need to save and restore the fault | ||
816 | * area for a cpu. | ||
817 | */ | ||
818 | #define HV_FAST_MMU_FAULT_AREA_INFO 0x2b | ||
819 | |||
820 | /* Cache and Memory services. */ | ||
821 | |||
822 | /* mem_scrub() | ||
823 | * TRAP: HV_FAST_TRAP | ||
824 | * FUNCTION: HV_FAST_MEM_SCRUB | ||
825 | * ARG0: real address | ||
826 | * ARG1: length | ||
827 | * RET0: status | ||
828 | * RET1: length scrubbed | ||
829 | * ERRORS: ENORADDR Invalid real address | ||
830 | * EBADALIGN Start address or length is not correctly | ||
831 | * aligned | ||
832 | * EINVAL Length is zero | ||
833 | * | ||
834 | * Zero the memory contents in the range real address to real address | ||
835 | * plus length minus 1. Also, valid ECC will be generated for that | ||
836 | * memory address range. Scrubbing is started at the given real | ||
837 | * address, but may not scrub the entire given length. The actual | ||
838 | * length scrubbed will be returned in RET1. | ||
839 | * | ||
840 | * The real address and length must be aligned on an 8K boundary, or | ||
841 | * contain the start address and length from a sun4v error report. | ||
842 | * | ||
843 | * Note: There are two uses for this function. The first use is to block clear | ||
844 | * and initialize memory, and the second is to scrub an uncorrectable | ||
845 | * error reported via a resumable or non-resumable trap. The second | ||
846 | * use requires the arguments to be equal to the real address and length | ||
847 | * provided in a sun4v memory error report. | ||
848 | */ | ||
849 | #define HV_FAST_MEM_SCRUB 0x31 | ||
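/* Illustrative sketch only -- not part of this API. Assumes a
 * hypothetical C wrapper hv_mem_scrub() that returns the status
 * (RET0) and stores the length scrubbed (RET1) through its last
 * argument; this header declares no such wrapper. The loop resumes
 * where the hypervisor stopped, per the partial-scrub rule above.
 */
#ifndef __ASSEMBLY__
extern unsigned long hv_mem_scrub(unsigned long ra, unsigned long len,
				  unsigned long *scrubbed);

static inline int scrub_range(unsigned long ra, unsigned long len)
{
	/* ra and len are assumed 8K aligned, or taken from a sun4v
	 * memory error report, as required above.
	 */
	while (len) {
		unsigned long scrubbed;

		if (hv_mem_scrub(ra, len, &scrubbed) != HV_EOK)
			return -1;
		ra += scrubbed;		/* may be less than requested */
		len -= scrubbed;
	}
	return 0;
}
#endif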
850 | |||
851 | /* mem_sync() | ||
852 | * TRAP: HV_FAST_TRAP | ||
853 | * FUNCTION: HV_FAST_MEM_SYNC | ||
854 | * ARG0: real address | ||
855 | * ARG1: length | ||
856 | * RET0: status | ||
857 | * RET1: length synced | ||
858 | * ERRORS: ENORADDR Invalid real address | ||
859 | * EBADALIGN Start address or length is not correctly | ||
860 | * aligned | ||
861 | * EINVAL Length is zero | ||
862 | * | ||
863 | * Force the next access within the real address to real address plus | ||
864 | * length minus 1 to be fetched from main system memory. Less than | ||
865 | * the given length may be synced; the actual amount synced is | ||
866 | * returned in RET1. The real address and length must be aligned on | ||
867 | * an 8K boundary. | ||
868 | */ | ||
869 | #define HV_FAST_MEM_SYNC 0x32 | ||
870 | |||
871 | /* Time of day services. | ||
872 | * | ||
873 | * The hypervisor maintains the time of day on a per-domain basis. | ||
874 | * Changing the time of day in one domain does not affect the time of | ||
875 | * day on any other domain. | ||
876 | * | ||
877 | * Time is described by a single unsigned 64-bit word which is the | ||
878 | * number of seconds since the UNIX Epoch (00:00:00 UTC, January 1, | ||
879 | * 1970). | ||
880 | */ | ||
881 | |||
882 | /* tod_get() | ||
883 | * TRAP: HV_FAST_TRAP | ||
884 | * FUNCTION: HV_FAST_TOD_GET | ||
885 | * RET0: status | ||
886 | * RET1: TOD | ||
887 | * ERRORS: EWOULDBLOCK TOD resource is temporarily unavailable | ||
888 | * ENOTSUPPORTED If TOD not supported on this platform | ||
889 | * | ||
890 | * Return the current time of day. May block if TOD access is | ||
891 | * temporarily not possible. | ||
892 | */ | ||
893 | #define HV_FAST_TOD_GET 0x50 | ||
894 | |||
895 | /* tod_set() | ||
896 | * TRAP: HV_FAST_TRAP | ||
897 | * FUNCTION: HV_FAST_TOD_SET | ||
898 | * ARG0: TOD | ||
899 | * RET0: status | ||
900 | * ERRORS: EWOULDBLOCK TOD resource is temporarily unavailable | ||
901 | * ENOTSUPPORTED If TOD not supported on this platform | ||
902 | * | ||
903 | * The current time of day is set to the value specified in ARG0. May | ||
904 | * block if TOD access is temporarily not possible. | ||
905 | */ | ||
906 | #define HV_FAST_TOD_SET 0x51 | ||
907 | |||
908 | /* Console services */ | ||
909 | |||
910 | /* con_getchar() | ||
911 | * TRAP: HV_FAST_TRAP | ||
912 | * FUNCTION: HV_FAST_CONS_GETCHAR | ||
913 | * RET0: status | ||
914 | * RET1: character | ||
915 | * ERRORS: EWOULDBLOCK No character available. | ||
916 | * | ||
917 | * Returns a character from the console device. If no character is | ||
918 | * available then an EWOULDBLOCK error is returned. If a character is | ||
919 | * available, then the returned status is EOK and the character value | ||
920 | * is in RET1. | ||
921 | * | ||
922 | * A virtual BREAK is represented by the 64-bit value -1. | ||
923 | * | ||
924 | * A virtual HUP signal is represented by the 64-bit value -2. | ||
925 | */ | ||
926 | #define HV_FAST_CONS_GETCHAR 0x60 | ||
927 | |||
928 | /* con_putchar() | ||
929 | * TRAP: HV_FAST_TRAP | ||
930 | * FUNCTION: HV_FAST_CONS_PUTCHAR | ||
931 | * ARG0: character | ||
932 | * RET0: status | ||
933 | * ERRORS: EINVAL Illegal character | ||
934 | * EWOULDBLOCK Output buffer currently full, would block | ||
935 | * | ||
936 | * Send a character to the console device. Only character values | ||
937 | * between 0 and 255 may be used. Values outside this range are | ||
938 | * invalid except for the 64-bit value -1 which is used to send a | ||
939 | * virtual BREAK. | ||
940 | */ | ||
941 | #define HV_FAST_CONS_PUTCHAR 0x61 | ||
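/* Illustrative sketch only. hv_con_getchar() is a hypothetical
 * wrapper (not declared in this header) that returns the status
 * (RET0) and stores the character (RET1) through its argument.
 */
#ifndef __ASSEMBLY__
extern unsigned long hv_con_getchar(long *ch);

static inline int console_poll(long *ch)
{
	if (hv_con_getchar(ch) != HV_EOK)
		return 0;	/* EWOULDBLOCK: nothing available */
	/* *ch is now 0-255 for a character, -1 for a virtual BREAK,
	 * or -2 for a virtual HUP.
	 */
	return 1;
}
#endif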
942 | |||
943 | /* Trap trace services. | ||
944 | * | ||
945 | * The hypervisor provides a trap tracing capability for privileged | ||
946 | * code running on each virtual CPU. Privileged code provides a | ||
947 | * round-robin trap trace queue within which the hypervisor writes | ||
948 | * 64-byte entries detailing hyperprivileged traps taken on behalf of | ||
949 | * privileged code. This is provided as a debugging capability for | ||
950 | * privileged code. | ||
951 | * | ||
952 | * The trap trace control structure is 64-bytes long and placed at the | ||
953 | * start (offset 0) of the trap trace buffer, and is described as | ||
954 | * follows: | ||
955 | */ | ||
956 | #ifndef __ASSEMBLY__ | ||
957 | struct hv_trap_trace_control { | ||
958 | unsigned long head_offset; | ||
959 | unsigned long tail_offset; | ||
960 | unsigned long __reserved[0x30 / sizeof(unsigned long)]; | ||
961 | }; | ||
962 | #endif | ||
963 | #define HV_TRAP_TRACE_CTRL_HEAD_OFFSET 0x00 | ||
964 | #define HV_TRAP_TRACE_CTRL_TAIL_OFFSET 0x08 | ||
965 | |||
966 | /* The head offset is the offset of the most recently completed entry | ||
967 | * in the trap-trace buffer. The tail offset is the offset of the | ||
968 | * next entry to be written. The control structure is owned and | ||
969 | * modified by the hypervisor. A guest may not modify the control | ||
970 | * structure contents. Attempts to do so will result in undefined | ||
971 | * behavior for the guest. | ||
972 | * | ||
973 | * Each trap trace buffer entry is laid out as follows: | ||
974 | */ | ||
975 | #ifndef __ASSEMBLY__ | ||
976 | struct hv_trap_trace_entry { | ||
977 | unsigned char type; /* Hypervisor or guest entry? */ | ||
978 | unsigned char hpstate; /* Hyper-privileged state */ | ||
979 | unsigned char tl; /* Trap level */ | ||
980 | unsigned char gl; /* Global register level */ | ||
981 | unsigned short tt; /* Trap type */ | ||
982 | unsigned short tag; /* Extended trap identifier */ | ||
983 | unsigned long tstate; /* Trap state */ | ||
984 | unsigned long tick; /* Tick */ | ||
985 | unsigned long tpc; /* Trap PC */ | ||
986 | unsigned long f1; /* Entry specific */ | ||
987 | unsigned long f2; /* Entry specific */ | ||
988 | unsigned long f3; /* Entry specific */ | ||
989 | unsigned long f4; /* Entry specific */ | ||
990 | }; | ||
991 | #endif | ||
992 | #define HV_TRAP_TRACE_ENTRY_TYPE 0x00 | ||
993 | #define HV_TRAP_TRACE_ENTRY_HPSTATE 0x01 | ||
994 | #define HV_TRAP_TRACE_ENTRY_TL 0x02 | ||
995 | #define HV_TRAP_TRACE_ENTRY_GL 0x03 | ||
996 | #define HV_TRAP_TRACE_ENTRY_TT 0x04 | ||
997 | #define HV_TRAP_TRACE_ENTRY_TAG 0x06 | ||
998 | #define HV_TRAP_TRACE_ENTRY_TSTATE 0x08 | ||
999 | #define HV_TRAP_TRACE_ENTRY_TICK 0x10 | ||
1000 | #define HV_TRAP_TRACE_ENTRY_TPC 0x18 | ||
1001 | #define HV_TRAP_TRACE_ENTRY_F1 0x20 | ||
1002 | #define HV_TRAP_TRACE_ENTRY_F2 0x28 | ||
1003 | #define HV_TRAP_TRACE_ENTRY_F3 0x30 | ||
1004 | #define HV_TRAP_TRACE_ENTRY_F4 0x38 | ||
1005 | |||
1006 | /* The type field is encoded as follows. */ | ||
1007 | #define HV_TRAP_TYPE_UNDEF 0x00 /* Entry content undefined */ | ||
1008 | #define HV_TRAP_TYPE_HV 0x01 /* Hypervisor trap entry */ | ||
1009 | #define HV_TRAP_TYPE_GUEST 0xff /* Added via ttrace_addentry() */ | ||
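/* Illustrative sketch only: draining completed trace entries. The
 * guest keeps its own read cursor (a byte offset into the buffer)
 * and walks it up to the hypervisor-published head offset, wrapping
 * past the leading control structure. Everything beyond the two
 * structures above is an assumption of this sketch, including the
 * exact head/cursor convention.
 */
#ifndef __ASSEMBLY__
static inline unsigned long ttrace_drain(char *buf, unsigned long buf_size,
					 unsigned long cursor)
{
	struct hv_trap_trace_control *ctl =
		(struct hv_trap_trace_control *) buf;

	while (cursor != ctl->head_offset) {
		struct hv_trap_trace_entry *ent =
			(struct hv_trap_trace_entry *) (buf + cursor);

		/* ... inspect ent->tt, ent->tpc, ent->tick, etc. ... */

		cursor += sizeof(*ent);		/* entries are 64 bytes */
		if (cursor >= buf_size)
			cursor = sizeof(*ctl);	/* skip the control block */
	}
	return cursor;
}
#endif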
1010 | |||
1011 | /* ttrace_buf_conf() | ||
1012 | * TRAP: HV_FAST_TRAP | ||
1013 | * FUNCTION: HV_FAST_TTRACE_BUF_CONF | ||
1014 | * ARG0: real address | ||
1015 | * ARG1: number of entries | ||
1016 | * RET0: status | ||
1017 | * RET1: number of entries | ||
1018 | * ERRORS: ENORADDR Invalid real address | ||
1019 | * EINVAL Size is too small | ||
1020 | * EBADALIGN Real address not aligned on 64-byte boundary | ||
1021 | * | ||
1022 | * Requests hypervisor trap tracing and declares a virtual CPU's trap | ||
1023 | * trace buffer to the hypervisor. The real address supplies the real | ||
1024 | * base address of the trap trace queue and must be 64-byte aligned. | ||
1025 | * Specifying a value of 0 for the number of entries disables trap | ||
1026 | * tracing for the calling virtual CPU. The buffer allocated must be | ||
1027 | * sized for a power of two number of 64-byte trap trace entries plus | ||
1028 | * an initial 64-byte control structure. | ||
1029 | * | ||
1030 | * This may be invoked any number of times so that a virtual CPU may | ||
1031 | * relocate a trap trace buffer or create "snapshots" of information. | ||
1032 | * | ||
1033 | * If the real address is illegal or badly aligned, then trap tracing | ||
1034 | * is disabled and an error is returned. | ||
1035 | * | ||
1036 | * Upon failure with EINVAL, this service call returns in RET1 the | ||
1037 | * minimum number of buffer entries required. Upon other failures | ||
1038 | * RET1 is undefined. | ||
1039 | */ | ||
1040 | #define HV_FAST_TTRACE_BUF_CONF 0x90 | ||
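/* Worked example of the sizing rule above, assuming the entry count
 * must be a power of two: a 1024-entry trace buffer occupies the
 * 64-byte control structure plus 1024 * 64 bytes of entries, i.e.
 * 64 + 65536 = 65600 bytes, 64-byte aligned. Illustrative only.
 */
#ifndef __ASSEMBLY__
static inline unsigned long ttrace_buf_bytes(unsigned long nentries)
{
	return 64 + (nentries * 64);	/* control block + entries */
}
#endif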
1041 | |||
1042 | /* ttrace_buf_info() | ||
1043 | * TRAP: HV_FAST_TRAP | ||
1044 | * FUNCTION: HV_FAST_TTRACE_BUF_INFO | ||
1045 | * RET0: status | ||
1046 | * RET1: real address | ||
1047 | * RET2: size | ||
1048 | * ERRORS: None defined. | ||
1049 | * | ||
1050 | * Returns the size and location of the previously declared trap-trace | ||
1051 | * buffer. In the event that no buffer was previously defined, or the | ||
1052 | * buffer is disabled, this call will return a size of zero bytes. | ||
1053 | */ | ||
1054 | #define HV_FAST_TTRACE_BUF_INFO 0x91 | ||
1055 | |||
1056 | /* ttrace_enable() | ||
1057 | * TRAP: HV_FAST_TRAP | ||
1058 | * FUNCTION: HV_FAST_TTRACE_ENABLE | ||
1059 | * ARG0: enable | ||
1060 | * RET0: status | ||
1061 | * RET1: previous enable state | ||
1062 | * ERRORS: EINVAL No trap trace buffer currently defined | ||
1063 | * | ||
1064 | * Enable or disable trap tracing, and return the previous enabled | ||
1065 | * state in RET1. Future systems may define various flags for the | ||
1066 | * enable argument (ARG0); for the moment a guest should pass | ||
1067 | * "(uint64_t) -1" to enable, and "(uint64_t) 0" to disable all | ||
1068 | * tracing - which will ensure future compatibility. | ||
1069 | */ | ||
1070 | #define HV_FAST_TTRACE_ENABLE 0x92 | ||
1071 | |||
1072 | /* ttrace_freeze() | ||
1073 | * TRAP: HV_FAST_TRAP | ||
1074 | * FUNCTION: HV_FAST_TTRACE_FREEZE | ||
1075 | * ARG0: freeze | ||
1076 | * RET0: status | ||
1077 | * RET1: previous freeze state | ||
1078 | * ERRORS: EINVAL No trap trace buffer currently defined | ||
1079 | * | ||
1080 | * Freeze or unfreeze trap tracing, returning the previous freeze | ||
1081 | * state in RET1. A guest should pass a non-zero value to freeze and | ||
1082 | * a zero value to unfreeze all tracing. The returned previous state | ||
1083 | * is 0 for not frozen and 1 for frozen. | ||
1084 | */ | ||
1085 | #define HV_FAST_TTRACE_FREEZE 0x93 | ||
1086 | |||
1087 | /* ttrace_addentry() | ||
1088 | * TRAP: HV_TTRACE_ADDENTRY_TRAP | ||
1089 | * ARG0: tag (16-bits) | ||
1090 | * ARG1: data word 0 | ||
1091 | * ARG2: data word 1 | ||
1092 | * ARG3: data word 2 | ||
1093 | * ARG4: data word 3 | ||
1094 | * RET0: status | ||
1095 | * ERRORS: EINVAL No trap trace buffer currently defined | ||
1096 | * | ||
1097 | * Add an entry to the trap trace buffer. Upon return only ARG0/RET0 | ||
1098 | * is modified - none of the other registers holding arguments are | ||
1099 | * volatile across this hypervisor service. | ||
1100 | */ | ||
1101 | |||
1102 | /* Core dump services. | ||
1103 | * | ||
1104 | * Since the hypervisor virtualizes and thus obscures a lot of the | ||
1105 | * physical machine layout and state, traditional OS crash dumps can | ||
1106 | * be difficult to diagnose especially when the problem is a | ||
1107 | * configuration error of some sort. | ||
1108 | * | ||
1109 | * The dump services provide an opaque buffer into which the | ||
1110 | * hypervisor can place its internal state in order to assist in | ||
1111 | * debugging such situations. The contents are opaque and extremely | ||
1112 | * platform and hypervisor implementation specific. The guest, during | ||
1113 | * a core dump, requests that the hypervisor update any information in | ||
1114 | * the dump buffer in preparation for being dumped as part of the | ||
1115 | * domain's memory image. | ||
1116 | */ | ||
1117 | |||
1118 | /* dump_buf_update() | ||
1119 | * TRAP: HV_FAST_TRAP | ||
1120 | * FUNCTION: HV_FAST_DUMP_BUF_UPDATE | ||
1121 | * ARG0: real address | ||
1122 | * ARG1: size | ||
1123 | * RET0: status | ||
1124 | * RET1: required size of dump buffer | ||
1125 | * ERRORS: ENORADDR Invalid real address | ||
1126 | * EBADALIGN Real address is not aligned on a 64-byte | ||
1127 | * boundary | ||
1128 | * EINVAL Size is non-zero but less than minimum size | ||
1129 | * required | ||
1130 | * ENOTSUPPORTED Operation not supported on current logical | ||
1131 | * domain | ||
1132 | * | ||
1133 | * Declare a domain dump buffer to the hypervisor. The real address | ||
1134 | * provided for the domain dump buffer must be 64-byte aligned. The | ||
1135 | * size specifies the size of the dump buffer and may be larger than | ||
1136 | * the minimum size specified in the machine description. The | ||
1137 | * hypervisor will fill the dump buffer with opaque data. | ||
1138 | * | ||
1139 | * Note: A guest may elect to include dump buffer contents as part of a crash | ||
1140 | * dump to assist with debugging. This function may be called any number | ||
1141 | * of times so that a guest may relocate a dump buffer, or create | ||
1142 | * "snapshots" of any dump-buffer information. Each call to | ||
1143 | * dump_buf_update() atomically declares the new dump buffer to the | ||
1144 | * hypervisor. | ||
1145 | * | ||
1146 | * A specified size of 0 unconfigures the dump buffer. If the real | ||
1147 | * address is illegal or badly aligned, then any currently active dump | ||
1148 | * buffer is disabled and an error is returned. | ||
1149 | * | ||
1150 | * In the event that the call fails with EINVAL, RET1 contains the | ||
1151 | * minimum size required by the hypervisor for a valid dump buffer. | ||
1152 | */ | ||
1153 | #define HV_FAST_DUMP_BUF_UPDATE 0x94 | ||
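/* Illustrative sketch only. hv_dump_buf_update() is a hypothetical
 * wrapper (not declared here) returning the status (RET0) and
 * storing RET1 through its last argument; HV_EINVAL names the EINVAL
 * status code. On EINVAL the hint tells the caller how large a
 * 64-byte-aligned buffer to allocate before retrying.
 */
#ifndef __ASSEMBLY__
extern unsigned long hv_dump_buf_update(unsigned long ra, unsigned long size,
					unsigned long *min_size);

static inline int dump_buf_declare(unsigned long ra, unsigned long size,
				   unsigned long *need)
{
	unsigned long status = hv_dump_buf_update(ra, size, need);

	if (status == HV_EOK)
		return 0;
	if (status == HV_EINVAL)
		return 1;	/* *need holds the minimum valid size */
	return -1;
}
#endif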
1154 | |||
1155 | /* dump_buf_info() | ||
1156 | * TRAP: HV_FAST_TRAP | ||
1157 | * FUNCTION: HV_FAST_DUMP_BUF_INFO | ||
1158 | * RET0: status | ||
1159 | * RET1: real address of current dump buffer | ||
1160 | * RET2: size of current dump buffer | ||
1161 | * ERRORS: No errors defined. | ||
1162 | * | ||
1163 | * Return the currently configured dump buffer description. A | ||
1164 | * returned size of 0 bytes indicates an undefined dump buffer. In | ||
1165 | * this case the address returned in RET1 is undefined. | ||
1166 | */ | ||
1167 | #define HV_FAST_DUMP_BUF_INFO 0x95 | ||
1168 | |||
1169 | /* Device interrupt services. | ||
1170 | * | ||
1171 | * Device interrupts are allocated to system bus bridges by the hypervisor, | ||
1172 | * and described to OBP in the machine description. OBP then describes | ||
1173 | * these interrupts to the OS via properties in the device tree. | ||
1174 | * | ||
1175 | * Terminology: | ||
1176 | * | ||
1177 | * cpuid Unique opaque value which represents a target cpu. | ||
1178 | * | ||
1179 | * devhandle Device handle. It uniquely identifies a device, and | ||
1180 | * consists of the lower 28-bits of the hi-cell of the | ||
1181 | * first entry of the device's "reg" property in the | ||
1182 | * OBP device tree. | ||
1183 | * | ||
1184 | * devino Device interrupt number. Specifies the relative | ||
1185 | * interrupt number within the device. The unique | ||
1186 | * combination of devhandle and devino are used to | ||
1187 | * identify a specific device interrupt. | ||
1188 | * | ||
1189 | * Note: The devino value is the same as the values in the | ||
1190 | * "interrupts" property or "interrupt-map" property | ||
1191 | * in the OBP device tree for that device. | ||
1192 | * | ||
1193 | * sysino System interrupt number. A 64-bit unsigned integer | ||
1194 | * representing a unique interrupt within a virtual | ||
1195 | * machine. | ||
1196 | * | ||
1197 | * intr_state A flag representing the interrupt state for a given | ||
1198 | * sysino. The state values are defined below. | ||
1199 | * | ||
1200 | * intr_enabled A flag representing the 'enabled' state for a given | ||
1201 | * sysino. The enable values are defined below. | ||
1202 | */ | ||
1203 | |||
1204 | #define HV_INTR_STATE_IDLE 0 /* Nothing pending */ | ||
1205 | #define HV_INTR_STATE_RECEIVED 1 /* Interrupt received by hardware */ | ||
1206 | #define HV_INTR_STATE_DELIVERED 2 /* Interrupt delivered to queue */ | ||
1207 | |||
1208 | #define HV_INTR_DISABLED 0 /* sysino not enabled */ | ||
1209 | #define HV_INTR_ENABLED 1 /* sysino enabled */ | ||
1210 | |||
1211 | /* intr_devino_to_sysino() | ||
1212 | * TRAP: HV_FAST_TRAP | ||
1213 | * FUNCTION: HV_FAST_INTR_DEVINO2SYSINO | ||
1214 | * ARG0: devhandle | ||
1215 | * ARG1: devino | ||
1216 | * RET0: status | ||
1217 | * RET1: sysino | ||
1218 | * ERRORS: EINVAL Invalid devhandle/devino | ||
1219 | * | ||
1220 | * Converts a device specific interrupt number of the given | ||
1221 | * devhandle/devino into a system specific ino (sysino). | ||
1222 | */ | ||
1223 | #define HV_FAST_INTR_DEVINO2SYSINO 0xa0 | ||
1224 | |||
1225 | #ifndef __ASSEMBLY__ | ||
1226 | extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle, | ||
1227 | unsigned long devino); | ||
1228 | #endif | ||
1229 | |||
1230 | /* intr_getenabled() | ||
1231 | * TRAP: HV_FAST_TRAP | ||
1232 | * FUNCTION: HV_FAST_INTR_GETENABLED | ||
1233 | * ARG0: sysino | ||
1234 | * RET0: status | ||
1235 | * RET1: intr_enabled (HV_INTR_{DISABLED,ENABLED}) | ||
1236 | * ERRORS: EINVAL Invalid sysino | ||
1237 | * | ||
1238 | * Returns interrupt enabled state in RET1 for the interrupt defined | ||
1239 | * by the given sysino. | ||
1240 | */ | ||
1241 | #define HV_FAST_INTR_GETENABLED 0xa1 | ||
1242 | |||
1243 | #ifndef __ASSEMBLY__ | ||
1244 | extern unsigned long sun4v_intr_getenabled(unsigned long sysino); | ||
1245 | #endif | ||
1246 | |||
1247 | /* intr_setenabled() | ||
1248 | * TRAP: HV_FAST_TRAP | ||
1249 | * FUNCTION: HV_FAST_INTR_SETENABLED | ||
1250 | * ARG0: sysino | ||
1251 | * ARG1: intr_enabled (HV_INTR_{DISABLED,ENABLED}) | ||
1252 | * RET0: status | ||
1253 | * ERRORS: EINVAL Invalid sysino or intr_enabled value | ||
1254 | * | ||
1255 | * Set the 'enabled' state of the interrupt sysino. | ||
1256 | */ | ||
1257 | #define HV_FAST_INTR_SETENABLED 0xa2 | ||
1258 | |||
1259 | #ifndef __ASSEMBLY__ | ||
1260 | extern unsigned long sun4v_intr_setenabled(unsigned long sysino, unsigned long intr_enabled); | ||
1261 | #endif | ||
1262 | |||
1263 | /* intr_getstate() | ||
1264 | * TRAP: HV_FAST_TRAP | ||
1265 | * FUNCTION: HV_FAST_INTR_GETSTATE | ||
1266 | * ARG0: sysino | ||
1267 | * RET0: status | ||
1268 | * RET1: intr_state (HV_INTR_STATE_*) | ||
1269 | * ERRORS: EINVAL Invalid sysino | ||
1270 | * | ||
1271 | * Returns current state of the interrupt defined by the given sysino. | ||
1272 | */ | ||
1273 | #define HV_FAST_INTR_GETSTATE 0xa3 | ||
1274 | |||
1275 | #ifndef __ASSEMBLY__ | ||
1276 | extern unsigned long sun4v_intr_getstate(unsigned long sysino); | ||
1277 | #endif | ||
1278 | |||
1279 | /* intr_setstate() | ||
1280 | * TRAP: HV_FAST_TRAP | ||
1281 | * FUNCTION: HV_FAST_INTR_SETSTATE | ||
1282 | * ARG0: sysino | ||
1283 | * ARG1: intr_state (HV_INTR_STATE_*) | ||
1284 | * RET0: status | ||
1285 | * ERRORS: EINVAL Invalid sysino or intr_state value | ||
1286 | * | ||
1287 | * Sets the current state of the interrupt described by the given sysino | ||
1288 | * value. | ||
1289 | * | ||
1290 | * Note: Setting the state to HV_INTR_STATE_IDLE clears any pending | ||
1291 | * interrupt for sysino. | ||
1292 | */ | ||
1293 | #define HV_FAST_INTR_SETSTATE 0xa4 | ||
1294 | |||
1295 | #ifndef __ASSEMBLY__ | ||
1296 | extern unsigned long sun4v_intr_setstate(unsigned long sysino, unsigned long intr_state); | ||
1297 | #endif | ||
1298 | |||
1299 | /* intr_gettarget() | ||
1300 | * TRAP: HV_FAST_TRAP | ||
1301 | * FUNCTION: HV_FAST_INTR_GETTARGET | ||
1302 | * ARG0: sysino | ||
1303 | * RET0: status | ||
1304 | * RET1: cpuid | ||
1305 | * ERRORS: EINVAL Invalid sysino | ||
1306 | * | ||
1307 | * Returns CPU that is the current target of the interrupt defined by | ||
1308 | * the given sysino. The CPU value returned is undefined if the target | ||
1309 | * has not been set via intr_settarget(). | ||
1310 | */ | ||
1311 | #define HV_FAST_INTR_GETTARGET 0xa5 | ||
1312 | |||
1313 | #ifndef __ASSEMBLY__ | ||
1314 | extern unsigned long sun4v_intr_gettarget(unsigned long sysino); | ||
1315 | #endif | ||
1316 | |||
1317 | /* intr_settarget() | ||
1318 | * TRAP: HV_FAST_TRAP | ||
1319 | * FUNCTION: HV_FAST_INTR_SETTARGET | ||
1320 | * ARG0: sysino | ||
1321 | * ARG1: cpuid | ||
1322 | * RET0: status | ||
1323 | * ERRORS: EINVAL Invalid sysino | ||
1324 | * ENOCPU Invalid cpuid | ||
1325 | * | ||
1326 | * Set the target CPU for the interrupt defined by the given sysino. | ||
1327 | */ | ||
1328 | #define HV_FAST_INTR_SETTARGET 0xa6 | ||
1329 | |||
1330 | #ifndef __ASSEMBLY__ | ||
1331 | extern unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid); | ||
1332 | #endif | ||
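/* Illustrative sketch only: routing and enabling one device interrupt
 * with the wrappers declared above. It assumes the sun4v_intr_*()
 * wrappers return the status (RET0) and that sun4v_devino_to_sysino()
 * hands back the sysino itself.
 */
#ifndef __ASSEMBLY__
static inline void example_intr_setup(unsigned long devhandle,
				      unsigned long devino,
				      unsigned long cpuid)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	sun4v_intr_settarget(sysino, cpuid);		 /* route to cpu */
	sun4v_intr_setstate(sysino, HV_INTR_STATE_IDLE); /* clear pending */
	sun4v_intr_setenabled(sysino, HV_INTR_ENABLED);
}
#endif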
1333 | |||
1334 | /* PCI IO services. | ||
1335 | * | ||
1336 | * See the terminology descriptions in the device interrupt services | ||
1337 | * section above as those apply here too. Here are terminology | ||
1338 | * definitions specific to these PCI IO services: | ||
1339 | * | ||
1340 | * tsbnum TSB number. Identifies which io-tsb is used. | ||
1341 | * For this version of the specification, tsbnum | ||
1342 | * must be zero. | ||
1343 | * | ||
1344 | * tsbindex TSB index. Identifies which entry in the TSB | ||
1345 | * is used. The first entry is zero. | ||
1346 | * | ||
1347 | * tsbid A 64-bit aligned data structure which contains | ||
1348 | * a tsbnum and a tsbindex. Bits 63:32 contain the | ||
1349 | * tsbnum and bits 31:00 contain the tsbindex. | ||
1350 | * | ||
1351 | * Use the HV_PCI_TSBID() macro to construct such | ||
1352 | * values. | ||
1353 | * | ||
1354 | * io_attributes IO attributes for IOMMU mappings. One or more | ||
1355 | * of the attribute bits are stored in a 64-bit | ||
1356 | * value. The values are defined below. | ||
1357 | * | ||
1358 | * r_addr 64-bit real address | ||
1359 | * | ||
1360 | * pci_device PCI device address. A PCI device address identifies | ||
1361 | * a specific device on a specific PCI bus segment. | ||
1362 | * A PCI device address is a 32-bit unsigned integer | ||
1363 | * with the following format: | ||
1364 | * | ||
1365 | * 00000000.bbbbbbbb.dddddfff.00000000 | ||
1366 | * | ||
1367 | * Use the HV_PCI_DEVICE_BUILD() macro to construct | ||
1368 | * such values. | ||
1369 | * | ||
1370 | * pci_config_offset | ||
1371 | * PCI configuration space offset. For conventional | ||
1372 | * PCI a value between 0 and 255. For extended | ||
1373 | * configuration space, a value between 0 and 4095. | ||
1374 | * | ||
1375 | * Note: For PCI configuration space accesses, the offset | ||
1376 | * must be aligned to the access size. | ||
1377 | * | ||
1378 | * error_flag A return value which specifies if the action succeeded | ||
1379 | * or failed. 0 means no error, non-0 means some error | ||
1380 | * occurred while performing the service. | ||
1381 | * | ||
1382 | * io_sync_direction | ||
1383 | * Direction definition for pci_dma_sync(), defined | ||
1384 | * below in HV_PCI_SYNC_*. | ||
1385 | * | ||
1386 | * io_page_list A list of io_page_addresses; an io_page_address is | ||
1387 | * a real address. | ||
1388 | * | ||
1389 | * io_page_list_p A pointer to an io_page_list. | ||
1390 | * | ||
1391 | * "size based byte swap" - Some functions do size based byte swapping | ||
1392 | * which allows software to access pointers and | ||
1393 | * counters in native form when the processor | ||
1394 | * operates in a different endianness than the | ||
1395 | * IO bus. Size-based byte swapping converts a | ||
1396 | * multi-byte field between big-endian and | ||
1397 | * little-endian format. | ||
1398 | */ | ||
1399 | |||
1400 | #define HV_PCI_MAP_ATTR_READ 0x01 | ||
1401 | #define HV_PCI_MAP_ATTR_WRITE 0x02 | ||
1402 | |||
1403 | #define HV_PCI_DEVICE_BUILD(b,d,f) \ | ||
1404 | ((((b) & 0xff) << 16) | \ | ||
1405 | (((d) & 0x1f) << 11) | \ | ||
1406 | (((f) & 0x07) << 8)) | ||
1407 | |||
1408 | #define HV_PCI_TSBID(__tsb_num, __tsb_index) \ | ||
1409 | ((((u64)(__tsb_num)) << 32UL) | ((u64)(__tsb_index))) | ||
1410 | |||
1411 | #define HV_PCI_SYNC_FOR_DEVICE 0x01 | ||
1412 | #define HV_PCI_SYNC_FOR_CPU 0x02 | ||
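/* Worked examples for the constructors above: bus 0x02, device 0x01,
 * function 0 packs to 0x00020800 under the 00000000.bbbbbbbb.dddddfff
 * layout, and TSB number 0 with TSB index 37 packs to the 64-bit
 * value 0x25. The EXAMPLE_* names are illustrative only.
 */
#define EXAMPLE_PCI_DEVICE	HV_PCI_DEVICE_BUILD(0x02, 0x01, 0x00)
#define EXAMPLE_TSBID		HV_PCI_TSBID(0, 37)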
1413 | |||
1414 | /* pci_iommu_map() | ||
1415 | * TRAP: HV_FAST_TRAP | ||
1416 | * FUNCTION: HV_FAST_PCI_IOMMU_MAP | ||
1417 | * ARG0: devhandle | ||
1418 | * ARG1: tsbid | ||
1419 | * ARG2: #ttes | ||
1420 | * ARG3: io_attributes | ||
1421 | * ARG4: io_page_list_p | ||
1422 | * RET0: status | ||
1423 | * RET1: #ttes mapped | ||
1424 | * ERRORS: EINVAL Invalid devhandle/tsbnum/tsbindex/io_attributes | ||
1425 | * EBADALIGN Improperly aligned real address | ||
1426 | * ENORADDR Invalid real address | ||
1427 | * | ||
1428 | * Create IOMMU mappings in the sun4v device defined by the given | ||
1429 | * devhandle. The mappings are created in the TSB defined by the | ||
1430 | * tsbnum component of the given tsbid. The first mapping is created | ||
1431 | * in the TSB index defined by the tsbindex component of the given tsbid. | ||
1432 | * The call creates up to #ttes mappings, the first one at tsbnum, tsbindex, | ||
1433 | * the second at tsbnum, tsbindex + 1, etc. | ||
1434 | * | ||
1435 | * All mappings are created with the attributes defined by the io_attributes | ||
1436 | * argument. The page mapping addresses are described in the io_page_list | ||
1437 | * defined by the given io_page_list_p, which is a pointer to the io_page_list. | ||
1438 | * The first entry in the io_page_list is the address for the first iotte, the | ||
1439 | * 2nd for the 2nd iotte, and so on. | ||
1440 | * | ||
1441 | * Each io_page_address in the io_page_list must be appropriately aligned. | ||
1442 | * #ttes must be greater than zero. For this version of the spec, the tsbnum | ||
1443 | * component of the given tsbid must be zero. | ||
1444 | * | ||
1445 | * Returns the actual number of mappings created, which may be less than | ||
1446 | * or equal to the argument #ttes. If the function returns a value which | ||
1447 | * is less than the #ttes, the caller may continue to call the function with | ||
1448 | * updated tsbid, #ttes, and io_page_list_p arguments until all pages are | ||
1449 | * mapped. | ||
1450 | * | ||
1451 | * Note: This function does not imply an iotte cache flush. The guest must | ||
1452 | * demap an entry before re-mapping it. | ||
1453 | */ | ||
1454 | #define HV_FAST_PCI_IOMMU_MAP 0xb0 | ||
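/* Illustrative sketch only: mapping a whole io_page_list by resuming
 * where the hypervisor stopped. hv_pci_iommu_map() is a hypothetical
 * wrapper (not declared here) returning the status (RET0) and storing
 * the number of ttes mapped (RET1) through its last argument; the
 * 8-byte io_page_list entry size is likewise an assumption.
 */
#ifndef __ASSEMBLY__
extern unsigned long hv_pci_iommu_map(unsigned long devhandle,
				      unsigned long tsbid,
				      unsigned long nttes,
				      unsigned long io_attributes,
				      unsigned long io_page_list_ra,
				      unsigned long *nmapped);

static inline int iommu_map_all(unsigned long devhandle,
				unsigned long tsbindex,
				unsigned long nttes,
				unsigned long io_page_list_ra)
{
	unsigned long attrs = HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE;

	while (nttes) {
		unsigned long done;

		if (hv_pci_iommu_map(devhandle, HV_PCI_TSBID(0, tsbindex),
				     nttes, attrs, io_page_list_ra,
				     &done) != HV_EOK)
			return -1;
		tsbindex += done;		/* resume at next index */
		nttes -= done;
		io_page_list_ra += done * 8;	/* skip consumed entries */
	}
	return 0;
}
#endif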
1455 | |||
1456 | /* pci_iommu_demap() | ||
1457 | * TRAP: HV_FAST_TRAP | ||
1458 | * FUNCTION: HV_FAST_PCI_IOMMU_DEMAP | ||
1459 | * ARG0: devhandle | ||
1460 | * ARG1: tsbid | ||
1461 | * ARG2: #ttes | ||
1462 | * RET0: status | ||
1463 | * RET1: #ttes demapped | ||
1464 | * ERRORS: EINVAL Invalid devhandle/tsbnum/tsbindex | ||
1465 | * | ||
1466 | * Demap and flush IOMMU mappings in the device defined by the given | ||
1467 | * devhandle. Demaps up to #ttes entries in the TSB defined by the tsbnum | ||
1468 | * component of the given tsbid, starting at the TSB index defined by the | ||
1469 | * tsbindex component of the given tsbid. | ||
1470 | * | ||
1471 | * For this version of the spec, the tsbnum of the given tsbid must be zero. | ||
1472 | * #ttes must be greater than zero. | ||
1473 | * | ||
1474 | * Returns the actual number of ttes demapped, which may be less than or equal | ||
1475 | * to the argument #ttes. If #ttes demapped is less than #ttes, the caller | ||
1476 | * may continue to call this function with updated tsbid and #ttes arguments | ||
1477 | * until all pages are demapped. | ||
1478 | * | ||
1479 | * Note: Entries do not have to be mapped to be demapped. A demap of an | ||
1480 | * unmapped page will flush the entry from the tte cache. | ||
1481 | */ | ||
1482 | #define HV_FAST_PCI_IOMMU_DEMAP 0xb1 | ||
1483 | |||
1484 | /* pci_iommu_getmap() | ||
1485 | * TRAP: HV_FAST_TRAP | ||
1486 | * FUNCTION: HV_FAST_PCI_IOMMU_GETMAP | ||
1487 | * ARG0: devhandle | ||
1488 | * ARG1: tsbid | ||
1489 | * RET0: status | ||
1490 | * RET1: io_attributes | ||
1491 | * RET2: real address | ||
1492 | * ERRORS: EINVAL Invalid devhandle/tsbnum/tsbindex | ||
1493 | * ENOMAP Mapping is not valid, no translation exists | ||
1494 | * | ||
1495 | * Read and return the mapping in the device described by the given devhandle | ||
1496 | * and tsbid. If successful, the io_attributes shall be returned in RET1 | ||
1497 | * and the page address of the mapping shall be returned in RET2. | ||
1498 | * | ||
1499 | * For this version of the spec, the tsbnum component of the given tsbid | ||
1500 | * must be zero. | ||
1501 | */ | ||
1502 | #define HV_FAST_PCI_IOMMU_GETMAP 0xb2 | ||
1503 | |||
1504 | /* pci_iommu_getbypass() | ||
1505 | * TRAP: HV_FAST_TRAP | ||
1506 | * FUNCTION: HV_FAST_PCI_IOMMU_GETBYPASS | ||
1507 | * ARG0: devhandle | ||
1508 | * ARG1: real address | ||
1509 | * ARG2: io_attributes | ||
1510 | * RET0: status | ||
1511 | * RET1: io_addr | ||
1512 | * ERRORS: EINVAL Invalid devhandle/io_attributes | ||
1513 | * ENORADDR Invalid real address | ||
1514 | * ENOTSUPPORTED Function not supported in this implementation. | ||
1515 | * | ||
1516 | * Create a "special" mapping in the device described by the given devhandle, | ||
1517 | * for the given real address and attributes. Return the IO address in RET1 | ||
1518 | * if successful. | ||
1519 | */ | ||
1520 | #define HV_FAST_PCI_IOMMU_GETBYPASS 0xb3 | ||
1521 | |||
1522 | /* pci_config_get() | ||
1523 | * TRAP: HV_FAST_TRAP | ||
1524 | * FUNCTION: HV_FAST_PCI_CONFIG_GET | ||
1525 | * ARG0: devhandle | ||
1526 | * ARG1: pci_device | ||
1527 | * ARG2: pci_config_offset | ||
1528 | * ARG3: size | ||
1529 | * RET0: status | ||
1530 | * RET1: error_flag | ||
1531 | * RET2: data | ||
1532 | * ERRORS: EINVAL Invalid devhandle/pci_device/offset/size | ||
1533 | * EBADALIGN pci_config_offset not size aligned | ||
1534 | * ENOACCESS Access to this offset is not permitted | ||
1535 | * | ||
1536 | * Read PCI configuration space for the adapter described by the given | ||
1537 | * devhandle. Read size (1, 2, or 4) bytes of data from the given | ||
1538 | * pci_device, at pci_config_offset from the beginning of the device's | ||
1539 | * configuration space. If there was no error, RET1 is set to zero and | ||
1540 | * RET2 is set to the data read. Insignificant bits in RET2 are not | ||
1541 | * guaranteed to have any specific value and therefore must be ignored. | ||
1542 | * | ||
1543 | * The data returned in RET2 is size based byte swapped. | ||
1544 | * | ||
1545 | * If an error occurs during the read, set RET1 to a non-zero value. The | ||
1546 | * given pci_config_offset must be 'size' aligned. | ||
1547 | */ | ||
1548 | #define HV_FAST_PCI_CONFIG_GET 0xb4 | ||
1549 | |||
1550 | /* pci_config_put() | ||
1551 | * TRAP: HV_FAST_TRAP | ||
1552 | * FUNCTION: HV_FAST_PCI_CONFIG_PUT | ||
1553 | * ARG0: devhandle | ||
1554 | * ARG1: pci_device | ||
1555 | * ARG2: pci_config_offset | ||
1556 | * ARG3: size | ||
1557 | * ARG4: data | ||
1558 | * RET0: status | ||
1559 | * RET1: error_flag | ||
1560 | * ERRORS: EINVAL Invalid devhandle/pci_device/offset/size | ||
1561 | * EBADALIGN pci_config_offset not size aligned | ||
1562 | * ENOACCESS Access to this offset is not permitted | ||
1563 | * | ||
1564 | * Write PCI configuration space for the adapter described by the given | ||
1565 | * devhandle. Write size (1, 2, or 4) bytes of data in a single operation, | ||
1566 | * at pci_config_offset from the beginning of the device's configuration | ||
1567 | * space. The data argument contains the data to be written to configuration | ||
1568 | * space. Prior to writing, the data is size based byte swapped. | ||
1569 | * | ||
1570 | * If an error occurs during the write access, do not generate an error | ||
1571 | * report, but set RET1 to a non-zero value. Otherwise RET1 is zero. | ||
1572 | * The given pci_config_offset must be 'size' aligned. | ||
1573 | * | ||
1574 | * This function is permitted to read from offset zero in the configuration | ||
1575 | * space described by the given pci_device if necessary to ensure that the | ||
1576 | * write access to config space completes. | ||
1577 | */ | ||
1578 | #define HV_FAST_PCI_CONFIG_PUT 0xb5 | ||
1579 | |||
1580 | /* pci_peek() | ||
1581 | * TRAP: HV_FAST_TRAP | ||
1582 | * FUNCTION: HV_FAST_PCI_PEEK | ||
1583 | * ARG0: devhandle | ||
1584 | * ARG1: real address | ||
1585 | * ARG2: size | ||
1586 | * RET0: status | ||
1587 | * RET1: error_flag | ||
1588 | * RET2: data | ||
1589 | * ERRORS: EINVAL Invalid devhandle or size | ||
1590 | * EBADALIGN Improperly aligned real address | ||
1591 | * ENORADDR Bad real address | ||
1592 | * ENOACCESS Guest access prohibited | ||
1593 | * | ||
1594 | * Attempt to read the IO address given by the given devhandle, real address, | ||
1595 | * and size. Size must be 1, 2, 4, or 8. The read is performed as a single | ||
1596 | * access operation using the given size. If an error occurs when reading | ||
1597 | * from the given location, do not generate an error report, but return a | ||
1598 | * non-zero value in RET1. If the read was successful, return zero in RET1 | ||
1599 | * and return the actual data read in RET2. The data returned is size based | ||
1600 | * byte swapped. | ||
1601 | * | ||
1602 | * Non-significant bits in RET2 are not guaranteed to have any specific value | ||
1603 | * and therefore must be ignored. If RET1 is returned as non-zero, the data | ||
1604 | * value is not guaranteed to have any specific value and should be ignored. | ||
1605 | * | ||
1606 | * The caller must have permission to read from the given devhandle, real | ||
1607 | * address, which must be an IO address. The argument real address must be a | ||
1608 | * size aligned address. | ||
1609 | * | ||
1610 | * The hypervisor implementation of this function must block access to any | ||
1611 | * IO address that the guest does not have explicit permission to access. | ||
1612 | */ | ||
1613 | #define HV_FAST_PCI_PEEK 0xb6 | ||
1614 | |||
1615 | /* pci_poke() | ||
1616 | * TRAP: HV_FAST_TRAP | ||
1617 | * FUNCTION: HV_FAST_PCI_POKE | ||
1618 | * ARG0: devhandle | ||
1619 | * ARG1: real address | ||
1620 | * ARG2: size | ||
1621 | * ARG3: data | ||
1622 | * ARG4: pci_device | ||
1623 | * RET0: status | ||
1624 | * RET1: error_flag | ||
1625 | * ERRORS: EINVAL Invalid devhandle, size, or pci_device | ||
1626 | * EBADALIGN Improperly aligned real address | ||
1627 | * ENORADDR Bad real address | ||
1628 | * ENOACCESS Guest access prohibited | ||
1629 | * ENOTSUPPORTED Function is not supported by implementation | ||
1630 | * | ||
1631 | * Attempt to write data to the IO address given by the given devhandle, | ||
1632 | * real address, and size. Size must be 1, 2, 4, or 8. The write is | ||
1633 | * performed as a single access operation using the given size. Prior to | ||
1634 | * writing, the data is size based byte swapped. | ||
1635 | * | ||
1636 | * If an error occurs when writing to the given location, do not generate an | ||
1637 | * error report, but return a non-zero value in RET1. If the write was | ||
1638 | * successful, return zero in RET1. | ||
1639 | * | ||
1640 | * pci_device describes the configuration address of the device being | ||
1641 | * written to. The implementation may safely read from offset 0 in | ||
1642 | * the configuration space of the device described by devhandle and | ||
1643 | * pci_device in order to guarantee that the write portion of the operation | ||
1644 | * completes. | ||
1645 | * | ||
1646 | * Any error that occurs due to the read shall be reported using the normal | ||
1647 | * error reporting mechanisms; the read error is not suppressed. | ||
1648 | * | ||
1649 | * The caller must have permission to write to the given devhandle, real | ||
1650 | * address, which must be an IO address. The argument real address must be a | ||
1651 | * size aligned address. The caller must have permission to read from | ||
1652 | * the given devhandle, pci_device configuration space offset 0. | ||
1653 | * | ||
1654 | * The hypervisor implementation of this function must block access to any | ||
1655 | * IO address that the guest does not have explicit permission to access. | ||
1656 | */ | ||
1657 | #define HV_FAST_PCI_POKE 0xb7 | ||
1658 | |||
1659 | /* pci_dma_sync() | ||
1660 | * TRAP: HV_FAST_TRAP | ||
1661 | * FUNCTION: HV_FAST_PCI_DMA_SYNC | ||
1662 | * ARG0: devhandle | ||
1663 | * ARG1: real address | ||
1664 | * ARG2: size | ||
1665 | * ARG3: io_sync_direction | ||
1666 | * RET0: status | ||
1667 | * RET1: #synced | ||
1668 | * ERRORS: EINVAL Invalid devhandle or io_sync_direction | ||
1669 | * ENORADDR Bad real address | ||
1670 | * | ||
1671 | * Synchronize a memory region described by the given real address and size, | ||
1672 | * for the device defined by the given devhandle using the direction(s) | ||
1673 | * defined by the given io_sync_direction. The argument size is the size of | ||
1674 | * the memory region in bytes. | ||
1675 | * | ||
1676 | * Return the actual number of bytes synchronized in the return value #synced, | ||
1677 | * which may be less than or equal to the argument size. If the return | ||
1678 | * value #synced is less than size, the caller must continue to call this | ||
1679 | * function with updated real address and size arguments until the entire | ||
1680 | * memory region is synchronized. | ||
1681 | */ | ||
1682 | #define HV_FAST_PCI_DMA_SYNC 0xb8 | ||
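/* Illustrative sketch only: syncing an entire region under the
 * partial-progress contract above. hv_pci_dma_sync() is a
 * hypothetical wrapper (not declared here) returning the status
 * (RET0) and storing #synced (RET1) through its last argument.
 */
#ifndef __ASSEMBLY__
extern unsigned long hv_pci_dma_sync(unsigned long devhandle,
				     unsigned long ra, unsigned long size,
				     unsigned long io_sync_direction,
				     unsigned long *nsynced);

static inline int dma_sync_all(unsigned long devhandle, unsigned long ra,
			       unsigned long size)
{
	while (size) {
		unsigned long done;

		if (hv_pci_dma_sync(devhandle, ra, size,
				    HV_PCI_SYNC_FOR_CPU, &done) != HV_EOK)
			return -1;
		ra += done;		/* may be less than requested */
		size -= done;
	}
	return 0;
}
#endif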
1683 | |||
1684 | /* PCI MSI services. */ | ||
1685 | |||
1686 | #define HV_MSITYPE_MSI32 0x00 | ||
1687 | #define HV_MSITYPE_MSI64 0x01 | ||
1688 | |||
1689 | #define HV_MSIQSTATE_IDLE 0x00 | ||
1690 | #define HV_MSIQSTATE_ERROR 0x01 | ||
1691 | |||
1692 | #define HV_MSIQ_INVALID 0x00 | ||
1693 | #define HV_MSIQ_VALID 0x01 | ||
1694 | |||
1695 | #define HV_MSISTATE_IDLE 0x00 | ||
1696 | #define HV_MSISTATE_DELIVERED 0x01 | ||
1697 | |||
1698 | #define HV_MSIVALID_INVALID 0x00 | ||
1699 | #define HV_MSIVALID_VALID 0x01 | ||
1700 | |||
1701 | #define HV_PCIE_MSGTYPE_PME_MSG 0x18 | ||
1702 | #define HV_PCIE_MSGTYPE_PME_ACK_MSG 0x1b | ||
1703 | #define HV_PCIE_MSGTYPE_CORR_MSG 0x30 | ||
1704 | #define HV_PCIE_MSGTYPE_NONFATAL_MSG 0x31 | ||
1705 | #define HV_PCIE_MSGTYPE_FATAL_MSG 0x33 | ||
1706 | |||
1707 | #define HV_MSG_INVALID 0x00 | ||
1708 | #define HV_MSG_VALID 0x01 | ||
1709 | |||
1710 | /* pci_msiq_conf() | ||
1711 | * TRAP: HV_FAST_TRAP | ||
1712 | * FUNCTION: HV_FAST_PCI_MSIQ_CONF | ||
1713 | * ARG0: devhandle | ||
1714 | * ARG1: msiqid | ||
1715 | * ARG2: real address | ||
1716 | * ARG3: number of entries | ||
1717 | * RET0: status | ||
1718 | * ERRORS: EINVAL Invalid devhandle, msiqid or nentries | ||
1719 | * EBADALIGN Improperly aligned real address | ||
1720 | * ENORADDR Bad real address | ||
1721 | * | ||
1722 | * Configure the MSI queue given by the devhandle and msiqid arguments | ||
1723 | * to be placed at the given real address and to contain the given | ||
1724 | * number of entries. The real address must be aligned exactly to match | ||
1725 | * the queue size. Each queue entry is 64 bytes long, so e.g. a 32-entry | ||
1726 | * queue must be aligned on a 2048 byte real address boundary. The MSI-EQ | ||
1727 | * Head and Tail are initialized so that the MSI-EQ is 'empty'. | ||
1728 | * | ||
1729 | * Implementation Note: Certain implementations have fixed sized queues. In | ||
1730 | * that case, number of entries must contain the correct | ||
1731 | * value. | ||
1732 | */ | ||
1733 | #define HV_FAST_PCI_MSIQ_CONF 0xc0 | ||
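/* Worked example of the alignment rule above: the queue base must be
 * aligned to (number of entries * 64) bytes, so a 32-entry queue
 * needs 2048-byte alignment and a 128-entry queue 8192-byte
 * alignment. The helper is illustrative only and assumes the entry
 * count is a power of two.
 */
#ifndef __ASSEMBLY__
static inline int msiq_base_aligned(unsigned long ra, unsigned long nentries)
{
	unsigned long align = nentries * 64;	/* 64 bytes per entry */

	return (ra & (align - 1)) == 0;
}
#endif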
1734 | |||
1735 | /* pci_msiq_info() | ||
1736 | * TRAP: HV_FAST_TRAP | ||
1737 | * FUNCTION: HV_FAST_PCI_MSIQ_INFO | ||
1738 | * ARG0: devhandle | ||
1739 | * ARG1: msiqid | ||
1740 | * RET0: status | ||
1741 | * RET1: real address | ||
1742 | * RET2: number of entries | ||
1743 | * ERRORS: EINVAL Invalid devhandle or msiqid | ||
1744 | * | ||
1745 | * Return the configuration information for the MSI queue described | ||
1746 | * by the given devhandle and msiqid. The base address of the queue | ||
1747 | * is returned in RET1 and the number of entries is returned in RET2. | ||
1748 | * If the queue is unconfigured, the real address is undefined and the | ||
1749 | * number of entries will be returned as zero. | ||
1750 | */ | ||
1751 | #define HV_FAST_PCI_MSIQ_INFO 0xc1 | ||
1752 | |||
1753 | /* pci_msiq_getvalid() | ||
1754 | * TRAP: HV_FAST_TRAP | ||
1755 | * FUNCTION: HV_FAST_PCI_MSIQ_GETVALID | ||
1756 | * ARG0: devhandle | ||
1757 | * ARG1: msiqid | ||
1758 | * RET0: status | ||
1759 | * RET1: msiqvalid (HV_MSIQ_VALID or HV_MSIQ_INVALID) | ||
1760 | * ERRORS: EINVAL Invalid devhandle or msiqid | ||
1761 | * | ||
1762 | * Get the valid state of the MSI-EQ described by the given devhandle and | ||
1763 | * msiqid. | ||
1764 | */ | ||
1765 | #define HV_FAST_PCI_MSIQ_GETVALID 0xc2 | ||
1766 | |||
1767 | /* pci_msiq_setvalid() | ||
1768 | * TRAP: HV_FAST_TRAP | ||
1769 | * FUNCTION: HV_FAST_PCI_MSIQ_SETVALID | ||
1770 | * ARG0: devhandle | ||
1771 | * ARG1: msiqid | ||
1772 | * ARG2: msiqvalid (HV_MSIQ_VALID or HV_MSIQ_INVALID) | ||
1773 | * RET0: status | ||
1774 | * ERRORS: EINVAL Invalid devhandle or msiqid or msiqvalid | ||
1775 | * value or MSI EQ is uninitialized | ||
1776 | * | ||
1777 | * Set the valid state of the MSI-EQ described by the given devhandle and | ||
1778 | * msiqid to the given msiqvalid. | ||
1779 | */ | ||
1780 | #define HV_FAST_PCI_MSIQ_SETVALID 0xc3 | ||
1781 | |||
1782 | /* pci_msiq_getstate() | ||
1783 | * TRAP: HV_FAST_TRAP | ||
1784 | * FUNCTION: HV_FAST_PCI_MSIQ_GETSTATE | ||
1785 | * ARG0: devhandle | ||
1786 | * ARG1: msiqid | ||
1787 | * RET0: status | ||
1788 | * RET1: msiqstate (HV_MSIQSTATE_IDLE or HV_MSIQSTATE_ERROR) | ||
1789 | * ERRORS: EINVAL Invalid devhandle or msiqid | ||
1790 | * | ||
1791 | * Get the state of the MSI-EQ described by the given devhandle and | ||
1792 | * msiqid. | ||
1793 | */ | ||
1794 | #define HV_FAST_PCI_MSIQ_GETSTATE 0xc4 | ||
1795 | |||
1796 | /* pci_msiq_setstate() | ||
1797 | * TRAP: HV_FAST_TRAP | ||
1798 | * FUNCTION: HV_FAST_PCI_MSIQ_SETSTATE | ||
1799 | * ARG0: devhandle | ||
1800 | * ARG1: msiqid | ||
1801 | * ARG2: msiqstate (HV_MSIQSTATE_IDLE or HV_MSIQSTATE_ERROR) | ||
1802 | * RET0: status | ||
1803 | * ERRORS: EINVAL Invalid devhandle or msiqid or msiqstate | ||
1804 | * value or MSI EQ is uninitialized | ||
1805 | * | ||
1806 | * Set the state of the MSI-EQ described by the given devhandle and | ||
1807 | * msiqid to the given msiqstate. | ||
1808 | */ | ||
1809 | #define HV_FAST_PCI_MSIQ_SETSTATE 0xc5 | ||
1810 | |||
1811 | /* pci_msiq_gethead() | ||
1812 | * TRAP: HV_FAST_TRAP | ||
1813 | * FUNCTION: HV_FAST_PCI_MSIQ_GETHEAD | ||
1814 | * ARG0: devhandle | ||
1815 | * ARG1: msiqid | ||
1816 | * RET0: status | ||
1817 | * RET1: msiqhead | ||
1818 | * ERRORS: EINVAL Invalid devhandle or msiqid | ||
1819 | * | ||
1820 | * Get the current MSI EQ queue head for the MSI-EQ described by the | ||
1821 | * given devhandle and msiqid. | ||
1822 | */ | ||
1823 | #define HV_FAST_PCI_MSIQ_GETHEAD 0xc6 | ||
1824 | |||
1825 | /* pci_msiq_sethead() | ||
1826 | * TRAP: HV_FAST_TRAP | ||
1827 | * FUNCTION: HV_FAST_PCI_MSIQ_SETHEAD | ||
1828 | * ARG0: devhandle | ||
1829 | * ARG1: msiqid | ||
1830 | * ARG2: msiqhead | ||
1831 | * RET0: status | ||
1832 | * ERRORS: EINVAL Invalid devhandle or msiqid or msiqhead, | ||
1833 | * or MSI EQ is uninitialized | ||
1834 | * | ||
1835 | * Set the current MSI EQ queue head for the MSI-EQ described by the | ||
1836 | * given devhandle and msiqid. | ||
1837 | */ | ||
1838 | #define HV_FAST_PCI_MSIQ_SETHEAD 0xc7 | ||
1839 | |||
1840 | /* pci_msiq_gettail() | ||
1841 | * TRAP: HV_FAST_TRAP | ||
1842 | * FUNCTION: HV_FAST_PCI_MSIQ_GETTAIL | ||
1843 | * ARG0: devhandle | ||
1844 | * ARG1: msiqid | ||
1845 | * RET0: status | ||
1846 | * RET1: msiqtail | ||
1847 | * ERRORS: EINVAL Invalid devhandle or msiqid | ||
1848 | * | ||
1849 | * Get the current MSI EQ queue tail for the MSI-EQ described by the | ||
1850 | * given devhandle and msiqid. | ||
1851 | */ | ||
1852 | #define HV_FAST_PCI_MSIQ_GETTAIL 0xc8 | ||
1853 | |||
1854 | /* pci_msi_getvalid() | ||
1855 | * TRAP: HV_FAST_TRAP | ||
1856 | * FUNCTION: HV_FAST_PCI_MSI_GETVALID | ||
1857 | * ARG0: devhandle | ||
1858 | * ARG1: msinum | ||
1859 | * RET0: status | ||
1860 | * RET1: msivalidstate | ||
1861 | * ERRORS: EINVAL Invalid devhandle or msinum | ||
1862 | * | ||
1863 | * Get the current valid/enabled state for the MSI defined by the | ||
1864 | * given devhandle and msinum. | ||
1865 | */ | ||
1866 | #define HV_FAST_PCI_MSI_GETVALID 0xc9 | ||
1867 | |||
1868 | /* pci_msi_setvalid() | ||
1869 | * TRAP: HV_FAST_TRAP | ||
1870 | * FUNCTION: HV_FAST_PCI_MSI_SETVALID | ||
1871 | * ARG0: devhandle | ||
1872 | * ARG1: msinum | ||
1873 | * ARG2: msivalidstate | ||
1874 | * RET0: status | ||
1875 | * ERRORS: EINVAL Invalid devhandle or msinum or msivalidstate | ||
1876 | * | ||
1877 | * Set the current valid/enabled state for the MSI defined by the | ||
1878 | * given devhandle and msinum. | ||
1879 | */ | ||
1880 | #define HV_FAST_PCI_MSI_SETVALID 0xca | ||
1881 | |||
1882 | /* pci_msi_getmsiq() | ||
1883 | * TRAP: HV_FAST_TRAP | ||
1884 | * FUNCTION: HV_FAST_PCI_MSI_GETMSIQ | ||
1885 | * ARG0: devhandle | ||
1886 | * ARG1: msinum | ||
1887 | * RET0: status | ||
1888 | * RET1: msiqid | ||
1889 | * ERRORS: EINVAL Invalid devhandle or msinum or MSI is unbound | ||
1890 | * | ||
1891 | * Get the MSI EQ that the MSI defined by the given devhandle and | ||
1892 | * msinum is bound to. | ||
1893 | */ | ||
1894 | #define HV_FAST_PCI_MSI_GETMSIQ 0xcb | ||
1895 | |||
1896 | /* pci_msi_setmsiq() | ||
1897 | * TRAP: HV_FAST_TRAP | ||
1898 | * FUNCTION: HV_FAST_PCI_MSI_SETMSIQ | ||
1899 | * ARG0: devhandle | ||
1900 | * ARG1: msinum | ||
1901 | * ARG2: msitype | ||
1902 | * ARG3: msiqid | ||
1903 | * RET0: status | ||
1904 | * ERRORS: EINVAL Invalid devhandle or msinum or msiqid | ||
1905 | * | ||
1906 | * Set the MSI EQ that the MSI defined by the given devhandle and | ||
1907 | * msinum is bound to. | ||
1908 | */ | ||
1909 | #define HV_FAST_PCI_MSI_SETMSIQ 0xcc | ||
1910 | |||
1911 | /* pci_msi_getstate() | ||
1912 | * TRAP: HV_FAST_TRAP | ||
1913 | * FUNCTION: HV_FAST_PCI_MSI_GETSTATE | ||
1914 | * ARG0: devhandle | ||
1915 | * ARG1: msinum | ||
1916 | * RET0: status | ||
1917 | * RET1: msistate | ||
1918 | * ERRORS: EINVAL Invalid devhandle or msinum | ||
1919 | * | ||
1920 | * Get the state of the MSI defined by the given devhandle and msinum. | ||
1921 | * If not initialized, return HV_MSISTATE_IDLE. | ||
1922 | */ | ||
1923 | #define HV_FAST_PCI_MSI_GETSTATE 0xcd | ||
1924 | |||
1925 | /* pci_msi_setstate() | ||
1926 | * TRAP: HV_FAST_TRAP | ||
1927 | * FUNCTION: HV_FAST_PCI_MSI_SETSTATE | ||
1928 | * ARG0: devhandle | ||
1929 | * ARG1: msinum | ||
1930 | * ARG2: msistate | ||
1931 | * RET0: status | ||
1932 | * ERRORS: EINVAL Invalid devhandle or msinum or msistate | ||
1933 | * | ||
1934 | * Set the state of the MSI defined by the given devhandle and msinum. | ||
1935 | */ | ||
1936 | #define HV_FAST_PCI_MSI_SETSTATE 0xce | ||
1937 | |||
1938 | /* pci_msg_getmsiq() | ||
1939 | * TRAP: HV_FAST_TRAP | ||
1940 | * FUNCTION: HV_FAST_PCI_MSG_GETMSIQ | ||
1941 | * ARG0: devhandle | ||
1942 | * ARG1: msgtype | ||
1943 | * RET0: status | ||
1944 | * RET1: msiqid | ||
1945 | * ERRORS: EINVAL Invalid devhandle or msgtype | ||
1946 | * | ||
1947 | * Get the MSI EQ of the MSG defined by the given devhandle and msgtype. | ||
1948 | */ | ||
1949 | #define HV_FAST_PCI_MSG_GETMSIQ 0xd0 | ||
1950 | |||
1951 | /* pci_msg_setmsiq() | ||
1952 | * TRAP: HV_FAST_TRAP | ||
1953 | * FUNCTION: HV_FAST_PCI_MSG_SETMSIQ | ||
1954 | * ARG0: devhandle | ||
1955 | * ARG1: msgtype | ||
1956 | * ARG2: msiqid | ||
1957 | * RET0: status | ||
1958 | * ERRORS: EINVAL Invalid devhandle, msgtype, or msiqid | ||
1959 | * | ||
1960 | * Set the MSI EQ of the MSG defined by the given devhandle and msgtype. | ||
1961 | */ | ||
1962 | #define HV_FAST_PCI_MSG_SETMSIQ 0xd1 | ||
1963 | |||
1964 | /* pci_msg_getvalid() | ||
1965 | * TRAP: HV_FAST_TRAP | ||
1966 | * FUNCTION: HV_FAST_PCI_MSG_GETVALID | ||
1967 | * ARG0: devhandle | ||
1968 | * ARG1: msgtype | ||
1969 | * RET0: status | ||
1970 | * RET1: msgvalidstate | ||
1971 | * ERRORS: EINVAL Invalid devhandle or msgtype | ||
1972 | * | ||
1973 | * Get the valid/enabled state of the MSG defined by the given | ||
1974 | * devhandle and msgtype. | ||
1975 | */ | ||
1976 | #define HV_FAST_PCI_MSG_GETVALID 0xd2 | ||
1977 | |||
1978 | /* pci_msg_setvalid() | ||
1979 | * TRAP: HV_FAST_TRAP | ||
1980 | * FUNCTION: HV_FAST_PCI_MSG_SETVALID | ||
1981 | * ARG0: devhandle | ||
1982 | * ARG1: msgtype | ||
1983 | * ARG2: msgvalidstate | ||
1984 | * RET0: status | ||
1985 | * ERRORS: EINVAL Invalid devhandle or msgtype or msgvalidstate | ||
1986 | * | ||
1987 | * Set the valid/enabled state of the MSG defined by the given | ||
1988 | * devhandle and msgtype. | ||
1989 | */ | ||
1990 | #define HV_FAST_PCI_MSG_SETVALID 0xd3 | ||
1991 | |||
1992 | /* Performance counter services. */ | ||
1993 | |||
1994 | #define HV_PERF_JBUS_PERF_CTRL_REG 0x00 | ||
1995 | #define HV_PERF_JBUS_PERF_CNT_REG 0x01 | ||
1996 | #define HV_PERF_DRAM_PERF_CTRL_REG_0 0x02 | ||
1997 | #define HV_PERF_DRAM_PERF_CNT_REG_0 0x03 | ||
1998 | #define HV_PERF_DRAM_PERF_CTRL_REG_1 0x04 | ||
1999 | #define HV_PERF_DRAM_PERF_CNT_REG_1 0x05 | ||
2000 | #define HV_PERF_DRAM_PERF_CTRL_REG_2 0x06 | ||
2001 | #define HV_PERF_DRAM_PERF_CNT_REG_2 0x07 | ||
2002 | #define HV_PERF_DRAM_PERF_CTRL_REG_3 0x08 | ||
2003 | #define HV_PERF_DRAM_PERF_CNT_REG_3 0x09 | ||
2004 | |||
2005 | /* get_perfreg() | ||
2006 | * TRAP: HV_FAST_TRAP | ||
2007 | * FUNCTION: HV_FAST_GET_PERFREG | ||
2008 | * ARG0: performance reg number | ||
2009 | * RET0: status | ||
2010 | * RET1: performance reg value | ||
2011 | * ERRORS: EINVAL Invalid performance register number | ||
2012 | * ENOACCESS No access allowed to performance counters | ||
2013 | * | ||
2014 | * Read the value of the given DRAM/JBUS performance counter/control register. | ||
2015 | */ | ||
2016 | #define HV_FAST_GET_PERFREG 0x100 | ||
2017 | |||
2018 | /* set_perfreg() | ||
2019 | * TRAP: HV_FAST_TRAP | ||
2020 | * FUNCTION: HV_FAST_SET_PERFREG | ||
2021 | * ARG0: performance reg number | ||
2022 | * ARG1: performance reg value | ||
2023 | * RET0: status | ||
2024 | * ERRORS: EINVAL Invalid performance register number | ||
2025 | * ENOACCESS No access allowed to performance counters | ||
2026 | * | ||
2027 | * Write the given performance reg value to the given DRAM/JBUS | ||
2028 | * performance counter/control register. | ||
2029 | */ | ||
2030 | #define HV_FAST_SET_PERFREG 0x101 | ||
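Taken together, the two calls support the usual read-modify-write of a control register. A minimal sketch, assuming hv_get_perfreg()/hv_set_perfreg() are C wrappers for the two fast traps above (they are not declared by this header):

extern unsigned long hv_get_perfreg(unsigned long reg, unsigned long *val);
extern unsigned long hv_set_perfreg(unsigned long reg, unsigned long val);

static int dram_perf_ctrl_set(unsigned long enable_bits)
{
	unsigned long val;

	if (hv_get_perfreg(HV_PERF_DRAM_PERF_CTRL_REG_0, &val) != HV_EOK)
		return -EACCES;		/* EINVAL or ENOACCESS */

	val |= enable_bits;		/* meaning of the bits is chip-specific */

	if (hv_set_perfreg(HV_PERF_DRAM_PERF_CTRL_REG_0, val) != HV_EOK)
		return -EACCES;
	return 0;
}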
2031 | |||
2032 | /* MMU statistics services. | ||
2033 | * | ||
2034 | * The hypervisor maintains MMU statistics, and privileged code provides | ||
2035 | * a buffer where these statistics can be collected. The buffer is | ||
2036 | * continually updated once configured. The layout is as follows: | ||
2037 | */ | ||
2038 | #ifndef __ASSEMBLY__ | ||
2039 | struct hv_mmu_statistics { | ||
2040 | unsigned long immu_tsb_hits_ctx0_8k_tte; | ||
2041 | unsigned long immu_tsb_ticks_ctx0_8k_tte; | ||
2042 | unsigned long immu_tsb_hits_ctx0_64k_tte; | ||
2043 | unsigned long immu_tsb_ticks_ctx0_64k_tte; | ||
2044 | unsigned long __reserved1[2]; | ||
2045 | unsigned long immu_tsb_hits_ctx0_4mb_tte; | ||
2046 | unsigned long immu_tsb_ticks_ctx0_4mb_tte; | ||
2047 | unsigned long __reserved2[2]; | ||
2048 | unsigned long immu_tsb_hits_ctx0_256mb_tte; | ||
2049 | unsigned long immu_tsb_ticks_ctx0_256mb_tte; | ||
2050 | unsigned long __reserved3[4]; | ||
2051 | unsigned long immu_tsb_hits_ctxnon0_8k_tte; | ||
2052 | unsigned long immu_tsb_ticks_ctxnon0_8k_tte; | ||
2053 | unsigned long immu_tsb_hits_ctxnon0_64k_tte; | ||
2054 | unsigned long immu_tsb_ticks_ctxnon0_64k_tte; | ||
2055 | unsigned long __reserved4[2]; | ||
2056 | unsigned long immu_tsb_hits_ctxnon0_4mb_tte; | ||
2057 | unsigned long immu_tsb_ticks_ctxnon0_4mb_tte; | ||
2058 | unsigned long __reserved5[2]; | ||
2059 | unsigned long immu_tsb_hits_ctxnon0_256mb_tte; | ||
2060 | unsigned long immu_tsb_ticks_ctxnon0_256mb_tte; | ||
2061 | unsigned long __reserved6[4]; | ||
2062 | unsigned long dmmu_tsb_hits_ctx0_8k_tte; | ||
2063 | unsigned long dmmu_tsb_ticks_ctx0_8k_tte; | ||
2064 | unsigned long dmmu_tsb_hits_ctx0_64k_tte; | ||
2065 | unsigned long dmmu_tsb_ticks_ctx0_64k_tte; | ||
2066 | unsigned long __reserved7[2]; | ||
2067 | unsigned long dmmu_tsb_hits_ctx0_4mb_tte; | ||
2068 | unsigned long dmmu_tsb_ticks_ctx0_4mb_tte; | ||
2069 | unsigned long __reserved8[2]; | ||
2070 | unsigned long dmmu_tsb_hits_ctx0_256mb_tte; | ||
2071 | unsigned long dmmu_tsb_ticks_ctx0_256mb_tte; | ||
2072 | unsigned long __reserved9[4]; | ||
2073 | unsigned long dmmu_tsb_hits_ctxnon0_8k_tte; | ||
2074 | unsigned long dmmu_tsb_ticks_ctxnon0_8k_tte; | ||
2075 | unsigned long dmmu_tsb_hits_ctxnon0_64k_tte; | ||
2076 | unsigned long dmmu_tsb_ticks_ctxnon0_64k_tte; | ||
2077 | unsigned long __reserved10[2]; | ||
2078 | unsigned long dmmu_tsb_hits_ctxnon0_4mb_tte; | ||
2079 | unsigned long dmmu_tsb_ticks_ctxnon0_4mb_tte; | ||
2080 | unsigned long __reserved11[2]; | ||
2081 | unsigned long dmmu_tsb_hits_ctxnon0_256mb_tte; | ||
2082 | unsigned long dmmu_tsb_ticks_ctxnon0_256mb_tte; | ||
2083 | unsigned long __reserved12[4]; | ||
2084 | }; | ||
2085 | #endif | ||
2086 | |||
2087 | /* mmustat_conf() | ||
2088 | * TRAP: HV_FAST_TRAP | ||
2089 | * FUNCTION: HV_FAST_MMUSTAT_CONF | ||
2090 | * ARG0: real address | ||
2091 | * RET0: status | ||
2092 | * RET1: real address | ||
2093 | * ERRORS: ENORADDR Invalid real address | ||
2094 | * EBADALIGN Real address not aligned on 64-byte boundary | ||
2095 | * EBADTRAP API not supported on this processor | ||
2096 | * | ||
2097 | * Enable MMU statistic gathering using the buffer at the given real | ||
2098 | * address on the current virtual CPU. The new buffer real address | ||
2099 | * is given in ARG1, and the previously specified buffer real address | ||
2100 | * is returned in RET1, or is returned as zero for the first invocation. | ||
2101 | * | ||
2102 | * If the passed in real address argument is zero, this will disable | ||
2103 | * MMU statistic collection on the current virtual CPU. If an error is | ||
2104 | * returned then no statistics are collected. | ||
2105 | * | ||
2106 | * The buffer contents should be initialized to all zeros before being | ||
2107 | * given to the hypervisor or else the statistics will be meaningless. | ||
2108 | */ | ||
2109 | #define HV_FAST_MMUSTAT_CONF 0x102 | ||
2110 | |||
2111 | /* mmustat_info() | ||
2112 | * TRAP: HV_FAST_TRAP | ||
2113 | * FUNCTION: HV_FAST_MMUSTAT_INFO | ||
2114 | * RET0: status | ||
2115 | * RET1: real address | ||
2116 | * ERRORS: EBADTRAP API not supported on this processor | ||
2117 | * | ||
2118 | * Return the current state and real address of the currently configured | ||
2119 | * MMU statistics buffer on the current virtual CPU. | ||
2120 | */ | ||
2121 | #define HV_FAST_MMUSTAT_INFO 0x103 | ||
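Putting the two services together with the buffer layout above, the documented contract is: a 64-byte aligned, zero-filled buffer, passed by real address, with a zero real address disabling collection. A sketch under those rules (hv_mmustat_conf() is an assumed wrapper for HV_FAST_MMUSTAT_CONF, not something this header declares):

static struct hv_mmu_statistics mmu_stats __attribute__((aligned(64)));

extern unsigned long hv_mmustat_conf(unsigned long ra, unsigned long *orig_ra);

static int mmu_stats_enable(void)
{
	unsigned long prev_ra;

	/* The API requires a zeroed buffer, else the counts are garbage. */
	memset(&mmu_stats, 0, sizeof(mmu_stats));
	if (hv_mmustat_conf(__pa(&mmu_stats), &prev_ra) != HV_EOK)
		return -EINVAL;
	return 0;
}

static void mmu_stats_disable(void)
{
	unsigned long prev_ra;

	hv_mmustat_conf(0UL, &prev_ra);	/* RA of zero disables collection */
}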
2122 | |||
2123 | /* Function numbers for HV_CORE_TRAP. */ | ||
2124 | #define HV_CORE_VER 0x00 | ||
2125 | #define HV_CORE_PUTCHAR 0x01 | ||
2126 | #define HV_CORE_EXIT 0x02 | ||
2127 | |||
2128 | #endif /* !(_SPARC64_HYPERVISOR_H) */ | ||
diff --git a/include/asm-sparc64/idprom.h b/include/asm-sparc64/idprom.h index 701483c5465d..77fbf987385f 100644 --- a/include/asm-sparc64/idprom.h +++ b/include/asm-sparc64/idprom.h | |||
@@ -9,15 +9,7 @@ | |||
9 | 9 | ||
10 | #include <linux/types.h> | 10 | #include <linux/types.h> |
11 | 11 | ||
12 | /* Offset into the EEPROM where the id PROM is located on the 4c */ | 12 | struct idprom { |
13 | #define IDPROM_OFFSET 0x7d8 | ||
14 | |||
15 | /* On sun4m; physical. */ | ||
16 | /* MicroSPARC(-II) does not decode 31st bit, but it works. */ | ||
17 | #define IDPROM_OFFSET_M 0xfd8 | ||
18 | |||
19 | struct idprom | ||
20 | { | ||
21 | u8 id_format; /* Format identifier (always 0x01) */ | 13 | u8 id_format; /* Format identifier (always 0x01) */ |
22 | u8 id_machtype; /* Machine type */ | 14 | u8 id_machtype; /* Machine type */ |
23 | u8 id_ethaddr[6]; /* Hardware ethernet address */ | 15 | u8 id_ethaddr[6]; /* Hardware ethernet address */ |
@@ -30,6 +22,4 @@ struct idprom | |||
30 | extern struct idprom *idprom; | 22 | extern struct idprom *idprom; |
31 | extern void idprom_init(void); | 23 | extern void idprom_init(void); |
32 | 24 | ||
33 | #define IDPROM_SIZE (sizeof(struct idprom)) | ||
34 | |||
35 | #endif /* !(_SPARC_IDPROM_H) */ | 25 | #endif /* !(_SPARC_IDPROM_H) */ |
diff --git a/include/asm-sparc64/intr_queue.h b/include/asm-sparc64/intr_queue.h new file mode 100644 index 000000000000..206077dedc2a --- /dev/null +++ b/include/asm-sparc64/intr_queue.h | |||
@@ -0,0 +1,15 @@ | |||
1 | #ifndef _SPARC64_INTR_QUEUE_H | ||
2 | #define _SPARC64_INTR_QUEUE_H | ||
3 | |||
4 | /* Sun4v interrupt queue registers, accessed via ASI_QUEUE. */ | ||
5 | |||
6 | #define INTRQ_CPU_MONDO_HEAD 0x3c0 /* CPU mondo head */ | ||
7 | #define INTRQ_CPU_MONDO_TAIL 0x3c8 /* CPU mondo tail */ | ||
8 | #define INTRQ_DEVICE_MONDO_HEAD 0x3d0 /* Device mondo head */ | ||
9 | #define INTRQ_DEVICE_MONDO_TAIL 0x3d8 /* Device mondo tail */ | ||
10 | #define INTRQ_RESUM_MONDO_HEAD 0x3e0 /* Resumable error mondo head */ | ||
11 | #define INTRQ_RESUM_MONDO_TAIL 0x3e8 /* Resumable error mondo tail */ | ||
12 | #define INTRQ_NONRESUM_MONDO_HEAD 0x3f0 /* Non-resumable error mondo head */ | ||
13 | #define INTRQ_NONRESUM_MONDO_TAIL 0x3f8 /* Non-resumable error mondo tail */ | ||
14 | |||
15 | #endif /* !(_SPARC64_INTR_QUEUE_H) */ | ||
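These offsets are not MMIO addresses; they are consumed with loads and stores through the ASI_QUEUE address space. A minimal sketch of polling the CPU mondo queue, assuming ASI_QUEUE's numeric value comes from asm/asi.h as in the rest of the sun4v support (the helpers are illustrative):

/* Illustrative: a queue register is read with ldxa from ASI_QUEUE. */
static inline unsigned long intrq_read(unsigned long offset)
{
	unsigned long val;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (val)
			     : "r" (offset), "i" (ASI_QUEUE));
	return val;
}

/* Work is pending whenever the head and tail pointers differ. */
static inline int cpu_mondo_pending(void)
{
	return intrq_read(INTRQ_CPU_MONDO_HEAD) !=
	       intrq_read(INTRQ_CPU_MONDO_TAIL);
}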
diff --git a/include/asm-sparc64/irq.h b/include/asm-sparc64/irq.h index 8b70edcb80dc..de33d6e1afb5 100644 --- a/include/asm-sparc64/irq.h +++ b/include/asm-sparc64/irq.h | |||
@@ -72,8 +72,11 @@ struct ino_bucket { | |||
72 | #define IMAP_VALID 0x80000000 /* IRQ Enabled */ | 72 | #define IMAP_VALID 0x80000000 /* IRQ Enabled */ |
73 | #define IMAP_TID_UPA 0x7c000000 /* UPA TargetID */ | 73 | #define IMAP_TID_UPA 0x7c000000 /* UPA TargetID */ |
74 | #define IMAP_TID_JBUS 0x7c000000 /* JBUS TargetID */ | 74 | #define IMAP_TID_JBUS 0x7c000000 /* JBUS TargetID */ |
75 | #define IMAP_TID_SHIFT 26 | ||
75 | #define IMAP_AID_SAFARI 0x7c000000 /* Safari AgentID */ | 76 | #define IMAP_AID_SAFARI 0x7c000000 /* Safari AgentID */ |
77 | #define IMAP_AID_SHIFT 26 | ||
76 | #define IMAP_NID_SAFARI 0x03e00000 /* Safari NodeID */ | 78 | #define IMAP_NID_SAFARI 0x03e00000 /* Safari NodeID */ |
79 | #define IMAP_NID_SHIFT 21 | ||
77 | #define IMAP_IGN 0x000007c0 /* IRQ Group Number */ | 80 | #define IMAP_IGN 0x000007c0 /* IRQ Group Number */ |
78 | #define IMAP_INO 0x0000003f /* IRQ Number */ | 81 | #define IMAP_INO 0x0000003f /* IRQ Number */ |
79 | #define IMAP_INR 0x000007ff /* Full interrupt number*/ | 82 | #define IMAP_INR 0x000007ff /* Full interrupt number*/ |
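The new *_SHIFT constants pair with the existing masks, so retargeting an interrupt becomes a plain mask-and-shift instead of open-coded magic numbers. An illustrative helper (not part of the header):

/* Point an interrupt map register at a new UPA target CPU. */
static unsigned long imap_set_target(unsigned long imap_val,
				     unsigned long target_id)
{
	imap_val &= ~IMAP_TID_UPA;			/* drop old target */
	imap_val |= (target_id << IMAP_TID_SHIFT) & IMAP_TID_UPA;
	return imap_val;
}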
@@ -111,6 +114,7 @@ extern void disable_irq(unsigned int); | |||
111 | #define disable_irq_nosync disable_irq | 114 | #define disable_irq_nosync disable_irq |
112 | extern void enable_irq(unsigned int); | 115 | extern void enable_irq(unsigned int); |
113 | extern unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap); | 116 | extern unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap); |
117 | extern unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, int pil, unsigned char flags); | ||
114 | extern unsigned int sbus_build_irq(void *sbus, unsigned int ino); | 118 | extern unsigned int sbus_build_irq(void *sbus, unsigned int ino); |
115 | 119 | ||
116 | static __inline__ void set_softint(unsigned long bits) | 120 | static __inline__ void set_softint(unsigned long bits) |
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h index 8627eed6e83d..230ba678d3b0 100644 --- a/include/asm-sparc64/mmu.h +++ b/include/asm-sparc64/mmu.h | |||
@@ -4,20 +4,9 @@ | |||
4 | #include <linux/config.h> | 4 | #include <linux/config.h> |
5 | #include <asm/page.h> | 5 | #include <asm/page.h> |
6 | #include <asm/const.h> | 6 | #include <asm/const.h> |
7 | #include <asm/hypervisor.h> | ||
7 | 8 | ||
8 | /* | 9 | #define CTX_NR_BITS 13 |
9 | * For the 8k pagesize kernel, use only 10 hw context bits to optimize some | ||
10 | * shifts in the fast tlbmiss handlers, instead of all 13 bits (specifically | ||
11 | * for vpte offset calculation). For other pagesizes, this optimization in | ||
12 | * the tlbhandlers can not be done; but still, all 13 bits can not be used | ||
13 | * because the tlb handlers use "andcc" instruction which sign extends 13 | ||
14 | * bit arguments. | ||
15 | */ | ||
16 | #if PAGE_SHIFT == 13 | ||
17 | #define CTX_NR_BITS 10 | ||
18 | #else | ||
19 | #define CTX_NR_BITS 12 | ||
20 | #endif | ||
21 | 10 | ||
22 | #define TAG_CONTEXT_BITS ((_AC(1,UL) << CTX_NR_BITS) - _AC(1,UL)) | 11 | #define TAG_CONTEXT_BITS ((_AC(1,UL) << CTX_NR_BITS) - _AC(1,UL)) |
23 | 12 | ||
@@ -90,8 +79,27 @@ | |||
90 | 79 | ||
91 | #ifndef __ASSEMBLY__ | 80 | #ifndef __ASSEMBLY__ |
92 | 81 | ||
82 | #define TSB_ENTRY_ALIGNMENT 16 | ||
83 | |||
84 | struct tsb { | ||
85 | unsigned long tag; | ||
86 | unsigned long pte; | ||
87 | } __attribute__((aligned(TSB_ENTRY_ALIGNMENT))); | ||
88 | |||
89 | extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte); | ||
90 | extern void tsb_flush(unsigned long ent, unsigned long tag); | ||
91 | extern void tsb_init(struct tsb *tsb, unsigned long size); | ||
92 | |||
93 | typedef struct { | 93 | typedef struct { |
94 | unsigned long sparc64_ctx_val; | 94 | spinlock_t lock; |
95 | unsigned long sparc64_ctx_val; | ||
96 | struct tsb *tsb; | ||
97 | unsigned long tsb_rss_limit; | ||
98 | unsigned long tsb_nentries; | ||
99 | unsigned long tsb_reg_val; | ||
100 | unsigned long tsb_map_vaddr; | ||
101 | unsigned long tsb_map_pte; | ||
102 | struct hv_tsb_descr tsb_descr; | ||
95 | } mm_context_t; | 103 | } mm_context_t; |
96 | 104 | ||
97 | #endif /* !__ASSEMBLY__ */ | 105 | #endif /* !__ASSEMBLY__ */ |
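The TSB is a direct-mapped, power-of-two table of these 16-byte tag/PTE pairs, indexed by a hash of the faulting virtual address. The real lookups happen in the assembly miss handlers via tsb_reg_val; the C rendering below is only a sketch of the indexing scheme (vaddr hash modulo table size):

/* Illustrative only: mirror of the miss handlers' TSB indexing. */
static struct tsb *tsb_entry_for(struct mm_struct *mm, unsigned long vaddr)
{
	unsigned long hash;

	hash = (vaddr >> PAGE_SHIFT) & (mm->context.tsb_nentries - 1UL);
	return &mm->context.tsb[hash];
}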
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h index 57ee7b306189..e7974321d052 100644 --- a/include/asm-sparc64/mmu_context.h +++ b/include/asm-sparc64/mmu_context.h | |||
@@ -19,96 +19,98 @@ extern unsigned long tlb_context_cache; | |||
19 | extern unsigned long mmu_context_bmap[]; | 19 | extern unsigned long mmu_context_bmap[]; |
20 | 20 | ||
21 | extern void get_new_mmu_context(struct mm_struct *mm); | 21 | extern void get_new_mmu_context(struct mm_struct *mm); |
22 | #ifdef CONFIG_SMP | ||
23 | extern void smp_new_mmu_context_version(void); | ||
24 | #else | ||
25 | #define smp_new_mmu_context_version() do { } while (0) | ||
26 | #endif | ||
27 | |||
28 | extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm); | ||
29 | extern void destroy_context(struct mm_struct *mm); | ||
30 | |||
31 | extern void __tsb_context_switch(unsigned long pgd_pa, | ||
32 | unsigned long tsb_reg, | ||
33 | unsigned long tsb_vaddr, | ||
34 | unsigned long tsb_pte, | ||
35 | unsigned long tsb_descr_pa); | ||
36 | |||
37 | static inline void tsb_context_switch(struct mm_struct *mm) | ||
38 | { | ||
39 | __tsb_context_switch(__pa(mm->pgd), mm->context.tsb_reg_val, | ||
40 | mm->context.tsb_map_vaddr, | ||
41 | mm->context.tsb_map_pte, | ||
42 | __pa(&mm->context.tsb_descr)); | ||
43 | } | ||
22 | 44 | ||
23 | /* Initialize a new mmu context. This is invoked when a new | 45 | extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss); |
24 | * address space instance (unique or shared) is instantiated. | 46 | #ifdef CONFIG_SMP |
25 | * This just needs to set mm->context to an invalid context. | 47 | extern void smp_tsb_sync(struct mm_struct *mm); |
26 | */ | 48 | #else |
27 | #define init_new_context(__tsk, __mm) \ | 49 | #define smp_tsb_sync(__mm) do { } while (0) |
28 | (((__mm)->context.sparc64_ctx_val = 0UL), 0) | 50 | #endif |
29 | |||
30 | /* Destroy a dead context. This occurs when mmput drops the | ||
31 | * mm_users count to zero, the mmaps have been released, and | ||
32 | * all the page tables have been flushed. Our job is to destroy | ||
33 | * any remaining processor-specific state, and in the sparc64 | ||
34 | * case this just means freeing up the mmu context ID held by | ||
35 | * this task if valid. | ||
36 | */ | ||
37 | #define destroy_context(__mm) \ | ||
38 | do { spin_lock(&ctx_alloc_lock); \ | ||
39 | if (CTX_VALID((__mm)->context)) { \ | ||
40 | unsigned long nr = CTX_NRBITS((__mm)->context); \ | ||
41 | mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63)); \ | ||
42 | } \ | ||
43 | spin_unlock(&ctx_alloc_lock); \ | ||
44 | } while(0) | ||
45 | |||
46 | /* Reload the two core values used by TLB miss handler | ||
47 | * processing on sparc64. They are: | ||
48 | * 1) The physical address of mm->pgd, when full page | ||
49 | * table walks are necessary, this is where the | ||
50 | * search begins. | ||
51 | * 2) A "PGD cache". For 32-bit tasks only pgd[0] is | ||
52 | * ever used since that maps the entire low 4GB | ||
53 | * completely. To speed up TLB miss processing we | ||
54 | * make this value available to the handlers. This | ||
55 | * decreases the amount of memory traffic incurred. | ||
56 | */ | ||
57 | #define reload_tlbmiss_state(__tsk, __mm) \ | ||
58 | do { \ | ||
59 | register unsigned long paddr asm("o5"); \ | ||
60 | register unsigned long pgd_cache asm("o4"); \ | ||
61 | paddr = __pa((__mm)->pgd); \ | ||
62 | pgd_cache = 0UL; \ | ||
63 | if (task_thread_info(__tsk)->flags & _TIF_32BIT) \ | ||
64 | pgd_cache = get_pgd_cache((__mm)->pgd); \ | ||
65 | __asm__ __volatile__("wrpr %%g0, 0x494, %%pstate\n\t" \ | ||
66 | "mov %3, %%g4\n\t" \ | ||
67 | "mov %0, %%g7\n\t" \ | ||
68 | "stxa %1, [%%g4] %2\n\t" \ | ||
69 | "membar #Sync\n\t" \ | ||
70 | "wrpr %%g0, 0x096, %%pstate" \ | ||
71 | : /* no outputs */ \ | ||
72 | : "r" (paddr), "r" (pgd_cache),\ | ||
73 | "i" (ASI_DMMU), "i" (TSB_REG)); \ | ||
74 | } while(0) | ||
75 | 51 | ||
76 | /* Set MMU context in the actual hardware. */ | 52 | /* Set MMU context in the actual hardware. */ |
77 | #define load_secondary_context(__mm) \ | 53 | #define load_secondary_context(__mm) \ |
78 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" \ | 54 | __asm__ __volatile__( \ |
79 | "flush %%g6" \ | 55 | "\n661: stxa %0, [%1] %2\n" \ |
80 | : /* No outputs */ \ | 56 | " .section .sun4v_1insn_patch, \"ax\"\n" \ |
81 | : "r" (CTX_HWBITS((__mm)->context)), \ | 57 | " .word 661b\n" \ |
82 | "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU)) | 58 | " stxa %0, [%1] %3\n" \ |
59 | " .previous\n" \ | ||
60 | " flush %%g6\n" \ | ||
61 | : /* No outputs */ \ | ||
62 | : "r" (CTX_HWBITS((__mm)->context)), \ | ||
63 | "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU)) | ||
83 | 64 | ||
84 | extern void __flush_tlb_mm(unsigned long, unsigned long); | 65 | extern void __flush_tlb_mm(unsigned long, unsigned long); |
85 | 66 | ||
86 | /* Switch the current MM context. */ | 67 | /* Switch the current MM context. Interrupts are disabled. */ |
87 | static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk) | 68 | static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk) |
88 | { | 69 | { |
89 | unsigned long ctx_valid; | 70 | unsigned long ctx_valid, flags; |
90 | int cpu; | 71 | int cpu; |
91 | 72 | ||
92 | /* Note: page_table_lock is used here to serialize switch_mm | 73 | spin_lock_irqsave(&mm->context.lock, flags); |
93 | * and activate_mm, and their calls to get_new_mmu_context. | ||
94 | * This use of page_table_lock is unrelated to its other uses. | ||
95 | */ | ||
96 | spin_lock(&mm->page_table_lock); | ||
97 | ctx_valid = CTX_VALID(mm->context); | 74 | ctx_valid = CTX_VALID(mm->context); |
98 | if (!ctx_valid) | 75 | if (!ctx_valid) |
99 | get_new_mmu_context(mm); | 76 | get_new_mmu_context(mm); |
100 | spin_unlock(&mm->page_table_lock); | ||
101 | 77 | ||
102 | if (!ctx_valid || (old_mm != mm)) { | 78 | /* We have to be extremely careful here or else we will miss |
103 | load_secondary_context(mm); | 79 | * a TSB grow if we switch back and forth between a kernel |
104 | reload_tlbmiss_state(tsk, mm); | 80 | * thread and an address space which has its TSB size increased |
105 | } | 81 | * on another processor. |
82 | * | ||
83 | * It is possible to play some games in order to optimize the | ||
84 | * switch, but the safest thing to do is to unconditionally | ||
85 | * perform the secondary context load and the TSB context switch. | ||
86 | * | ||
87 | * For reference the bad case is, for address space "A": | ||
88 | * | ||
89 | * CPU 0 CPU 1 | ||
90 | * run address space A | ||
91 | * set cpu0's bits in cpu_vm_mask | ||
92 | * switch to kernel thread, borrow | ||
93 | * address space A via entry_lazy_tlb | ||
94 | * run address space A | ||
95 | * set cpu1's bit in cpu_vm_mask | ||
96 | * flush_tlb_pending() | ||
97 | * reset cpu_vm_mask to just cpu1 | ||
98 | * TSB grow | ||
99 | * run address space A | ||
100 | * context was valid, so skip | ||
101 | * TSB context switch | ||
102 | * | ||
103 | * At that point cpu0 continues to use a stale TSB, the one from | ||
104 | * before the TSB grow performed on cpu1. cpu1 did not cross-call | ||
105 | * cpu0 to update its TSB because at that point the cpu_vm_mask | ||
106 | * only had cpu1 set in it. | ||
107 | */ | ||
108 | load_secondary_context(mm); | ||
109 | tsb_context_switch(mm); | ||
106 | 110 | ||
107 | /* Even if (mm == old_mm) we _must_ check | 111 | /* Any time a processor runs a context on an address space |
108 | * the cpu_vm_mask. If we do not we could | 112 | * for the first time, we must flush that context out of the |
109 | * corrupt the TLB state because of how | 113 | * local TLB. |
110 | * smp_flush_tlb_{page,range,mm} on sparc64 | ||
111 | * and lazy tlb switches work. -DaveM | ||
112 | */ | 114 | */ |
113 | cpu = smp_processor_id(); | 115 | cpu = smp_processor_id(); |
114 | if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) { | 116 | if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) { |
@@ -116,6 +118,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str | |||
116 | __flush_tlb_mm(CTX_HWBITS(mm->context), | 118 | __flush_tlb_mm(CTX_HWBITS(mm->context), |
117 | SECONDARY_CONTEXT); | 119 | SECONDARY_CONTEXT); |
118 | } | 120 | } |
121 | spin_unlock_irqrestore(&mm->context.lock, flags); | ||
119 | } | 122 | } |
120 | 123 | ||
121 | #define deactivate_mm(tsk,mm) do { } while (0) | 124 | #define deactivate_mm(tsk,mm) do { } while (0) |
@@ -123,23 +126,20 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str | |||
123 | /* Activate a new MM instance for the current task. */ | 126 | /* Activate a new MM instance for the current task. */ |
124 | static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) | 127 | static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) |
125 | { | 128 | { |
129 | unsigned long flags; | ||
126 | int cpu; | 130 | int cpu; |
127 | 131 | ||
128 | /* Note: page_table_lock is used here to serialize switch_mm | 132 | spin_lock_irqsave(&mm->context.lock, flags); |
129 | * and activate_mm, and their calls to get_new_mmu_context. | ||
130 | * This use of page_table_lock is unrelated to its other uses. | ||
131 | */ | ||
132 | spin_lock(&mm->page_table_lock); | ||
133 | if (!CTX_VALID(mm->context)) | 133 | if (!CTX_VALID(mm->context)) |
134 | get_new_mmu_context(mm); | 134 | get_new_mmu_context(mm); |
135 | cpu = smp_processor_id(); | 135 | cpu = smp_processor_id(); |
136 | if (!cpu_isset(cpu, mm->cpu_vm_mask)) | 136 | if (!cpu_isset(cpu, mm->cpu_vm_mask)) |
137 | cpu_set(cpu, mm->cpu_vm_mask); | 137 | cpu_set(cpu, mm->cpu_vm_mask); |
138 | spin_unlock(&mm->page_table_lock); | ||
139 | 138 | ||
140 | load_secondary_context(mm); | 139 | load_secondary_context(mm); |
141 | __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); | 140 | __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); |
142 | reload_tlbmiss_state(current, mm); | 141 | tsb_context_switch(mm); |
142 | spin_unlock_irqrestore(&mm->context.lock, flags); | ||
143 | } | 143 | } |
144 | 144 | ||
145 | #endif /* !(__ASSEMBLY__) */ | 145 | #endif /* !(__ASSEMBLY__) */ |
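The .sun4v_1insn_patch section referenced by load_secondary_context() is a table of (site address, replacement instruction) pairs; when the kernel finds itself on sun4v firmware at early boot, it walks the table and overwrites each annotated instruction, so the hot path carries no runtime branch. A hedged sketch of that walk (the entry layout and symbol names are assumed from the patch-table convention used here):

struct sun4v_1insn_patch_entry {
	unsigned int	addr;		/* address of the instruction */
	unsigned int	insn;		/* sun4v replacement instruction */
};

extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
	__sun4v_1insn_patch_end;

static void __init sun4v_patch(void)
{
	struct sun4v_1insn_patch_entry *p = &__sun4v_1insn_patch;

	while (p < &__sun4v_1insn_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		wmb();
		/* Flush the patched line out of the instruction cache. */
		__asm__ __volatile__("flush %0" : : "r" (addr));
		p++;
	}
}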
diff --git a/include/asm-sparc64/numnodes.h b/include/asm-sparc64/numnodes.h new file mode 100644 index 000000000000..017e7e74f5e7 --- /dev/null +++ b/include/asm-sparc64/numnodes.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _SPARC64_NUMNODES_H | ||
2 | #define _SPARC64_NUMNODES_H | ||
3 | |||
4 | #define NODES_SHIFT 0 | ||
5 | |||
6 | #endif /* !(_SPARC64_NUMNODES_H) */ | ||
diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h index 3c59b2693fb9..c754676e13ef 100644 --- a/include/asm-sparc64/oplib.h +++ b/include/asm-sparc64/oplib.h | |||
@@ -12,18 +12,8 @@ | |||
12 | #include <linux/config.h> | 12 | #include <linux/config.h> |
13 | #include <asm/openprom.h> | 13 | #include <asm/openprom.h> |
14 | 14 | ||
15 | /* Enumeration to describe the prom major version we have detected. */ | 15 | /* OBP version string. */ |
16 | enum prom_major_version { | 16 | extern char prom_version[]; |
17 | PROM_V0, /* Original sun4c V0 prom */ | ||
18 | PROM_V2, /* sun4c and early sun4m V2 prom */ | ||
19 | PROM_V3, /* sun4m and later, up to sun4d/sun4e machines V3 */ | ||
20 | PROM_P1275, /* IEEE compliant ISA based Sun PROM, only sun4u */ | ||
21 | PROM_AP1000, /* actually no prom at all */ | ||
22 | }; | ||
23 | |||
24 | extern enum prom_major_version prom_vers; | ||
25 | /* Revision, and firmware revision. */ | ||
26 | extern unsigned int prom_rev, prom_prev; | ||
27 | 17 | ||
28 | /* Root node of the prom device tree, this stays constant after | 18 | /* Root node of the prom device tree, this stays constant after |
29 | * initialization is complete. | 19 | * initialization is complete. |
@@ -39,6 +29,9 @@ extern int prom_stdin, prom_stdout; | |||
39 | extern int prom_chosen_node; | 29 | extern int prom_chosen_node; |
40 | 30 | ||
41 | /* Helper values and strings in arch/sparc64/kernel/head.S */ | 31 | /* Helper values and strings in arch/sparc64/kernel/head.S */ |
32 | extern const char prom_peer_name[]; | ||
33 | extern const char prom_compatible_name[]; | ||
34 | extern const char prom_root_compatible[]; | ||
42 | extern const char prom_finddev_name[]; | 35 | extern const char prom_finddev_name[]; |
43 | extern const char prom_chosen_path[]; | 36 | extern const char prom_chosen_path[]; |
44 | extern const char prom_getprop_name[]; | 37 | extern const char prom_getprop_name[]; |
@@ -130,15 +123,6 @@ extern void prom_setcallback(callback_func_t func_ptr); | |||
130 | */ | 123 | */ |
131 | extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size); | 124 | extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size); |
132 | 125 | ||
133 | /* Get the prom major version. */ | ||
134 | extern int prom_version(void); | ||
135 | |||
136 | /* Get the prom plugin revision. */ | ||
137 | extern int prom_getrev(void); | ||
138 | |||
139 | /* Get the prom firmware revision. */ | ||
140 | extern int prom_getprev(void); | ||
141 | |||
142 | /* Character operations to/from the console.... */ | 126 | /* Character operations to/from the console.... */ |
143 | 127 | ||
144 | /* Non-blocking get character from console. */ | 128 | /* Non-blocking get character from console. */ |
@@ -164,6 +148,7 @@ enum prom_input_device { | |||
164 | PROMDEV_ITTYA, /* input from ttya */ | 148 | PROMDEV_ITTYA, /* input from ttya */ |
165 | PROMDEV_ITTYB, /* input from ttyb */ | 149 | PROMDEV_ITTYB, /* input from ttyb */ |
166 | PROMDEV_IRSC, /* input from rsc */ | 150 | PROMDEV_IRSC, /* input from rsc */ |
151 | PROMDEV_IVCONS, /* input from virtual-console */ | ||
167 | PROMDEV_I_UNK, | 152 | PROMDEV_I_UNK, |
168 | }; | 153 | }; |
169 | 154 | ||
@@ -176,6 +161,7 @@ enum prom_output_device { | |||
176 | PROMDEV_OTTYA, /* to ttya */ | 161 | PROMDEV_OTTYA, /* to ttya */ |
177 | PROMDEV_OTTYB, /* to ttyb */ | 162 | PROMDEV_OTTYB, /* to ttyb */ |
178 | PROMDEV_ORSC, /* to rsc */ | 163 | PROMDEV_ORSC, /* to rsc */ |
164 | PROMDEV_OVCONS, /* to virtual-console */ | ||
179 | PROMDEV_O_UNK, | 165 | PROMDEV_O_UNK, |
180 | }; | 166 | }; |
181 | 167 | ||
@@ -183,10 +169,18 @@ extern enum prom_output_device prom_query_output_device(void); | |||
183 | 169 | ||
184 | /* Multiprocessor operations... */ | 170 | /* Multiprocessor operations... */ |
185 | #ifdef CONFIG_SMP | 171 | #ifdef CONFIG_SMP |
186 | /* Start the CPU with the given device tree node, context table, and context | 172 | /* Start the CPU with the given device tree node at the passed program |
187 | * at the passed program counter. | 173 | * counter with the given arg passed in via register %o0. |
174 | */ | ||
175 | extern void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg); | ||
176 | |||
177 | /* Start the CPU with the given cpu ID at the passed program | ||
178 | * counter with the given arg passed in via register %o0. | ||
188 | */ | 179 | */ |
189 | extern void prom_startcpu(int cpunode, unsigned long pc, unsigned long o0); | 180 | extern void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg); |
181 | |||
182 | /* Stop the CPU with the given cpu ID. */ | ||
183 | extern void prom_stopcpu_cpuid(int cpuid); | ||
190 | 184 | ||
191 | /* Stop the current CPU. */ | 185 | /* Stop the current CPU. */ |
192 | extern void prom_stopself(void); | 186 | extern void prom_stopself(void); |
@@ -335,6 +329,7 @@ int cpu_find_by_mid(int mid, int *prom_node); | |||
335 | 329 | ||
336 | /* Client interface level routines. */ | 330 | /* Client interface level routines. */ |
337 | extern void prom_set_trap_table(unsigned long tba); | 331 | extern void prom_set_trap_table(unsigned long tba); |
332 | extern void prom_set_trap_table_sun4v(unsigned long tba, unsigned long mmfsa); | ||
338 | 333 | ||
339 | extern long p1275_cmd(const char *, long, ...); | 334 | extern long p1275_cmd(const char *, long, ...); |
340 | 335 | ||
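The practical difference between the two start calls shows up in SMP bringup: sun4v firmware addresses CPUs by cpuid, while older OBP wants the device tree node. A hedged fragment of the caller side (sparc64_cpu_startup is assumed to be the secondary entry point in head.S; the wrapper itself is illustrative):

extern unsigned long sparc64_cpu_startup;

static void start_cpu(int cpuid, int prom_node, int have_hypervisor)
{
	unsigned long entry = (unsigned long) &sparc64_cpu_startup;
	unsigned long cookie = 0UL;	/* shows up in %o0 at entry */

	if (have_hypervisor)
		prom_startcpu_cpuid(cpuid, entry, cookie);
	else
		prom_startcpu(prom_node, entry, cookie);
}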
diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h index 5426bb28a993..fcb2812265f4 100644 --- a/include/asm-sparc64/page.h +++ b/include/asm-sparc64/page.h | |||
@@ -104,10 +104,12 @@ typedef unsigned long pgprot_t; | |||
104 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) | 104 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) |
105 | #define ARCH_HAS_SETCLEAR_HUGE_PTE | 105 | #define ARCH_HAS_SETCLEAR_HUGE_PTE |
106 | #define ARCH_HAS_HUGETLB_PREFAULT_HOOK | 106 | #define ARCH_HAS_HUGETLB_PREFAULT_HOOK |
107 | #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA | ||
107 | #endif | 108 | #endif |
108 | 109 | ||
109 | #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \ | 110 | #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \ |
110 | (_AC(0x0000000070000000,UL)) : (PAGE_OFFSET)) | 111 | (_AC(0x0000000070000000,UL)) : \ |
112 | (_AC(0xfffff80000000000,UL) + (1UL << 32UL))) | ||
111 | 113 | ||
112 | #endif /* !(__ASSEMBLY__) */ | 114 | #endif /* !(__ASSEMBLY__) */ |
113 | 115 | ||
@@ -124,17 +126,10 @@ typedef unsigned long pgprot_t; | |||
124 | #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET) | 126 | #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET) |
125 | #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) | 127 | #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) |
126 | 128 | ||
127 | /* PFNs are real physical page numbers. However, mem_map only begins to record | 129 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) |
128 | * per-page information starting at pfn_base. This is to handle systems where | ||
129 | * the first physical page in the machine is at some huge physical address, | ||
130 | * such as 4GB. This is common on a partitioned E10000, for example. | ||
131 | */ | ||
132 | extern struct page *pfn_to_page(unsigned long pfn); | ||
133 | extern unsigned long page_to_pfn(struct page *); | ||
134 | 130 | ||
135 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr)>>PAGE_SHIFT) | 131 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr)>>PAGE_SHIFT) |
136 | 132 | ||
137 | #define pfn_valid(pfn) (((pfn)-(pfn_base)) < max_mapnr) | ||
138 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) | 133 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) |
139 | 134 | ||
140 | #define virt_to_phys __pa | 135 | #define virt_to_phys __pa |
diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h index dd35a2c7798a..1396f110939a 100644 --- a/include/asm-sparc64/pbm.h +++ b/include/asm-sparc64/pbm.h | |||
@@ -139,6 +139,9 @@ struct pci_pbm_info { | |||
139 | /* Opaque 32-bit system bus Port ID. */ | 139 | /* Opaque 32-bit system bus Port ID. */ |
140 | u32 portid; | 140 | u32 portid; |
141 | 141 | ||
142 | /* Opaque 32-bit handle used for hypervisor calls. */ | ||
143 | u32 devhandle; | ||
144 | |||
142 | /* Chipset version information. */ | 145 | /* Chipset version information. */ |
143 | int chip_type; | 146 | int chip_type; |
144 | #define PBM_CHIP_TYPE_SABRE 1 | 147 | #define PBM_CHIP_TYPE_SABRE 1 |
diff --git a/include/asm-sparc64/pci.h b/include/asm-sparc64/pci.h index 89bd71b1c0d8..7c5a589ea437 100644 --- a/include/asm-sparc64/pci.h +++ b/include/asm-sparc64/pci.h | |||
@@ -41,10 +41,26 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) | |||
41 | 41 | ||
42 | struct pci_dev; | 42 | struct pci_dev; |
43 | 43 | ||
44 | struct pci_iommu_ops { | ||
45 | void *(*alloc_consistent)(struct pci_dev *, size_t, dma_addr_t *); | ||
46 | void (*free_consistent)(struct pci_dev *, size_t, void *, dma_addr_t); | ||
47 | dma_addr_t (*map_single)(struct pci_dev *, void *, size_t, int); | ||
48 | void (*unmap_single)(struct pci_dev *, dma_addr_t, size_t, int); | ||
49 | int (*map_sg)(struct pci_dev *, struct scatterlist *, int, int); | ||
50 | void (*unmap_sg)(struct pci_dev *, struct scatterlist *, int, int); | ||
51 | void (*dma_sync_single_for_cpu)(struct pci_dev *, dma_addr_t, size_t, int); | ||
52 | void (*dma_sync_sg_for_cpu)(struct pci_dev *, struct scatterlist *, int, int); | ||
53 | }; | ||
54 | |||
55 | extern struct pci_iommu_ops *pci_iommu_ops; | ||
56 | |||
44 | /* Allocate and map kernel buffer using consistent mode DMA for a device. | 57 | /* Allocate and map kernel buffer using consistent mode DMA for a device. |
45 | * hwdev should be valid struct pci_dev pointer for PCI devices. | 58 | * hwdev should be valid struct pci_dev pointer for PCI devices. |
46 | */ | 59 | */ |
47 | extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle); | 60 | static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle) |
61 | { | ||
62 | return pci_iommu_ops->alloc_consistent(hwdev, size, dma_handle); | ||
63 | } | ||
48 | 64 | ||
49 | /* Free and unmap a consistent DMA buffer. | 65 | /* Free and unmap a consistent DMA buffer. |
50 | * cpu_addr is what was returned from pci_alloc_consistent, | 66 | * cpu_addr is what was returned from pci_alloc_consistent, |
@@ -54,7 +70,10 @@ extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t | |||
54 | * References to the memory and mappings associated with cpu_addr/dma_addr | 70 | * References to the memory and mappings associated with cpu_addr/dma_addr |
55 | * past this call are illegal. | 71 | * past this call are illegal. |
56 | */ | 72 | */ |
57 | extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle); | 73 | static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle) |
74 | { | ||
75 | return pci_iommu_ops->free_consistent(hwdev, size, vaddr, dma_handle); | ||
76 | } | ||
58 | 77 | ||
59 | /* Map a single buffer of the indicated size for DMA in streaming mode. | 78 | /* Map a single buffer of the indicated size for DMA in streaming mode. |
60 | * The 32-bit bus address to use is returned. | 79 | * The 32-bit bus address to use is returned. |
@@ -62,7 +81,10 @@ extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, | |||
62 | * Once the device is given the dma address, the device owns this memory | 81 | * Once the device is given the dma address, the device owns this memory |
63 | * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed. | 82 | * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed. |
64 | */ | 83 | */ |
65 | extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction); | 84 | static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction) |
85 | { | ||
86 | return pci_iommu_ops->map_single(hwdev, ptr, size, direction); | ||
87 | } | ||
66 | 88 | ||
67 | /* Unmap a single streaming mode DMA translation. The dma_addr and size | 89 | /* Unmap a single streaming mode DMA translation. The dma_addr and size |
68 | * must match what was provided for in a previous pci_map_single call. All | 90 | * must match what was provided for in a previous pci_map_single call. All |
@@ -71,7 +93,10 @@ extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, | |||
71 | * After this call, reads by the cpu to the buffer are guaranteed to see | 93 | * After this call, reads by the cpu to the buffer are guaranteed to see |
72 | * whatever the device wrote there. | 94 | * whatever the device wrote there. |
73 | */ | 95 | */ |
74 | extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction); | 96 | static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction) |
97 | { | ||
98 | pci_iommu_ops->unmap_single(hwdev, dma_addr, size, direction); | ||
99 | } | ||
75 | 100 | ||
76 | /* No highmem on sparc64, plus we have an IOMMU, so mapping pages is easy. */ | 101 | /* No highmem on sparc64, plus we have an IOMMU, so mapping pages is easy. */ |
77 | #define pci_map_page(dev, page, off, size, dir) \ | 102 | #define pci_map_page(dev, page, off, size, dir) \ |
@@ -107,15 +132,19 @@ extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t | |||
107 | * Device ownership issues as mentioned above for pci_map_single are | 132 | * Device ownership issues as mentioned above for pci_map_single are |
108 | * the same here. | 133 | * the same here. |
109 | */ | 134 | */ |
110 | extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, | 135 | static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction) |
111 | int nents, int direction); | 136 | { |
137 | return pci_iommu_ops->map_sg(hwdev, sg, nents, direction); | ||
138 | } | ||
112 | 139 | ||
113 | /* Unmap a set of streaming mode DMA translations. | 140 | /* Unmap a set of streaming mode DMA translations. |
114 | * Again, cpu read rules concerning calls here are the same as for | 141 | * Again, cpu read rules concerning calls here are the same as for |
115 | * pci_unmap_single() above. | 142 | * pci_unmap_single() above. |
116 | */ | 143 | */ |
117 | extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, | 144 | static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction) |
118 | int nhwents, int direction); | 145 | { |
146 | pci_iommu_ops->unmap_sg(hwdev, sg, nhwents, direction); | ||
147 | } | ||
119 | 148 | ||
120 | /* Make physical memory consistent for a single | 149 | /* Make physical memory consistent for a single |
121 | * streaming mode DMA translation after a transfer. | 150 | * streaming mode DMA translation after a transfer. |
@@ -127,8 +156,10 @@ extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, | |||
127 | * must first perform a pci_dma_sync_for_device, and then the | 156 | * must first perform a pci_dma_sync_for_device, and then the |
128 | * device again owns the buffer. | 157 | * device again owns the buffer. |
129 | */ | 158 | */ |
130 | extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, | 159 | static inline void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction) |
131 | size_t size, int direction); | 160 | { |
161 | pci_iommu_ops->dma_sync_single_for_cpu(hwdev, dma_handle, size, direction); | ||
162 | } | ||
132 | 163 | ||
133 | static inline void | 164 | static inline void |
134 | pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, | 165 | pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, |
@@ -144,7 +175,10 @@ pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, | |||
144 | * The same as pci_dma_sync_single_* but for a scatter-gather list, | 175 | * The same as pci_dma_sync_single_* but for a scatter-gather list, |
145 | * same rules and usage. | 176 | * same rules and usage. |
146 | */ | 177 | */ |
147 | extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction); | 178 | static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction) |
179 | { | ||
180 | pci_iommu_ops->dma_sync_sg_for_cpu(hwdev, sg, nelems, direction); | ||
181 | } | ||
148 | 182 | ||
149 | static inline void | 183 | static inline void |
150 | pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, | 184 | pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, |
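The effect of this change is that every streaming-DMA entry point becomes a one-line trampoline, and the platform picks an implementation once at boot instead of branching on the chip type in every call. A sketch of the provider side, with made-up sun4u function names standing in for the real IOMMU routines (they would be declared in the platform code):

/* Hypothetical provider: the named functions are placeholders. */
static struct pci_iommu_ops sun4u_iommu_ops = {
	.alloc_consistent	 = sun4u_alloc_consistent,
	.free_consistent	 = sun4u_free_consistent,
	.map_single		 = sun4u_map_single,
	.unmap_single		 = sun4u_unmap_single,
	.map_sg			 = sun4u_map_sg,
	.unmap_sg		 = sun4u_unmap_sg,
	.dma_sync_single_for_cpu = sun4u_dma_sync_single_for_cpu,
	.dma_sync_sg_for_cpu	 = sun4u_dma_sync_sg_for_cpu,
};

/* Boot code installs whichever table matches the platform. */
struct pci_iommu_ops *pci_iommu_ops = &sun4u_iommu_ops;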
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h index a96067cca963..12e4a273bd43 100644 --- a/include/asm-sparc64/pgalloc.h +++ b/include/asm-sparc64/pgalloc.h | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
7 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
8 | #include <linux/mm.h> | 8 | #include <linux/mm.h> |
9 | #include <linux/slab.h> | ||
9 | 10 | ||
10 | #include <asm/spitfire.h> | 11 | #include <asm/spitfire.h> |
11 | #include <asm/cpudata.h> | 12 | #include <asm/cpudata.h> |
@@ -13,172 +14,59 @@ | |||
13 | #include <asm/page.h> | 14 | #include <asm/page.h> |
14 | 15 | ||
15 | /* Page table allocation/freeing. */ | 16 | /* Page table allocation/freeing. */ |
16 | #ifdef CONFIG_SMP | 17 | extern kmem_cache_t *pgtable_cache; |
17 | /* Sliiiicck */ | ||
18 | #define pgt_quicklists local_cpu_data() | ||
19 | #else | ||
20 | extern struct pgtable_cache_struct { | ||
21 | unsigned long *pgd_cache; | ||
22 | unsigned long *pte_cache[2]; | ||
23 | unsigned int pgcache_size; | ||
24 | } pgt_quicklists; | ||
25 | #endif | ||
26 | #define pgd_quicklist (pgt_quicklists.pgd_cache) | ||
27 | #define pmd_quicklist ((unsigned long *)0) | ||
28 | #define pte_quicklist (pgt_quicklists.pte_cache) | ||
29 | #define pgtable_cache_size (pgt_quicklists.pgcache_size) | ||
30 | 18 | ||
31 | static __inline__ void free_pgd_fast(pgd_t *pgd) | 19 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) |
32 | { | 20 | { |
33 | preempt_disable(); | 21 | return kmem_cache_alloc(pgtable_cache, GFP_KERNEL); |
34 | *(unsigned long *)pgd = (unsigned long) pgd_quicklist; | ||
35 | pgd_quicklist = (unsigned long *) pgd; | ||
36 | pgtable_cache_size++; | ||
37 | preempt_enable(); | ||
38 | } | 22 | } |
39 | 23 | ||
40 | static __inline__ pgd_t *get_pgd_fast(void) | 24 | static inline void pgd_free(pgd_t *pgd) |
41 | { | 25 | { |
42 | unsigned long *ret; | 26 | kmem_cache_free(pgtable_cache, pgd); |
43 | |||
44 | preempt_disable(); | ||
45 | if((ret = pgd_quicklist) != NULL) { | ||
46 | pgd_quicklist = (unsigned long *)(*ret); | ||
47 | ret[0] = 0; | ||
48 | pgtable_cache_size--; | ||
49 | preempt_enable(); | ||
50 | } else { | ||
51 | preempt_enable(); | ||
52 | ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT); | ||
53 | if(ret) | ||
54 | memset(ret, 0, PAGE_SIZE); | ||
55 | } | ||
56 | return (pgd_t *)ret; | ||
57 | } | 27 | } |
58 | 28 | ||
59 | static __inline__ void free_pgd_slow(pgd_t *pgd) | ||
60 | { | ||
61 | free_page((unsigned long)pgd); | ||
62 | } | ||
63 | |||
64 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
65 | #define VPTE_COLOR(address) (((address) >> (PAGE_SHIFT + 10)) & 1UL) | ||
66 | #define DCACHE_COLOR(address) (((address) >> PAGE_SHIFT) & 1UL) | ||
67 | #else | ||
68 | #define VPTE_COLOR(address) 0 | ||
69 | #define DCACHE_COLOR(address) 0 | ||
70 | #endif | ||
71 | |||
72 | #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD) | 29 | #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD) |
73 | 30 | ||
74 | static __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address) | 31 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) |
75 | { | 32 | { |
76 | unsigned long *ret; | 33 | return kmem_cache_alloc(pgtable_cache, |
77 | int color = 0; | 34 | GFP_KERNEL|__GFP_REPEAT); |
78 | |||
79 | preempt_disable(); | ||
80 | if (pte_quicklist[color] == NULL) | ||
81 | color = 1; | ||
82 | |||
83 | if((ret = (unsigned long *)pte_quicklist[color]) != NULL) { | ||
84 | pte_quicklist[color] = (unsigned long *)(*ret); | ||
85 | ret[0] = 0; | ||
86 | pgtable_cache_size--; | ||
87 | } | ||
88 | preempt_enable(); | ||
89 | |||
90 | return (pmd_t *)ret; | ||
91 | } | 35 | } |
92 | 36 | ||
93 | static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) | 37 | static inline void pmd_free(pmd_t *pmd) |
94 | { | 38 | { |
95 | pmd_t *pmd; | 39 | kmem_cache_free(pgtable_cache, pmd); |
96 | |||
97 | pmd = pmd_alloc_one_fast(mm, address); | ||
98 | if (!pmd) { | ||
99 | pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); | ||
100 | if (pmd) | ||
101 | memset(pmd, 0, PAGE_SIZE); | ||
102 | } | ||
103 | return pmd; | ||
104 | } | 40 | } |
105 | 41 | ||
106 | static __inline__ void free_pmd_fast(pmd_t *pmd) | 42 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, |
43 | unsigned long address) | ||
107 | { | 44 | { |
108 | unsigned long color = DCACHE_COLOR((unsigned long)pmd); | 45 | return kmem_cache_alloc(pgtable_cache, |
109 | 46 | GFP_KERNEL|__GFP_REPEAT); | |
110 | preempt_disable(); | ||
111 | *(unsigned long *)pmd = (unsigned long) pte_quicklist[color]; | ||
112 | pte_quicklist[color] = (unsigned long *) pmd; | ||
113 | pgtable_cache_size++; | ||
114 | preempt_enable(); | ||
115 | } | 47 | } |
116 | 48 | ||
117 | static __inline__ void free_pmd_slow(pmd_t *pmd) | 49 | static inline struct page *pte_alloc_one(struct mm_struct *mm, |
50 | unsigned long address) | ||
118 | { | 51 | { |
119 | free_page((unsigned long)pmd); | 52 | return virt_to_page(pte_alloc_one_kernel(mm, address)); |
120 | } | 53 | } |
121 | 54 | ||
122 | #define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE) | ||
123 | #define pmd_populate(MM,PMD,PTE_PAGE) \ | ||
124 | pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE)) | ||
125 | |||
126 | extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address); | ||
127 | |||
128 | static inline struct page * | ||
129 | pte_alloc_one(struct mm_struct *mm, unsigned long addr) | ||
130 | { | ||
131 | pte_t *pte = pte_alloc_one_kernel(mm, addr); | ||
132 | |||
133 | if (pte) | ||
134 | return virt_to_page(pte); | ||
135 | |||
136 | return NULL; | ||
137 | } | ||
138 | |||
139 | static __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address) | ||
140 | { | ||
141 | unsigned long color = VPTE_COLOR(address); | ||
142 | unsigned long *ret; | ||
143 | |||
144 | preempt_disable(); | ||
145 | if((ret = (unsigned long *)pte_quicklist[color]) != NULL) { | ||
146 | pte_quicklist[color] = (unsigned long *)(*ret); | ||
147 | ret[0] = 0; | ||
148 | pgtable_cache_size--; | ||
149 | } | ||
150 | preempt_enable(); | ||
151 | return (pte_t *)ret; | ||
152 | } | ||
153 | |||
154 | static __inline__ void free_pte_fast(pte_t *pte) | ||
155 | { | ||
156 | unsigned long color = DCACHE_COLOR((unsigned long)pte); | ||
157 | |||
158 | preempt_disable(); | ||
159 | *(unsigned long *)pte = (unsigned long) pte_quicklist[color]; | ||
160 | pte_quicklist[color] = (unsigned long *) pte; | ||
161 | pgtable_cache_size++; | ||
162 | preempt_enable(); | ||
163 | } | ||
164 | |||
165 | static __inline__ void free_pte_slow(pte_t *pte) | ||
166 | { | ||
167 | free_page((unsigned long)pte); | ||
168 | } | ||
169 | |||
170 | static inline void pte_free_kernel(pte_t *pte) | 55 | static inline void pte_free_kernel(pte_t *pte) |
171 | { | 56 | { |
172 | free_pte_fast(pte); | 57 | kmem_cache_free(pgtable_cache, pte); |
173 | } | 58 | } |
174 | 59 | ||
175 | static inline void pte_free(struct page *ptepage) | 60 | static inline void pte_free(struct page *ptepage) |
176 | { | 61 | { |
177 | free_pte_fast(page_address(ptepage)); | 62 | pte_free_kernel(page_address(ptepage)); |
178 | } | 63 | } |
179 | 64 | ||
180 | #define pmd_free(pmd) free_pmd_fast(pmd) | 65 | |
181 | #define pgd_free(pgd) free_pgd_fast(pgd) | 66 | #define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE) |
182 | #define pgd_alloc(mm) get_pgd_fast() | 67 | #define pmd_populate(MM,PMD,PTE_PAGE) \ |
68 | pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE)) | ||
69 | |||
70 | #define check_pgt_cache() do { } while (0) | ||
183 | 71 | ||
184 | #endif /* _SPARC64_PGALLOC_H */ | 72 | #endif /* _SPARC64_PGALLOC_H */ |
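All of the allocators above now draw page-sized, page-aligned objects from one slab; the cache itself is created once during MM init. A minimal sketch under the 2.6-era kmem_cache_create() signature, with an assumed zeroing constructor (page tables must start out clear):

#include <linux/slab.h>
#include <linux/string.h>

kmem_cache_t *pgtable_cache;

static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
{
	memset(addr, 0, PAGE_SIZE);	/* new page tables must be zeroed */
}

void __init pgtable_cache_init(void)
{
	pgtable_cache = kmem_cache_create("pgtable_cache",
					  PAGE_SIZE, PAGE_SIZE,
					  SLAB_HWCACHE_ALIGN,
					  zero_ctor, NULL);
	if (!pgtable_cache)
		panic("pgtable_cache_init(): Could not create!");
}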
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index f0a9b44d3eb5..ed4124edf837 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h | |||
@@ -25,7 +25,8 @@ | |||
25 | #include <asm/const.h> | 25 | #include <asm/const.h> |
26 | 26 | ||
27 | /* The kernel image occupies 0x400000 to 0x2000000 (4MB --> 32MB). | 27 | /* The kernel image occupies 0x400000 to 0x2000000 (4MB --> 32MB). |
28 | * The page copy blockops can use 0x2000000 to 0x10000000. | 28 | * The page copy blockops can use 0x2000000 to 0x4000000. |
29 | * The TSB is mapped in the 0x4000000 to 0x6000000 range. | ||
29 | * The PROM resides in an area spanning 0xf0000000 to 0x100000000. | 30 | * The PROM resides in an area spanning 0xf0000000 to 0x100000000. |
30 | * The vmalloc area spans 0x100000000 to 0x200000000. | 31 | * The vmalloc area spans 0x100000000 to 0x200000000. |
31 | * Since modules need to be in the lowest 32-bits of the address space, | 32 | * Since modules need to be in the lowest 32-bits of the address space, |
@@ -34,6 +35,7 @@ | |||
34 | * 0x400000000. | 35 | * 0x400000000. |
35 | */ | 36 | */ |
36 | #define TLBTEMP_BASE _AC(0x0000000002000000,UL) | 37 | #define TLBTEMP_BASE _AC(0x0000000002000000,UL) |
38 | #define TSBMAP_BASE _AC(0x0000000004000000,UL) | ||
37 | #define MODULES_VADDR _AC(0x0000000010000000,UL) | 39 | #define MODULES_VADDR _AC(0x0000000010000000,UL) |
38 | #define MODULES_LEN _AC(0x00000000e0000000,UL) | 40 | #define MODULES_LEN _AC(0x00000000e0000000,UL) |
39 | #define MODULES_END _AC(0x00000000f0000000,UL) | 41 | #define MODULES_END _AC(0x00000000f0000000,UL) |
@@ -88,162 +90,538 @@ | |||
88 | 90 | ||
89 | #endif /* !(__ASSEMBLY__) */ | 91 | #endif /* !(__ASSEMBLY__) */ |
90 | 92 | ||
91 | /* Spitfire/Cheetah TTE bits. */ | 93 | /* PTE bits which are the same in SUN4U and SUN4V format. */ |
92 | #define _PAGE_VALID _AC(0x8000000000000000,UL) /* Valid TTE */ | 94 | #define _PAGE_VALID _AC(0x8000000000000000,UL) /* Valid TTE */ |
93 | #define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit up to date*/ | 95 | #define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/ |
94 | #define _PAGE_SZ4MB _AC(0x6000000000000000,UL) /* 4MB Page */ | 96 | |
95 | #define _PAGE_SZ512K _AC(0x4000000000000000,UL) /* 512K Page */ | 97 | /* SUN4U pte bits... */ |
96 | #define _PAGE_SZ64K _AC(0x2000000000000000,UL) /* 64K Page */ | 98 | #define _PAGE_SZ4MB_4U _AC(0x6000000000000000,UL) /* 4MB Page */ |
97 | #define _PAGE_SZ8K _AC(0x0000000000000000,UL) /* 8K Page */ | 99 | #define _PAGE_SZ512K_4U _AC(0x4000000000000000,UL) /* 512K Page */ |
98 | #define _PAGE_NFO _AC(0x1000000000000000,UL) /* No Fault Only */ | 100 | #define _PAGE_SZ64K_4U _AC(0x2000000000000000,UL) /* 64K Page */ |
99 | #define _PAGE_IE _AC(0x0800000000000000,UL) /* Invert Endianness */ | 101 | #define _PAGE_SZ8K_4U _AC(0x0000000000000000,UL) /* 8K Page */ |
100 | #define _PAGE_SOFT2 _AC(0x07FC000000000000,UL) /* Software bits, set 2 */ | 102 | #define _PAGE_NFO_4U _AC(0x1000000000000000,UL) /* No Fault Only */ |
101 | #define _PAGE_RES1 _AC(0x0002000000000000,UL) /* Reserved */ | 103 | #define _PAGE_IE_4U _AC(0x0800000000000000,UL) /* Invert Endianness */ |
102 | #define _PAGE_SZ32MB _AC(0x0001000000000000,UL) /* (Panther) 32MB page */ | 104 | #define _PAGE_SOFT2_4U _AC(0x07FC000000000000,UL) /* Software bits, set 2 */ |
103 | #define _PAGE_SZ256MB _AC(0x2001000000000000,UL) /* (Panther) 256MB page */ | 105 | #define _PAGE_RES1_4U _AC(0x0002000000000000,UL) /* Reserved */ |
104 | #define _PAGE_SN _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */ | 106 | #define _PAGE_SZ32MB_4U _AC(0x0001000000000000,UL) /* (Panther) 32MB page */ |
105 | #define _PAGE_RES2 _AC(0x0000780000000000,UL) /* Reserved */ | 107 | #define _PAGE_SZ256MB_4U _AC(0x2001000000000000,UL) /* (Panther) 256MB page */ |
106 | #define _PAGE_PADDR_SF _AC(0x000001FFFFFFE000,UL) /* (Spitfire) paddr[40:13]*/ | 108 | #define _PAGE_SN_4U _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */ |
107 | #define _PAGE_PADDR _AC(0x000007FFFFFFE000,UL) /* (Cheetah) paddr[42:13] */ | 109 | #define _PAGE_RES2_4U _AC(0x0000780000000000,UL) /* Reserved */ |
108 | #define _PAGE_SOFT _AC(0x0000000000001F80,UL) /* Software bits */ | 110 | #define _PAGE_PADDR_4U _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13] */ |
109 | #define _PAGE_L _AC(0x0000000000000040,UL) /* Locked TTE */ | 111 | #define _PAGE_SOFT_4U _AC(0x0000000000001F80,UL) /* Software bits: */ |
110 | #define _PAGE_CP _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */ | 112 | #define _PAGE_EXEC_4U _AC(0x0000000000001000,UL) /* Executable SW bit */ |
111 | #define _PAGE_CV _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */ | 113 | #define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty) */ |
112 | #define _PAGE_E _AC(0x0000000000000008,UL) /* side-Effect */ | 114 | #define _PAGE_FILE_4U _AC(0x0000000000000800,UL) /* Pagecache page */ |
113 | #define _PAGE_P _AC(0x0000000000000004,UL) /* Privileged Page */ | 115 | #define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd) */ |
114 | #define _PAGE_W _AC(0x0000000000000002,UL) /* Writable */ | 116 | #define _PAGE_READ_4U _AC(0x0000000000000200,UL) /* Readable SW Bit */ |
115 | #define _PAGE_G _AC(0x0000000000000001,UL) /* Global */ | 117 | #define _PAGE_WRITE_4U _AC(0x0000000000000100,UL) /* Writable SW Bit */ |
116 | 118 | #define _PAGE_PRESENT_4U _AC(0x0000000000000080,UL) /* Present */ | |
117 | /* Here are the SpitFire software bits we use in the TTE's. | 119 | #define _PAGE_L_4U _AC(0x0000000000000040,UL) /* Locked TTE */ |
118 | * | 120 | #define _PAGE_CP_4U _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */ |
119 | * WARNING: If you are going to try and start using some | 121 | #define _PAGE_CV_4U _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */ |
120 | * of the soft2 bits, you will need to make | 122 | #define _PAGE_E_4U _AC(0x0000000000000008,UL) /* side-Effect */ |
121 | * modifications to the swap entry implementation. | 123 | #define _PAGE_P_4U _AC(0x0000000000000004,UL) /* Privileged Page */ |
122 | * For example, one thing that could happen is that | 124 | #define _PAGE_W_4U _AC(0x0000000000000002,UL) /* Writable */ |
123 | * swp_entry_to_pte() would BUG_ON() if you tried | 125 | |
124 | * to use one of the soft2 bits for _PAGE_FILE. | 126 | /* SUN4V pte bits... */ |
125 | * | 127 | #define _PAGE_NFO_4V _AC(0x4000000000000000,UL) /* No Fault Only */ |
126 | * Like other architectures, I have aliased _PAGE_FILE with | 128 | #define _PAGE_SOFT2_4V _AC(0x3F00000000000000,UL) /* Software bits, set 2 */ |
127 | * _PAGE_MODIFIED. This works because _PAGE_FILE is never | 129 | #define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty) */ |
128 | * interpreted that way unless _PAGE_PRESENT is clear. | 130 | #define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd) */ |
129 | */ | 131 | #define _PAGE_READ_4V _AC(0x0800000000000000,UL) /* Readable SW Bit */ |
130 | #define _PAGE_EXEC _AC(0x0000000000001000,UL) /* Executable SW bit */ | 132 | #define _PAGE_WRITE_4V _AC(0x0400000000000000,UL) /* Writable SW Bit */ |
131 | #define _PAGE_MODIFIED _AC(0x0000000000000800,UL) /* Modified (dirty) */ | 133 | #define _PAGE_PADDR_4V _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13] */ |
132 | #define _PAGE_FILE _AC(0x0000000000000800,UL) /* Pagecache page */ | 134 | #define _PAGE_IE_4V _AC(0x0000000000001000,UL) /* Invert Endianness */ |
133 | #define _PAGE_ACCESSED _AC(0x0000000000000400,UL) /* Accessed (ref'd) */ | 135 | #define _PAGE_E_4V _AC(0x0000000000000800,UL) /* side-Effect */ |
134 | #define _PAGE_READ _AC(0x0000000000000200,UL) /* Readable SW Bit */ | 136 | #define _PAGE_CP_4V _AC(0x0000000000000400,UL) /* Cacheable in P-Cache */ |
135 | #define _PAGE_WRITE _AC(0x0000000000000100,UL) /* Writable SW Bit */ | 137 | #define _PAGE_CV_4V _AC(0x0000000000000200,UL) /* Cacheable in V-Cache */ |
136 | #define _PAGE_PRESENT _AC(0x0000000000000080,UL) /* Present */ | 138 | #define _PAGE_P_4V _AC(0x0000000000000100,UL) /* Privileged Page */ |
139 | #define _PAGE_EXEC_4V _AC(0x0000000000000080,UL) /* Executable Page */ | ||
140 | #define _PAGE_W_4V _AC(0x0000000000000040,UL) /* Writable */ | ||
141 | #define _PAGE_SOFT_4V _AC(0x0000000000000030,UL) /* Software bits */ | ||
142 | #define _PAGE_FILE_4V _AC(0x0000000000000020,UL) /* Pagecache page */ | ||
143 | #define _PAGE_PRESENT_4V _AC(0x0000000000000010,UL) /* Present */ | ||
144 | #define _PAGE_RESV_4V _AC(0x0000000000000008,UL) /* Reserved */ | ||
145 | #define _PAGE_SZ16GB_4V _AC(0x0000000000000007,UL) /* 16GB Page */ | ||
146 | #define _PAGE_SZ2GB_4V _AC(0x0000000000000006,UL) /* 2GB Page */ | ||
147 | #define _PAGE_SZ256MB_4V _AC(0x0000000000000005,UL) /* 256MB Page */ | ||
148 | #define _PAGE_SZ32MB_4V _AC(0x0000000000000004,UL) /* 32MB Page */ | ||
149 | #define _PAGE_SZ4MB_4V _AC(0x0000000000000003,UL) /* 4MB Page */ | ||
150 | #define _PAGE_SZ512K_4V _AC(0x0000000000000002,UL) /* 512K Page */ | ||
151 | #define _PAGE_SZ64K_4V _AC(0x0000000000000001,UL) /* 64K Page */ | ||
152 | #define _PAGE_SZ8K_4V _AC(0x0000000000000000,UL) /* 8K Page */ | ||
137 | 153 | ||
138 | #if PAGE_SHIFT == 13 | 154 | #if PAGE_SHIFT == 13 |
139 | #define _PAGE_SZBITS _PAGE_SZ8K | 155 | #define _PAGE_SZBITS_4U _PAGE_SZ8K_4U |
156 | #define _PAGE_SZBITS_4V _PAGE_SZ8K_4V | ||
140 | #elif PAGE_SHIFT == 16 | 157 | #elif PAGE_SHIFT == 16 |
141 | #define _PAGE_SZBITS _PAGE_SZ64K | 158 | #define _PAGE_SZBITS_4U _PAGE_SZ64K_4U |
159 | #define _PAGE_SZBITS_4V _PAGE_SZ64K_4V | ||
142 | #elif PAGE_SHIFT == 19 | 160 | #elif PAGE_SHIFT == 19 |
143 | #define _PAGE_SZBITS _PAGE_SZ512K | 161 | #define _PAGE_SZBITS_4U _PAGE_SZ512K_4U |
162 | #define _PAGE_SZBITS_4V _PAGE_SZ512K_4V | ||
144 | #elif PAGE_SHIFT == 22 | 163 | #elif PAGE_SHIFT == 22 |
145 | #define _PAGE_SZBITS _PAGE_SZ4MB | 164 | #define _PAGE_SZBITS_4U _PAGE_SZ4MB_4U |
165 | #define _PAGE_SZBITS_4V _PAGE_SZ4MB_4V | ||
146 | #else | 166 | #else |
147 | #error Wrong PAGE_SHIFT specified | 167 | #error Wrong PAGE_SHIFT specified |
148 | #endif | 168 | #endif |
149 | 169 | ||
150 | #if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) | 170 | #if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) |
151 | #define _PAGE_SZHUGE _PAGE_SZ4MB | 171 | #define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U |
172 | #define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V | ||
152 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) | 173 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) |
153 | #define _PAGE_SZHUGE _PAGE_SZ512K | 174 | #define _PAGE_SZHUGE_4U _PAGE_SZ512K_4U |
175 | #define _PAGE_SZHUGE_4V _PAGE_SZ512K_4V | ||
154 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) | 176 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) |
155 | #define _PAGE_SZHUGE _PAGE_SZ64K | 177 | #define _PAGE_SZHUGE_4U _PAGE_SZ64K_4U |
178 | #define _PAGE_SZHUGE_4V _PAGE_SZ64K_4V | ||
156 | #endif | 179 | #endif |
157 | 180 | ||
158 | #define _PAGE_CACHE (_PAGE_CP | _PAGE_CV) | 181 | /* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */ |
182 | #define __P000 __pgprot(0) | ||
183 | #define __P001 __pgprot(0) | ||
184 | #define __P010 __pgprot(0) | ||
185 | #define __P011 __pgprot(0) | ||
186 | #define __P100 __pgprot(0) | ||
187 | #define __P101 __pgprot(0) | ||
188 | #define __P110 __pgprot(0) | ||
189 | #define __P111 __pgprot(0) | ||
190 | |||
191 | #define __S000 __pgprot(0) | ||
192 | #define __S001 __pgprot(0) | ||
193 | #define __S010 __pgprot(0) | ||
194 | #define __S011 __pgprot(0) | ||
195 | #define __S100 __pgprot(0) | ||
196 | #define __S101 __pgprot(0) | ||
197 | #define __S110 __pgprot(0) | ||
198 | #define __S111 __pgprot(0) | ||
159 | 199 | ||
160 | #define __DIRTY_BITS (_PAGE_MODIFIED | _PAGE_WRITE | _PAGE_W) | 200 | #ifndef __ASSEMBLY__ |
161 | #define __ACCESS_BITS (_PAGE_ACCESSED | _PAGE_READ | _PAGE_R) | ||
162 | #define __PRIV_BITS _PAGE_P | ||
163 | 201 | ||
164 | #define PAGE_NONE __pgprot (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_CACHE) | 202 | extern pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long); |
165 | 203 | ||
166 | /* Don't set the TTE _PAGE_W bit here, else the dirty bit never gets set. */ | 204 | extern unsigned long pte_sz_bits(unsigned long size); |
167 | #define PAGE_SHARED __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \ | ||
168 | __ACCESS_BITS | _PAGE_WRITE | _PAGE_EXEC) | ||
169 | 205 | ||
170 | #define PAGE_COPY __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \ | 206 | extern pgprot_t PAGE_KERNEL; |
171 | __ACCESS_BITS | _PAGE_EXEC) | 207 | extern pgprot_t PAGE_KERNEL_LOCKED; |
208 | extern pgprot_t PAGE_COPY; | ||
209 | extern pgprot_t PAGE_SHARED; | ||
172 | 210 | ||
173 | #define PAGE_READONLY __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \ | 211 | /* XXX This uglyness is for the atyfb driver's sparc mmap() support. XXX */ |
174 | __ACCESS_BITS | _PAGE_EXEC) | 212 | extern unsigned long _PAGE_IE; |
213 | extern unsigned long _PAGE_E; | ||
214 | extern unsigned long _PAGE_CACHE; | ||
175 | 215 | ||
176 | #define PAGE_KERNEL __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \ | 216 | extern unsigned long pg_iobits; |
177 | __PRIV_BITS | \ | 217 | extern unsigned long _PAGE_ALL_SZ_BITS; |
178 | __ACCESS_BITS | __DIRTY_BITS | _PAGE_EXEC) | 218 | extern unsigned long _PAGE_SZBITS; |
179 | 219 | ||
180 | #define PAGE_SHARED_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | \ | 220 | extern struct page *mem_map_zero; |
181 | _PAGE_CACHE | \ | 221 | #define ZERO_PAGE(vaddr) (mem_map_zero) |
182 | __ACCESS_BITS | _PAGE_WRITE) | ||
183 | 222 | ||
184 | #define PAGE_COPY_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | \ | 223 | /* PFNs are real physical page numbers. However, mem_map only begins to record |
185 | _PAGE_CACHE | __ACCESS_BITS) | 224 | * per-page information starting at pfn_base. This is to handle systems where |
225 | * the first physical page in the machine is at some huge physical address, | ||
226 | * such as 4GB. This is common on a partitioned E10000, for example. | ||
227 | */ | ||
228 | static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) | ||
229 | { | ||
230 | unsigned long paddr = pfn << PAGE_SHIFT; | ||
231 | unsigned long sz_bits; | ||
232 | |||
233 | sz_bits = 0UL; | ||
234 | if (_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL) { | ||
235 | __asm__ __volatile__( | ||
236 | "\n661: sethi %uhi(%1), %0\n" | ||
237 | " sllx %0, 32, %0\n" | ||
238 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
239 | " .word 661b\n" | ||
240 | " mov %2, %0\n" | ||
241 | " nop\n" | ||
242 | " .previous\n" | ||
243 | : "=r" (sz_bits) | ||
244 | : "i" (_PAGE_SZBITS_4U), "i" (_PAGE_SZBITS_4V)); | ||
245 | } | ||
246 | return __pte(paddr | sz_bits | pgprot_val(prot)); | ||
247 | } | ||
248 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | ||
186 | 249 | ||
187 | #define PAGE_READONLY_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | \ | 250 | /* This one can be done with two shifts. */ |
188 | _PAGE_CACHE | __ACCESS_BITS) | 251 | static inline unsigned long pte_pfn(pte_t pte) |
252 | { | ||
253 | unsigned long ret; | ||
254 | |||
255 | __asm__ __volatile__( | ||
256 | "\n661: sllx %1, %2, %0\n" | ||
257 | " srlx %0, %3, %0\n" | ||
258 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
259 | " .word 661b\n" | ||
260 | " sllx %1, %4, %0\n" | ||
261 | " srlx %0, %5, %0\n" | ||
262 | " .previous\n" | ||
263 | : "=r" (ret) | ||
264 | : "r" (pte_val(pte)), | ||
265 | "i" (21), "i" (21 + PAGE_SHIFT), | ||
266 | "i" (8), "i" (8 + PAGE_SHIFT)); | ||
267 | |||
268 | return ret; | ||
269 | } | ||
270 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | ||
189 | 271 | ||
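(The magic shift constants in pte_pfn() fall straight out of the two paddr masks: _PAGE_PADDR_4U's highest bit is 42, so the sllx by 64 - 43 = 21 discards every bit above the physical address and the srlx by 21 + PAGE_SHIFT then drops the in-page bits, leaving the pfn; _PAGE_PADDR_4V's highest bit is 55, which yields the patched-in 8 / 8 + PAGE_SHIFT pair the same way.)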
190 | #define _PFN_MASK _PAGE_PADDR | 272 | static inline pte_t pte_modify(pte_t pte, pgprot_t prot) |
273 | { | ||
274 | unsigned long mask, tmp; | ||
275 | |||
276 | /* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347) | ||
277 | * SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8) | ||
278 | * | ||
279 | * Even if we use negation tricks the result is still a 6 | ||
280 | * instruction sequence, so don't try to play fancy and just | ||
281 | * do the most straightforward implementation. | ||
282 | * | ||
283 | * Note: We encode this into 3 sun4v 2-insn patch sequences. | ||
284 | */ | ||
191 | 285 | ||
192 | #define pg_iobits (_PAGE_VALID | _PAGE_PRESENT | __DIRTY_BITS | \ | 286 | __asm__ __volatile__( |
193 | __ACCESS_BITS | _PAGE_E) | 287 | "\n661: sethi %%uhi(%2), %1\n" |
288 | " sethi %%hi(%2), %0\n" | ||
289 | "\n662: or %1, %%ulo(%2), %1\n" | ||
290 | " or %0, %%lo(%2), %0\n" | ||
291 | "\n663: sllx %1, 32, %1\n" | ||
292 | " or %0, %1, %0\n" | ||
293 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
294 | " .word 661b\n" | ||
295 | " sethi %%uhi(%3), %1\n" | ||
296 | " sethi %%hi(%3), %0\n" | ||
297 | " .word 662b\n" | ||
298 | " or %1, %%ulo(%3), %1\n" | ||
299 | " or %0, %%lo(%3), %0\n" | ||
300 | " .word 663b\n" | ||
301 | " sllx %1, 32, %1\n" | ||
302 | " or %0, %1, %0\n" | ||
303 | " .previous\n" | ||
304 | : "=r" (mask), "=r" (tmp) | ||
305 | : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U | | ||
306 | _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U | | ||
307 | _PAGE_SZBITS_4U), | ||
308 | "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V | | ||
309 | _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V | | ||
310 | _PAGE_SZBITS_4V)); | ||
311 | |||
312 | return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask)); | ||
313 | } | ||
194 | 314 | ||
195 | #define __P000 PAGE_NONE | 315 | static inline pte_t pgoff_to_pte(unsigned long off) |
196 | #define __P001 PAGE_READONLY_NOEXEC | 316 | { |
197 | #define __P010 PAGE_COPY_NOEXEC | 317 | off <<= PAGE_SHIFT; |
198 | #define __P011 PAGE_COPY_NOEXEC | 318 | |
199 | #define __P100 PAGE_READONLY | 319 | __asm__ __volatile__( |
200 | #define __P101 PAGE_READONLY | 320 | "\n661: or %0, %2, %0\n" |
201 | #define __P110 PAGE_COPY | 321 | " .section .sun4v_1insn_patch, \"ax\"\n" |
202 | #define __P111 PAGE_COPY | 322 | " .word 661b\n" |
323 | " or %0, %3, %0\n" | ||
324 | " .previous\n" | ||
325 | : "=r" (off) | ||
326 | : "0" (off), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V)); | ||
327 | |||
328 | return __pte(off); | ||
329 | } | ||
203 | 330 | ||
204 | #define __S000 PAGE_NONE | 331 | static inline pgprot_t pgprot_noncached(pgprot_t prot) |
205 | #define __S001 PAGE_READONLY_NOEXEC | 332 | { |
206 | #define __S010 PAGE_SHARED_NOEXEC | 333 | unsigned long val = pgprot_val(prot); |
207 | #define __S011 PAGE_SHARED_NOEXEC | 334 | |
208 | #define __S100 PAGE_READONLY | 335 | __asm__ __volatile__( |
209 | #define __S101 PAGE_READONLY | 336 | "\n661: andn %0, %2, %0\n" |
210 | #define __S110 PAGE_SHARED | 337 | " or %0, %3, %0\n" |
211 | #define __S111 PAGE_SHARED | 338 | " .section .sun4v_2insn_patch, \"ax\"\n" |
339 | " .word 661b\n" | ||
340 | " andn %0, %4, %0\n" | ||
341 | " or %0, %3, %0\n" | ||
342 | " .previous\n" | ||
343 | : "=r" (val) | ||
344 | : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U), | ||
345 | "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V)); | ||
346 | |||
347 | return __pgprot(val); | ||
348 | } | ||
349 | /* Various pieces of code check for platform support by ifdef testing | ||
350 | * on "pgprot_noncached". That's broken and should be fixed, but for | ||
351 | * now... | ||
352 | */ | ||
353 | #define pgprot_noncached pgprot_noncached | ||
212 | 354 | ||
213 | #ifndef __ASSEMBLY__ | 355 | #ifdef CONFIG_HUGETLB_PAGE |
356 | static inline pte_t pte_mkhuge(pte_t pte) | ||
357 | { | ||
358 | unsigned long mask; | ||
359 | |||
360 | __asm__ __volatile__( | ||
361 | "\n661: sethi %%uhi(%1), %0\n" | ||
362 | " sllx %0, 32, %0\n" | ||
363 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
364 | " .word 661b\n" | ||
365 | " mov %2, %0\n" | ||
366 | " nop\n" | ||
367 | " .previous\n" | ||
368 | : "=r" (mask) | ||
369 | : "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V)); | ||
370 | |||
371 | return __pte(pte_val(pte) | mask); | ||
372 | } | ||
373 | #endif | ||
214 | 374 | ||
215 | extern unsigned long phys_base; | 375 | static inline pte_t pte_mkdirty(pte_t pte) |
216 | extern unsigned long pfn_base; | 376 | { |
377 | unsigned long val = pte_val(pte), tmp; | ||
378 | |||
379 | __asm__ __volatile__( | ||
380 | "\n661: or %0, %3, %0\n" | ||
381 | " nop\n" | ||
382 | "\n662: nop\n" | ||
383 | " nop\n" | ||
384 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
385 | " .word 661b\n" | ||
386 | " sethi %%uhi(%4), %1\n" | ||
387 | " sllx %1, 32, %1\n" | ||
388 | " .word 662b\n" | ||
389 | " or %1, %%lo(%4), %1\n" | ||
390 | " or %0, %1, %0\n" | ||
391 | " .previous\n" | ||
392 | : "=r" (val), "=r" (tmp) | ||
393 | : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U), | ||
394 | "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V)); | ||
395 | |||
396 | return __pte(val); | ||
397 | } | ||
217 | 398 | ||
218 | extern struct page *mem_map_zero; | 399 | static inline pte_t pte_mkclean(pte_t pte) |
219 | #define ZERO_PAGE(vaddr) (mem_map_zero) | 400 | { |
401 | unsigned long val = pte_val(pte), tmp; | ||
402 | |||
403 | __asm__ __volatile__( | ||
404 | "\n661: andn %0, %3, %0\n" | ||
405 | " nop\n" | ||
406 | "\n662: nop\n" | ||
407 | " nop\n" | ||
408 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
409 | " .word 661b\n" | ||
410 | " sethi %%uhi(%4), %1\n" | ||
411 | " sllx %1, 32, %1\n" | ||
412 | " .word 662b\n" | ||
413 | " or %1, %%lo(%4), %1\n" | ||
414 | " andn %0, %1, %0\n" | ||
415 | " .previous\n" | ||
416 | : "=r" (val), "=r" (tmp) | ||
417 | : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U), | ||
418 | "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V)); | ||
419 | |||
420 | return __pte(val); | ||
421 | } | ||
220 | 422 | ||
221 | /* PFNs are real physical page numbers. However, mem_map only begins to record | 423 | static inline pte_t pte_mkwrite(pte_t pte) |
222 | * per-page information starting at pfn_base. This is to handle systems where | 424 | { |
223 | * the first physical page in the machine is at some huge physical address, | 425 | unsigned long val = pte_val(pte), mask; |
224 | * such as 4GB. This is common on a partitioned E10000, for example. | 426 | |
225 | */ | 427 | __asm__ __volatile__( |
428 | "\n661: mov %1, %0\n" | ||
429 | " nop\n" | ||
430 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
431 | " .word 661b\n" | ||
432 | " sethi %%uhi(%2), %0\n" | ||
433 | " sllx %0, 32, %0\n" | ||
434 | " .previous\n" | ||
435 | : "=r" (mask) | ||
436 | : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V)); | ||
437 | |||
438 | return __pte(val | mask); | ||
439 | } | ||
226 | 440 | ||
227 | #define pfn_pte(pfn, prot) \ | 441 | static inline pte_t pte_wrprotect(pte_t pte) |
228 | __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot) | _PAGE_SZBITS) | 442 | { |
229 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | 443 | unsigned long val = pte_val(pte), tmp; |
444 | |||
445 | __asm__ __volatile__( | ||
446 | "\n661: andn %0, %3, %0\n" | ||
447 | " nop\n" | ||
448 | "\n662: nop\n" | ||
449 | " nop\n" | ||
450 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
451 | " .word 661b\n" | ||
452 | " sethi %%uhi(%4), %1\n" | ||
453 | " sllx %1, 32, %1\n" | ||
454 | " .word 662b\n" | ||
455 | " or %1, %%lo(%4), %1\n" | ||
456 | " andn %0, %1, %0\n" | ||
457 | " .previous\n" | ||
458 | : "=r" (val), "=r" (tmp) | ||
459 | : "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U), | ||
460 | "i" (_PAGE_WRITE_4V | _PAGE_W_4V)); | ||
461 | |||
462 | return __pte(val); | ||
463 | } | ||
464 | |||
465 | static inline pte_t pte_mkold(pte_t pte) | ||
466 | { | ||
467 | unsigned long mask; | ||
468 | |||
469 | __asm__ __volatile__( | ||
470 | "\n661: mov %1, %0\n" | ||
471 | " nop\n" | ||
472 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
473 | " .word 661b\n" | ||
474 | " sethi %%uhi(%2), %0\n" | ||
475 | " sllx %0, 32, %0\n" | ||
476 | " .previous\n" | ||
477 | : "=r" (mask) | ||
478 | : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V)); | ||
479 | |||
480 | mask |= _PAGE_R; | ||
481 | |||
482 | return __pte(pte_val(pte) & ~mask); | ||
483 | } | ||
484 | |||
485 | static inline pte_t pte_mkyoung(pte_t pte) | ||
486 | { | ||
487 | unsigned long mask; | ||
488 | |||
489 | __asm__ __volatile__( | ||
490 | "\n661: mov %1, %0\n" | ||
491 | " nop\n" | ||
492 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
493 | " .word 661b\n" | ||
494 | " sethi %%uhi(%2), %0\n" | ||
495 | " sllx %0, 32, %0\n" | ||
496 | " .previous\n" | ||
497 | : "=r" (mask) | ||
498 | : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V)); | ||
499 | |||
500 | mask |= _PAGE_R; | ||
501 | |||
502 | return __pte(pte_val(pte) | mask); | ||
503 | } | ||
230 | 504 | ||
231 | #define pte_pfn(x) ((pte_val(x) & _PAGE_PADDR)>>PAGE_SHIFT) | 505 | static inline unsigned long pte_young(pte_t pte) |
232 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | 506 | { |
507 | unsigned long mask; | ||
508 | |||
509 | __asm__ __volatile__( | ||
510 | "\n661: mov %1, %0\n" | ||
511 | " nop\n" | ||
512 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
513 | " .word 661b\n" | ||
514 | " sethi %%uhi(%2), %0\n" | ||
515 | " sllx %0, 32, %0\n" | ||
516 | " .previous\n" | ||
517 | : "=r" (mask) | ||
518 | : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V)); | ||
519 | |||
520 | return (pte_val(pte) & mask); | ||
521 | } | ||
522 | |||
523 | static inline unsigned long pte_dirty(pte_t pte) | ||
524 | { | ||
525 | unsigned long mask; | ||
526 | |||
527 | __asm__ __volatile__( | ||
528 | "\n661: mov %1, %0\n" | ||
529 | " nop\n" | ||
530 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
531 | " .word 661b\n" | ||
532 | " sethi %%uhi(%2), %0\n" | ||
533 | " sllx %0, 32, %0\n" | ||
534 | " .previous\n" | ||
535 | : "=r" (mask) | ||
536 | : "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V)); | ||
537 | |||
538 | return (pte_val(pte) & mask); | ||
539 | } | ||
233 | 540 | ||
234 | static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) | 541 | static inline unsigned long pte_write(pte_t pte) |
235 | { | 542 | { |
236 | pte_t __pte; | 543 | unsigned long mask; |
237 | const unsigned long preserve_mask = (_PFN_MASK | | 544 | |
238 | _PAGE_MODIFIED | _PAGE_ACCESSED | | 545 | __asm__ __volatile__( |
239 | _PAGE_CACHE | _PAGE_E | | 546 | "\n661: mov %1, %0\n" |
240 | _PAGE_PRESENT | _PAGE_SZBITS); | 547 | " nop\n" |
548 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
549 | " .word 661b\n" | ||
550 | " sethi %%uhi(%2), %0\n" | ||
551 | " sllx %0, 32, %0\n" | ||
552 | " .previous\n" | ||
553 | : "=r" (mask) | ||
554 | : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V)); | ||
555 | |||
556 | return (pte_val(pte) & mask); | ||
557 | } | ||
241 | 558 | ||
242 | pte_val(__pte) = (pte_val(orig_pte) & preserve_mask) | | 559 | static inline unsigned long pte_exec(pte_t pte) |
243 | (pgprot_val(new_prot) & ~preserve_mask); | 560 | { |
561 | unsigned long mask; | ||
562 | |||
563 | __asm__ __volatile__( | ||
564 | "\n661: sethi %%hi(%1), %0\n" | ||
565 | " .section .sun4v_1insn_patch, \"ax\"\n" | ||
566 | " .word 661b\n" | ||
567 | " mov %2, %0\n" | ||
568 | " .previous\n" | ||
569 | : "=r" (mask) | ||
570 | : "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V)); | ||
571 | |||
572 | return (pte_val(pte) & mask); | ||
573 | } | ||
244 | 574 | ||
245 | return __pte; | 575 | static inline unsigned long pte_read(pte_t pte) |
576 | { | ||
577 | unsigned long mask; | ||
578 | |||
579 | __asm__ __volatile__( | ||
580 | "\n661: mov %1, %0\n" | ||
581 | " nop\n" | ||
582 | " .section .sun4v_2insn_patch, \"ax\"\n" | ||
583 | " .word 661b\n" | ||
584 | " sethi %%uhi(%2), %0\n" | ||
585 | " sllx %0, 32, %0\n" | ||
586 | " .previous\n" | ||
587 | : "=r" (mask) | ||
588 | : "i" (_PAGE_READ_4U), "i" (_PAGE_READ_4V)); | ||
589 | |||
590 | return (pte_val(pte) & mask); | ||
246 | } | 591 | } |
592 | |||
593 | static inline unsigned long pte_file(pte_t pte) | ||
594 | { | ||
595 | unsigned long val = pte_val(pte); | ||
596 | |||
597 | __asm__ __volatile__( | ||
598 | "\n661: and %0, %2, %0\n" | ||
599 | " .section .sun4v_1insn_patch, \"ax\"\n" | ||
600 | " .word 661b\n" | ||
601 | " and %0, %3, %0\n" | ||
602 | " .previous\n" | ||
603 | : "=r" (val) | ||
604 | : "0" (val), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V)); | ||
605 | |||
606 | return val; | ||
607 | } | ||
608 | |||
609 | static inline unsigned long pte_present(pte_t pte) | ||
610 | { | ||
611 | unsigned long val = pte_val(pte); | ||
612 | |||
613 | __asm__ __volatile__( | ||
614 | "\n661: and %0, %2, %0\n" | ||
615 | " .section .sun4v_1insn_patch, \"ax\"\n" | ||
616 | " .word 661b\n" | ||
617 | " and %0, %3, %0\n" | ||
618 | " .previous\n" | ||
619 | : "=r" (val) | ||
620 | : "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V)); | ||
621 | |||
622 | return val; | ||
623 | } | ||
624 | |||
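All of the pte helpers above lean on the same boot-time patching trick: the 661: label marks the start of a two-instruction sun4u sequence, and the .sun4v_2insn_patch section records that address together with the two sun4v instructions that should overwrite it when the kernel finds itself on a hypervisor platform. A minimal sketch of how such a patch table could be walked at boot follows; the entry layout and function name here are assumptions read off the sections above, not necessarily the kernel's real code:

	struct sun4v_2insn_patch_entry {
		unsigned int	addr;		/* the .word 661b from the asm */
		unsigned int	insns[2];	/* the two sun4v instructions */
	};

	extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
		__sun4v_2insn_patch_end;

	static void sun4v_patch_2insn_sections(void)
	{
		struct sun4v_2insn_patch_entry *p;

		for (p = &__sun4v_2insn_patch; p < &__sun4v_2insn_patch_end; p++) {
			unsigned long addr = p->addr;

			/* Overwrite the sun4u instructions in place, flushing
			 * the I-cache after each store so the cpu executes
			 * the new code.
			 */
			*(unsigned int *) (addr + 0x0) = p->insns[0];
			__asm__ __volatile__("flush	%0" : : "r" (addr + 0x0));

			*(unsigned int *) (addr + 0x4) = p->insns[1];
			__asm__ __volatile__("flush	%0" : : "r" (addr + 0x4));
		}
	}

The single-instruction .sun4v_1insn_patch sections used by pgoff_to_pte(), pte_exec(), pte_file() and pte_present() would be handled the same way with a one-element insns[] array.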
247 | #define pmd_set(pmdp, ptep) \ | 625 | #define pmd_set(pmdp, ptep) \ |
248 | (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL)) | 626 | (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL)) |
249 | #define pud_set(pudp, pmdp) \ | 627 | #define pud_set(pudp, pmdp) \ |
@@ -253,8 +631,6 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) | |||
253 | #define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd)) | 631 | #define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd)) |
254 | #define pud_page(pud) \ | 632 | #define pud_page(pud) \ |
255 | ((unsigned long) __va((((unsigned long)pud_val(pud))<<11UL))) | 633 | ((unsigned long) __va((((unsigned long)pud_val(pud))<<11UL))) |
256 | #define pte_none(pte) (!pte_val(pte)) | ||
257 | #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) | ||
258 | #define pmd_none(pmd) (!pmd_val(pmd)) | 634 | #define pmd_none(pmd) (!pmd_val(pmd)) |
259 | #define pmd_bad(pmd) (0) | 635 | #define pmd_bad(pmd) (0) |
260 | #define pmd_present(pmd) (pmd_val(pmd) != 0U) | 636 | #define pmd_present(pmd) (pmd_val(pmd) != 0U) |
@@ -264,30 +640,8 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) | |||
264 | #define pud_present(pud) (pud_val(pud) != 0U) | 640 | #define pud_present(pud) (pud_val(pud) != 0U) |
265 | #define pud_clear(pudp) (pud_val(*(pudp)) = 0U) | 641 | #define pud_clear(pudp) (pud_val(*(pudp)) = 0U) |
266 | 642 | ||
267 | /* The following only work if pte_present() is true. | 643 | /* Same in both SUN4V and SUN4U. */ |
268 | * Undefined behaviour if not.. | 644 | #define pte_none(pte) (!pte_val(pte)) |
269 | */ | ||
270 | #define pte_read(pte) (pte_val(pte) & _PAGE_READ) | ||
271 | #define pte_exec(pte) (pte_val(pte) & _PAGE_EXEC) | ||
272 | #define pte_write(pte) (pte_val(pte) & _PAGE_WRITE) | ||
273 | #define pte_dirty(pte) (pte_val(pte) & _PAGE_MODIFIED) | ||
274 | #define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED) | ||
275 | #define pte_wrprotect(pte) (__pte(pte_val(pte) & ~(_PAGE_WRITE|_PAGE_W))) | ||
276 | #define pte_rdprotect(pte) \ | ||
277 | (__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_READ)) | ||
278 | #define pte_mkclean(pte) \ | ||
279 | (__pte(pte_val(pte) & ~(_PAGE_MODIFIED|_PAGE_W))) | ||
280 | #define pte_mkold(pte) \ | ||
281 | (__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_ACCESSED)) | ||
282 | |||
283 | /* Permanent address of a page. */ | ||
284 | #define __page_address(page) page_address(page) | ||
285 | |||
286 | /* Be very careful when you change these three, they are delicate. */ | ||
287 | #define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_ACCESSED | _PAGE_R)) | ||
288 | #define pte_mkwrite(pte) (__pte(pte_val(pte) | _PAGE_WRITE)) | ||
289 | #define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_MODIFIED | _PAGE_W)) | ||
290 | #define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_SZHUGE)) | ||
291 | 645 | ||
292 | /* to find an entry in a page-table-directory. */ | 646 | /* to find an entry in a page-table-directory. */ |
293 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) | 647 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) |
@@ -296,11 +650,6 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) | |||
296 | /* to find an entry in a kernel page-table-directory */ | 650 | /* to find an entry in a kernel page-table-directory */ |
297 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | 651 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) |
298 | 652 | ||
299 | /* extract the pgd cache used for optimizing the tlb miss | ||
300 | * slow path when executing 32-bit compat processes | ||
301 | */ | ||
302 | #define get_pgd_cache(pgd) ((unsigned long) pgd_val(*pgd) << 11) | ||
303 | |||
304 | /* Find an entry in the second-level page table.. */ | 653 | /* Find an entry in the second-level page table.. */ |
305 | #define pmd_offset(pudp, address) \ | 654 | #define pmd_offset(pudp, address) \ |
306 | ((pmd_t *) pud_page(*(pudp)) + \ | 655 | ((pmd_t *) pud_page(*(pudp)) + \ |
@@ -327,6 +676,9 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *p | |||
327 | 676 | ||
328 | /* It is more efficient to let flush_tlb_kernel_range() | 677 | /* It is more efficient to let flush_tlb_kernel_range() |
329 | * handle init_mm tlb flushes. | 678 | * handle init_mm tlb flushes. |
679 | * | ||
680 | * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U | ||
681 | * and SUN4V pte layout, so this inline test is fine. | ||
330 | */ | 682 | */ |
331 | if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID)) | 683 | if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID)) |
332 | tlb_batch_add(mm, addr, ptep, orig); | 684 | tlb_batch_add(mm, addr, ptep, orig); |
@@ -361,42 +713,23 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); | |||
361 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) | 713 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) |
362 | 714 | ||
363 | /* File offset in PTE support. */ | 715 | /* File offset in PTE support. */ |
364 | #define pte_file(pte) (pte_val(pte) & _PAGE_FILE) | 716 | extern unsigned long pte_file(pte_t); |
365 | #define pte_to_pgoff(pte) (pte_val(pte) >> PAGE_SHIFT) | 717 | #define pte_to_pgoff(pte) (pte_val(pte) >> PAGE_SHIFT) |
366 | #define pgoff_to_pte(off) (__pte(((off) << PAGE_SHIFT) | _PAGE_FILE)) | 718 | extern pte_t pgoff_to_pte(unsigned long); |
367 | #define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL) | 719 | #define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL) |
368 | 720 | ||
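(The file-offset encoding round-trips because pgoff_to_pte() shifts the offset up by PAGE_SHIFT before OR-ing in the _PAGE_FILE bit, and both _PAGE_FILE_4U (bit 11) and _PAGE_FILE_4V (bit 5) sit below PAGE_SHIFT, so pte_to_pgoff()'s plain shift back down recovers the offset untouched; PTE_FILE_MAX_BITS holds one further bit back at the top, hence 64 - PAGE_SHIFT - 1.)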
369 | extern unsigned long prom_virt_to_phys(unsigned long, int *); | 721 | extern unsigned long prom_virt_to_phys(unsigned long, int *); |
370 | 722 | ||
371 | static __inline__ unsigned long | 723 | extern unsigned long sun4u_get_pte(unsigned long); |
372 | sun4u_get_pte (unsigned long addr) | ||
373 | { | ||
374 | pgd_t *pgdp; | ||
375 | pud_t *pudp; | ||
376 | pmd_t *pmdp; | ||
377 | pte_t *ptep; | ||
378 | |||
379 | if (addr >= PAGE_OFFSET) | ||
380 | return addr & _PAGE_PADDR; | ||
381 | if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS)) | ||
382 | return prom_virt_to_phys(addr, NULL); | ||
383 | pgdp = pgd_offset_k(addr); | ||
384 | pudp = pud_offset(pgdp, addr); | ||
385 | pmdp = pmd_offset(pudp, addr); | ||
386 | ptep = pte_offset_kernel(pmdp, addr); | ||
387 | return pte_val(*ptep) & _PAGE_PADDR; | ||
388 | } | ||
389 | 724 | ||
390 | static __inline__ unsigned long | 725 | static inline unsigned long __get_phys(unsigned long addr) |
391 | __get_phys (unsigned long addr) | ||
392 | { | 726 | { |
393 | return sun4u_get_pte (addr); | 727 | return sun4u_get_pte(addr); |
394 | } | 728 | } |
395 | 729 | ||
396 | static __inline__ int | 730 | static inline int __get_iospace(unsigned long addr) |
397 | __get_iospace (unsigned long addr) | ||
398 | { | 731 | { |
399 | return ((sun4u_get_pte (addr) & 0xf0000000) >> 28); | 732 | return ((sun4u_get_pte(addr) & 0xf0000000) >> 28); |
400 | } | 733 | } |
401 | 734 | ||
402 | extern unsigned long *sparc64_valid_addr_bitmap; | 735 | extern unsigned long *sparc64_valid_addr_bitmap; |
@@ -409,11 +742,6 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from, | |||
409 | unsigned long pfn, | 742 | unsigned long pfn, |
410 | unsigned long size, pgprot_t prot); | 743 | unsigned long size, pgprot_t prot); |
411 | 744 | ||
412 | /* Clear virtual and physical cachability, set side-effect bit. */ | ||
413 | #define pgprot_noncached(prot) \ | ||
414 | (__pgprot((pgprot_val(prot) & ~(_PAGE_CP | _PAGE_CV)) | \ | ||
415 | _PAGE_E)) | ||
416 | |||
417 | /* | 745 | /* |
418 | * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in | 746 | * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in |
419 | * its high 4 bits. These macros/functions put it there or get it from there. | 747 | * its high 4 bits. These macros/functions put it there or get it from there. |
@@ -424,8 +752,11 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from, | |||
424 | 752 | ||
425 | #include <asm-generic/pgtable.h> | 753 | #include <asm-generic/pgtable.h> |
426 | 754 | ||
427 | /* We provide our own get_unmapped_area to cope with VA holes for userland */ | 755 | /* We provide our own get_unmapped_area to cope with VA holes and |
756 | * SHM area cache aliasing for userland. | ||
757 | */ | ||
428 | #define HAVE_ARCH_UNMAPPED_AREA | 758 | #define HAVE_ARCH_UNMAPPED_AREA |
759 | #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN | ||
429 | 760 | ||
430 | /* We provide a special get_unmapped_area for framebuffer mmaps to try and use | 761 | /* We provide a special get_unmapped_area for framebuffer mmaps to try and use |
431 | * the largest alignment possible such that larger PTEs can be used. | 762 | * the largest alignment possible such that larger PTEs can be used. |
@@ -435,12 +766,9 @@ extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long, | |||
435 | unsigned long); | 766 | unsigned long); |
436 | #define HAVE_ARCH_FB_UNMAPPED_AREA | 767 | #define HAVE_ARCH_FB_UNMAPPED_AREA |
437 | 768 | ||
438 | /* | 769 | extern void pgtable_cache_init(void); |
439 | * No page table caches to initialise | 770 | extern void sun4v_register_fault_status(void); |
440 | */ | 771 | extern void sun4v_ktsb_register(void); |
441 | #define pgtable_cache_init() do { } while (0) | ||
442 | |||
443 | extern void check_pgt_cache(void); | ||
444 | 772 | ||
445 | #endif /* !(__ASSEMBLY__) */ | 773 | #endif /* !(__ASSEMBLY__) */ |
446 | 774 | ||
diff --git a/include/asm-sparc64/pil.h b/include/asm-sparc64/pil.h index 8f87750c3517..79f827eb3f5d 100644 --- a/include/asm-sparc64/pil.h +++ b/include/asm-sparc64/pil.h | |||
@@ -16,11 +16,13 @@ | |||
16 | #define PIL_SMP_CALL_FUNC 1 | 16 | #define PIL_SMP_CALL_FUNC 1 |
17 | #define PIL_SMP_RECEIVE_SIGNAL 2 | 17 | #define PIL_SMP_RECEIVE_SIGNAL 2 |
18 | #define PIL_SMP_CAPTURE 3 | 18 | #define PIL_SMP_CAPTURE 3 |
19 | #define PIL_SMP_CTX_NEW_VERSION 4 | ||
19 | 20 | ||
20 | #ifndef __ASSEMBLY__ | 21 | #ifndef __ASSEMBLY__ |
21 | #define PIL_RESERVED(PIL) ((PIL) == PIL_SMP_CALL_FUNC || \ | 22 | #define PIL_RESERVED(PIL) ((PIL) == PIL_SMP_CALL_FUNC || \ |
22 | (PIL) == PIL_SMP_RECEIVE_SIGNAL || \ | 23 | (PIL) == PIL_SMP_RECEIVE_SIGNAL || \ |
23 | (PIL) == PIL_SMP_CAPTURE) | 24 | (PIL) == PIL_SMP_CAPTURE || \ |
25 | (PIL) == PIL_SMP_CTX_NEW_VERSION) | ||
24 | #endif | 26 | #endif |
25 | 27 | ||
26 | #endif /* !(_SPARC64_PIL_H) */ | 28 | #endif /* !(_SPARC64_PIL_H) */ |
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h index cd8d9b4c8658..c6896b88283e 100644 --- a/include/asm-sparc64/processor.h +++ b/include/asm-sparc64/processor.h | |||
@@ -28,6 +28,8 @@ | |||
28 | * User lives in his very own context, and cannot reference us. Note | 28 | * User lives in his very own context, and cannot reference us. Note |
29 | * that TASK_SIZE is a misnomer, it really gives maximum user virtual | 29 | * that TASK_SIZE is a misnomer, it really gives maximum user virtual |
30 | * address that the kernel will allocate out. | 30 | * address that the kernel will allocate out. |
31 | * | ||
32 | * XXX No longer using virtual page tables, kill this upper limit... | ||
31 | */ | 33 | */ |
32 | #define VA_BITS 44 | 34 | #define VA_BITS 44 |
33 | #ifndef __ASSEMBLY__ | 35 | #ifndef __ASSEMBLY__ |
@@ -37,18 +39,6 @@ | |||
37 | #endif | 39 | #endif |
38 | #define TASK_SIZE ((unsigned long)-VPTE_SIZE) | 40 | #define TASK_SIZE ((unsigned long)-VPTE_SIZE) |
39 | 41 | ||
40 | /* | ||
41 | * The vpte base must be able to hold the entire vpte, half | ||
42 | * of which lives above, and half below, the base. And it | ||
43 | * is placed as close to the highest address range as possible. | ||
44 | */ | ||
45 | #define VPTE_BASE_SPITFIRE (-(VPTE_SIZE/2)) | ||
46 | #if 1 | ||
47 | #define VPTE_BASE_CHEETAH VPTE_BASE_SPITFIRE | ||
48 | #else | ||
49 | #define VPTE_BASE_CHEETAH 0xffe0000000000000 | ||
50 | #endif | ||
51 | |||
52 | #ifndef __ASSEMBLY__ | 42 | #ifndef __ASSEMBLY__ |
53 | 43 | ||
54 | typedef struct { | 44 | typedef struct { |
@@ -101,7 +91,8 @@ extern unsigned long thread_saved_pc(struct task_struct *); | |||
101 | /* Do necessary setup to start up a newly executed thread. */ | 91 | /* Do necessary setup to start up a newly executed thread. */ |
102 | #define start_thread(regs, pc, sp) \ | 92 | #define start_thread(regs, pc, sp) \ |
103 | do { \ | 93 | do { \ |
104 | regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_INITIAL_MM|TSTATE_IE) | (ASI_PNF << 24); \ | 94 | unsigned long __asi = ASI_PNF; \ |
95 | regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_INITIAL_MM|TSTATE_IE) | (__asi << 24UL); \ | ||
105 | regs->tpc = ((pc & (~3)) - 4); \ | 96 | regs->tpc = ((pc & (~3)) - 4); \ |
106 | regs->tnpc = regs->tpc + 4; \ | 97 | regs->tnpc = regs->tpc + 4; \ |
107 | regs->y = 0; \ | 98 | regs->y = 0; \ |
@@ -138,10 +129,10 @@ do { \ | |||
138 | 129 | ||
139 | #define start_thread32(regs, pc, sp) \ | 130 | #define start_thread32(regs, pc, sp) \ |
140 | do { \ | 131 | do { \ |
132 | unsigned long __asi = ASI_PNF; \ | ||
141 | pc &= 0x00000000ffffffffUL; \ | 133 | pc &= 0x00000000ffffffffUL; \ |
142 | sp &= 0x00000000ffffffffUL; \ | 134 | sp &= 0x00000000ffffffffUL; \ |
143 | \ | 135 | regs->tstate = (regs->tstate & (TSTATE_CWP))|(TSTATE_INITIAL_MM|TSTATE_IE|TSTATE_AM) | (__asi << 24UL); \ |
144 | regs->tstate = (regs->tstate & (TSTATE_CWP))|(TSTATE_INITIAL_MM|TSTATE_IE|TSTATE_AM); \ | ||
145 | regs->tpc = ((pc & (~3)) - 4); \ | 136 | regs->tpc = ((pc & (~3)) - 4); \ |
146 | regs->tnpc = regs->tpc + 4; \ | 137 | regs->tnpc = regs->tpc + 4; \ |
147 | regs->y = 0; \ | 138 | regs->y = 0; \ |
@@ -226,6 +217,8 @@ static inline void prefetchw(const void *x) | |||
226 | 217 | ||
227 | #define spin_lock_prefetch(x) prefetchw(x) | 218 | #define spin_lock_prefetch(x) prefetchw(x) |
228 | 219 | ||
220 | #define HAVE_ARCH_PICK_MMAP_LAYOUT | ||
221 | |||
229 | #endif /* !(__ASSEMBLY__) */ | 222 | #endif /* !(__ASSEMBLY__) */ |
230 | 223 | ||
231 | #endif /* !(__ASM_SPARC64_PROCESSOR_H) */ | 224 | #endif /* !(__ASM_SPARC64_PROCESSOR_H) */ |
diff --git a/include/asm-sparc64/pstate.h b/include/asm-sparc64/pstate.h index 29fb74aa805d..49a7924a89ab 100644 --- a/include/asm-sparc64/pstate.h +++ b/include/asm-sparc64/pstate.h | |||
@@ -28,11 +28,12 @@ | |||
28 | 28 | ||
29 | /* The V9 TSTATE Register (with SpitFire and Linux extensions). | 29 | /* The V9 TSTATE Register (with SpitFire and Linux extensions). |
30 | * | 30 | * |
31 | * --------------------------------------------------------------- | 31 | * --------------------------------------------------------------------- |
32 | * | Resv | CCR | ASI | %pil | PSTATE | Resv | CWP | | 32 | * | Resv | GL | CCR | ASI | %pil | PSTATE | Resv | CWP | |
33 | * --------------------------------------------------------------- | 33 | * --------------------------------------------------------------------- |
34 | * 63 40 39 32 31 24 23 20 19 8 7 5 4 0 | 34 | * 63 43 42 40 39 32 31 24 23 20 19 8 7 5 4 0 |
35 | */ | 35 | */ |
36 | #define TSTATE_GL _AC(0x0000070000000000,UL) /* Global reg level */ | ||
36 | #define TSTATE_CCR _AC(0x000000ff00000000,UL) /* Condition Codes. */ | 37 | #define TSTATE_CCR _AC(0x000000ff00000000,UL) /* Condition Codes. */ |
37 | #define TSTATE_XCC _AC(0x000000f000000000,UL) /* Condition Codes. */ | 38 | #define TSTATE_XCC _AC(0x000000f000000000,UL) /* Condition Codes. */ |
38 | #define TSTATE_XNEG _AC(0x0000008000000000,UL) /* %xcc Negative. */ | 39 | #define TSTATE_XNEG _AC(0x0000008000000000,UL) /* %xcc Negative. */ |
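Since the new GL field sits at bits 42:40, the saved global-register level can be recovered from a tstate image with a mask and shift. An illustrative helper, not part of the header itself:

	/* Illustrative only: extract the saved global register level (0-7)
	 * from a %tstate value, using the TSTATE_GL mask defined above.
	 */
	static inline unsigned long tstate_gl(unsigned long tstate)
	{
		return (tstate & TSTATE_GL) >> 40;
	}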
diff --git a/include/asm-sparc64/scratchpad.h b/include/asm-sparc64/scratchpad.h new file mode 100644 index 000000000000..5e8b01fb3343 --- /dev/null +++ b/include/asm-sparc64/scratchpad.h | |||
@@ -0,0 +1,14 @@ | |||
1 | #ifndef _SPARC64_SCRATCHPAD_H | ||
2 | #define _SPARC64_SCRATCHPAD_H | ||
3 | |||
4 | /* Sun4v scratchpad registers, accessed via ASI_SCRATCHPAD. */ | ||
5 | |||
6 | #define SCRATCHPAD_MMU_MISS 0x00 /* Shared with OBP - set by OBP */ | ||
7 | #define SCRATCHPAD_CPUID 0x08 /* Shared with OBP - set by hypervisor */ | ||
8 | #define SCRATCHPAD_UTSBREG1 0x10 | ||
9 | #define SCRATCHPAD_UTSBREG2 0x18 | ||
10 | /* 0x20 and 0x28, hypervisor only... */ | ||
11 | #define SCRATCHPAD_UNUSED1 0x30 | ||
12 | #define SCRATCHPAD_UNUSED2 0x38 /* Reserved for OBP */ | ||
13 | |||
14 | #endif /* !(_SPARC64_SCRATCHPAD_H) */ | ||
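These offsets are meant to be used as the address operand of loads and stores through the scratchpad ASI, the same way the SUN4V_ITSB_MISS/SUN4V_DTSB_MISS macros later in this patch do with ldxa. A hedged C-level sketch (ASI_SCRATCHPAD is assumed to be provided by <asm/asi.h>):

	#include <asm/scratchpad.h>

	/* Sketch: read one sun4v scratchpad register through its ASI. */
	static inline unsigned long scratchpad_read(unsigned long reg)
	{
		unsigned long val;

		__asm__ __volatile__("ldxa	[%1] %2, %0"
				     : "=r" (val)
				     : "r" (reg), "i" (ASI_SCRATCHPAD));
		return val;
	}

	/* e.g. the cpu id the hypervisor assigned us:
	 *	unsigned long cpuid = scratchpad_read(SCRATCHPAD_CPUID);
	 */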
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h index 473edb2603ec..89d86ecaab24 100644 --- a/include/asm-sparc64/smp.h +++ b/include/asm-sparc64/smp.h | |||
@@ -33,37 +33,13 @@ | |||
33 | extern cpumask_t phys_cpu_present_map; | 33 | extern cpumask_t phys_cpu_present_map; |
34 | #define cpu_possible_map phys_cpu_present_map | 34 | #define cpu_possible_map phys_cpu_present_map |
35 | 35 | ||
36 | extern cpumask_t cpu_sibling_map[NR_CPUS]; | ||
37 | |||
36 | /* | 38 | /* |
37 | * General functions that each host system must provide. | 39 | * General functions that each host system must provide. |
38 | */ | 40 | */ |
39 | 41 | ||
40 | static __inline__ int hard_smp_processor_id(void) | 42 | extern int hard_smp_processor_id(void); |
41 | { | ||
42 | if (tlb_type == cheetah || tlb_type == cheetah_plus) { | ||
43 | unsigned long cfg, ver; | ||
44 | __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver)); | ||
45 | if ((ver >> 32) == 0x003e0016) { | ||
46 | __asm__ __volatile__("ldxa [%%g0] %1, %0" | ||
47 | : "=r" (cfg) | ||
48 | : "i" (ASI_JBUS_CONFIG)); | ||
49 | return ((cfg >> 17) & 0x1f); | ||
50 | } else { | ||
51 | __asm__ __volatile__("ldxa [%%g0] %1, %0" | ||
52 | : "=r" (cfg) | ||
53 | : "i" (ASI_SAFARI_CONFIG)); | ||
54 | return ((cfg >> 17) & 0x3ff); | ||
55 | } | ||
56 | } else if (this_is_starfire != 0) { | ||
57 | return starfire_hard_smp_processor_id(); | ||
58 | } else { | ||
59 | unsigned long upaconfig; | ||
60 | __asm__ __volatile__("ldxa [%%g0] %1, %0" | ||
61 | : "=r" (upaconfig) | ||
62 | : "i" (ASI_UPA_CONFIG)); | ||
63 | return ((upaconfig >> 17) & 0x1f); | ||
64 | } | ||
65 | } | ||
66 | |||
67 | #define raw_smp_processor_id() (current_thread_info()->cpu) | 43 | #define raw_smp_processor_id() (current_thread_info()->cpu) |
68 | 44 | ||
69 | extern void smp_setup_cpu_possible_map(void); | 45 | extern void smp_setup_cpu_possible_map(void); |
diff --git a/include/asm-sparc64/sparsemem.h b/include/asm-sparc64/sparsemem.h new file mode 100644 index 000000000000..ed5c9d8541e2 --- /dev/null +++ b/include/asm-sparc64/sparsemem.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef _SPARC64_SPARSEMEM_H | ||
2 | #define _SPARC64_SPARSEMEM_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | |||
6 | #define SECTION_SIZE_BITS 26 | ||
7 | #define MAX_PHYSADDR_BITS 42 | ||
8 | #define MAX_PHYSMEM_BITS 42 | ||
9 | |||
10 | #endif /* !(__KERNEL__) */ | ||
11 | |||
12 | #endif /* !(_SPARC64_SPARSEMEM_H) */ | ||
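(In concrete terms: SECTION_SIZE_BITS of 26 makes each mem_section cover 2^26 bytes = 64MB, and MAX_PHYSMEM_BITS of 42 caps the physical address space at 4TB, so the section table holds at most 2^(42 - 26) = 65536 entries.)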
diff --git a/include/asm-sparc64/spitfire.h b/include/asm-sparc64/spitfire.h index 962638c9d122..23ad8a7987ad 100644 --- a/include/asm-sparc64/spitfire.h +++ b/include/asm-sparc64/spitfire.h | |||
@@ -44,6 +44,7 @@ enum ultra_tlb_layout { | |||
44 | spitfire = 0, | 44 | spitfire = 0, |
45 | cheetah = 1, | 45 | cheetah = 1, |
46 | cheetah_plus = 2, | 46 | cheetah_plus = 2, |
47 | hypervisor = 3, | ||
47 | }; | 48 | }; |
48 | 49 | ||
49 | extern enum ultra_tlb_layout tlb_type; | 50 | extern enum ultra_tlb_layout tlb_type; |
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h index af254e581834..a18ec87a52c1 100644 --- a/include/asm-sparc64/system.h +++ b/include/asm-sparc64/system.h | |||
@@ -209,9 +209,10 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \ | |||
209 | /* so that ASI is only written if it changes, think again. */ \ | 209 | /* so that ASI is only written if it changes, think again. */ \ |
210 | __asm__ __volatile__("wr %%g0, %0, %%asi" \ | 210 | __asm__ __volatile__("wr %%g0, %0, %%asi" \ |
211 | : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\ | 211 | : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\ |
212 | trap_block[current_thread_info()->cpu].thread = \ | ||
213 | task_thread_info(next); \ | ||
212 | __asm__ __volatile__( \ | 214 | __asm__ __volatile__( \ |
213 | "mov %%g4, %%g7\n\t" \ | 215 | "mov %%g4, %%g7\n\t" \ |
214 | "wrpr %%g0, 0x95, %%pstate\n\t" \ | ||
215 | "stx %%i6, [%%sp + 2047 + 0x70]\n\t" \ | 216 | "stx %%i6, [%%sp + 2047 + 0x70]\n\t" \ |
216 | "stx %%i7, [%%sp + 2047 + 0x78]\n\t" \ | 217 | "stx %%i7, [%%sp + 2047 + 0x78]\n\t" \ |
217 | "rdpr %%wstate, %%o5\n\t" \ | 218 | "rdpr %%wstate, %%o5\n\t" \ |
@@ -225,14 +226,10 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \ | |||
225 | "ldx [%%g6 + %3], %%o6\n\t" \ | 226 | "ldx [%%g6 + %3], %%o6\n\t" \ |
226 | "ldub [%%g6 + %2], %%o5\n\t" \ | 227 | "ldub [%%g6 + %2], %%o5\n\t" \ |
227 | "ldub [%%g6 + %4], %%o7\n\t" \ | 228 | "ldub [%%g6 + %4], %%o7\n\t" \ |
228 | "mov %%g6, %%l2\n\t" \ | ||
229 | "wrpr %%o5, 0x0, %%wstate\n\t" \ | 229 | "wrpr %%o5, 0x0, %%wstate\n\t" \ |
230 | "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \ | 230 | "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \ |
231 | "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \ | 231 | "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \ |
232 | "wrpr %%g0, 0x94, %%pstate\n\t" \ | ||
233 | "mov %%l2, %%g6\n\t" \ | ||
234 | "ldx [%%g6 + %6], %%g4\n\t" \ | 232 | "ldx [%%g6 + %6], %%g4\n\t" \ |
235 | "wrpr %%g0, 0x96, %%pstate\n\t" \ | ||
236 | "brz,pt %%o7, 1f\n\t" \ | 233 | "brz,pt %%o7, 1f\n\t" \ |
237 | " mov %%g7, %0\n\t" \ | 234 | " mov %%g7, %0\n\t" \ |
238 | "b,a ret_from_syscall\n\t" \ | 235 | "b,a ret_from_syscall\n\t" \ |
diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h index ac9d068aab4f..2ebf7f27bf91 100644 --- a/include/asm-sparc64/thread_info.h +++ b/include/asm-sparc64/thread_info.h | |||
@@ -64,8 +64,6 @@ struct thread_info { | |||
64 | __u64 kernel_cntd0, kernel_cntd1; | 64 | __u64 kernel_cntd0, kernel_cntd1; |
65 | __u64 pcr_reg; | 65 | __u64 pcr_reg; |
66 | 66 | ||
67 | __u64 cee_stuff; | ||
68 | |||
69 | struct restart_block restart_block; | 67 | struct restart_block restart_block; |
70 | 68 | ||
71 | struct pt_regs *kern_una_regs; | 69 | struct pt_regs *kern_una_regs; |
@@ -104,10 +102,9 @@ struct thread_info { | |||
104 | #define TI_KERN_CNTD0 0x00000480 | 102 | #define TI_KERN_CNTD0 0x00000480 |
105 | #define TI_KERN_CNTD1 0x00000488 | 103 | #define TI_KERN_CNTD1 0x00000488 |
106 | #define TI_PCR 0x00000490 | 104 | #define TI_PCR 0x00000490 |
107 | #define TI_CEE_STUFF 0x00000498 | 105 | #define TI_RESTART_BLOCK 0x00000498 |
108 | #define TI_RESTART_BLOCK 0x000004a0 | 106 | #define TI_KUNA_REGS 0x000004c0 |
109 | #define TI_KUNA_REGS 0x000004c8 | 107 | #define TI_KUNA_INSN 0x000004c8 |
110 | #define TI_KUNA_INSN 0x000004d0 | ||
111 | #define TI_FPREGS 0x00000500 | 108 | #define TI_FPREGS 0x00000500 |
112 | 109 | ||
113 | /* We embed this in the uppermost byte of thread_info->flags */ | 110 | /* We embed this in the uppermost byte of thread_info->flags */ |
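(The offset shuffle is just the 8-byte cee_stuff removal rippling through: TI_RESTART_BLOCK drops from 0x4a0 to 0x498, and since the restart_block occupies 0x28 bytes in either layout, TI_KUNA_REGS/TI_KUNA_INSN land at 0x4c0/0x4c8 while the aligned TI_FPREGS stays put at 0x500.)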
diff --git a/include/asm-sparc64/timex.h b/include/asm-sparc64/timex.h index 9e8d4175bcb2..2a5e4ebaad80 100644 --- a/include/asm-sparc64/timex.h +++ b/include/asm-sparc64/timex.h | |||
@@ -14,4 +14,10 @@ | |||
14 | typedef unsigned long cycles_t; | 14 | typedef unsigned long cycles_t; |
15 | #define get_cycles() tick_ops->get_tick() | 15 | #define get_cycles() tick_ops->get_tick() |
16 | 16 | ||
17 | #define ARCH_HAS_READ_CURRENT_TIMER 1 | ||
18 | #define read_current_timer(timer_val_p) \ | ||
19 | ({ *timer_val_p = tick_ops->get_tick(); \ | ||
20 | 0; \ | ||
21 | }) | ||
22 | |||
17 | #endif | 23 | #endif |
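Defining ARCH_HAS_READ_CURRENT_TIMER lets generic code, such as boot-time delay calibration, sample the tick counter directly instead of timing a busy loop. A usage sketch, where do_work() is a hypothetical stand-in for whatever is being measured:

	unsigned long t0, t1;

	read_current_timer(&t0);
	do_work();			/* hypothetical workload */
	read_current_timer(&t1);
	/* t1 - t0 is the elapsed interval in tick-register cycles. */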
diff --git a/include/asm-sparc64/tlbflush.h b/include/asm-sparc64/tlbflush.h index 3ef9909ac3ac..9ad5d9c51d42 100644 --- a/include/asm-sparc64/tlbflush.h +++ b/include/asm-sparc64/tlbflush.h | |||
@@ -5,6 +5,11 @@ | |||
5 | #include <linux/mm.h> | 5 | #include <linux/mm.h> |
6 | #include <asm/mmu_context.h> | 6 | #include <asm/mmu_context.h> |
7 | 7 | ||
8 | /* TSB flush operations. */ | ||
9 | struct mmu_gather; | ||
10 | extern void flush_tsb_kernel_range(unsigned long start, unsigned long end); | ||
11 | extern void flush_tsb_user(struct mmu_gather *mp); | ||
12 | |||
8 | /* TLB flush operations. */ | 13 | /* TLB flush operations. */ |
9 | 14 | ||
10 | extern void flush_tlb_pending(void); | 15 | extern void flush_tlb_pending(void); |
@@ -14,28 +19,36 @@ extern void flush_tlb_pending(void); | |||
14 | #define flush_tlb_page(vma,addr) flush_tlb_pending() | 19 | #define flush_tlb_page(vma,addr) flush_tlb_pending() |
15 | #define flush_tlb_mm(mm) flush_tlb_pending() | 20 | #define flush_tlb_mm(mm) flush_tlb_pending() |
16 | 21 | ||
22 | /* Local cpu only. */ | ||
17 | extern void __flush_tlb_all(void); | 23 | extern void __flush_tlb_all(void); |
24 | |||
18 | extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r); | 25 | extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r); |
19 | 26 | ||
20 | extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end); | 27 | extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end); |
21 | 28 | ||
22 | #ifndef CONFIG_SMP | 29 | #ifndef CONFIG_SMP |
23 | 30 | ||
24 | #define flush_tlb_all() __flush_tlb_all() | ||
25 | #define flush_tlb_kernel_range(start,end) \ | 31 | #define flush_tlb_kernel_range(start,end) \ |
26 | __flush_tlb_kernel_range(start,end) | 32 | do { flush_tsb_kernel_range(start,end); \ |
33 | __flush_tlb_kernel_range(start,end); \ | ||
34 | } while (0) | ||
27 | 35 | ||
28 | #else /* CONFIG_SMP */ | 36 | #else /* CONFIG_SMP */ |
29 | 37 | ||
30 | extern void smp_flush_tlb_all(void); | ||
31 | extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); | 38 | extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); |
32 | 39 | ||
33 | #define flush_tlb_all() smp_flush_tlb_all() | ||
34 | #define flush_tlb_kernel_range(start, end) \ | 40 | #define flush_tlb_kernel_range(start, end) \ |
35 | smp_flush_tlb_kernel_range(start, end) | 41 | do { flush_tsb_kernel_range(start,end); \ |
42 | smp_flush_tlb_kernel_range(start, end); \ | ||
43 | } while (0) | ||
36 | 44 | ||
37 | #endif /* ! CONFIG_SMP */ | 45 | #endif /* ! CONFIG_SMP */ |
38 | 46 | ||
39 | extern void flush_tlb_pgtables(struct mm_struct *, unsigned long, unsigned long); | 47 | static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end) |
48 | { | ||
49 | /* We don't use virtual page tables for TLB miss processing | ||
50 | * any more. Nowadays we use the TSB. | ||
51 | */ | ||
52 | } | ||
40 | 53 | ||
41 | #endif /* _SPARC64_TLBFLUSH_H */ | 54 | #endif /* _SPARC64_TLBFLUSH_H */ |
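The new do { } while (0) pairing exists because a TLB flush alone is no longer enough: a stale TSB entry would immediately re-install the old translation on the next miss. A sketch of what flush_tsb_kernel_range() plausibly does, using the entry layout and constants from asm-sparc64/tsb.h introduced below (the struct and hashing here are assumptions, not the kernel's actual implementation):

	struct tsb_entry {
		unsigned long tag;
		unsigned long pte;
	};

	extern struct tsb_entry swapper_tsb[];

	void flush_tsb_kernel_range(unsigned long start, unsigned long end)
	{
		unsigned long v;

		for (v = start; v < end; v += PAGE_SIZE) {
			/* Index like KERN_TSB_LOOKUP_TL1, tag as in TAG TARGET. */
			unsigned long hash = (v >> PAGE_SHIFT) &
					     (KERNEL_TSB_NENTRIES - 1);
			struct tsb_entry *ent = &swapper_tsb[hash];

			if (ent->tag == (v >> 22)) {
				/* A tag that can never match invalidates it. */
				ent->tag = 1UL << TSB_TAG_INVALID_BIT;
			}
		}
	}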
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h new file mode 100644 index 000000000000..e82612cd9f33 --- /dev/null +++ b/include/asm-sparc64/tsb.h | |||
@@ -0,0 +1,281 @@ | |||
1 | #ifndef _SPARC64_TSB_H | ||
2 | #define _SPARC64_TSB_H | ||
3 | |||
4 | /* The sparc64 TSB is similar to the powerpc hashtables. It's a | ||
5 | * power-of-2 sized table of TAG/PTE pairs. The cpu precomputes | ||
6 | * pointers into this table for 8K and 64K page sizes, and also a | ||
7 | * comparison TAG based upon the virtual address and context which | ||
8 | * faults. | ||
9 | * | ||
10 | * TLB miss trap handler software does the actual lookup via something | ||
11 | * of the form: | ||
12 | * | ||
13 | * ldxa [%g0] ASI_{D,I}MMU_TSB_8KB_PTR, %g1 | ||
14 | * ldxa [%g0] ASI_{D,I}MMU, %g6 | ||
15 | * sllx %g6, 22, %g6 | ||
16 | * srlx %g6, 22, %g6 | ||
17 | * ldda [%g1] ASI_NUCLEUS_QUAD_LDD, %g4 | ||
18 | * cmp %g4, %g6 | ||
19 | * bne,pn %xcc, tsb_miss_{d,i}tlb | ||
20 | * mov FAULT_CODE_{D,I}TLB, %g3 | ||
21 | * stxa %g5, [%g0] ASI_{D,I}TLB_DATA_IN | ||
22 | * retry | ||
23 | * | ||
24 | * | ||
25 | * Each 16-byte slot of the TSB is the 8-byte tag and then the 8-byte | ||
26 | * PTE. The TAG is of the same layout as the TLB TAG TARGET mmu | ||
27 | * register which is: | ||
28 | * | ||
29 | * ------------------------------------------------- | ||
30 | * | - | CONTEXT | - | VADDR bits 63:22 | | ||
31 | * ------------------------------------------------- | ||
32 | * 63 61 60 48 47 42 41 0 | ||
33 | * | ||
34 | * But actually, since we use per-mm TSB's, we zero out the CONTEXT | ||
35 | * field. | ||
36 | * | ||
37 | * Like the powerpc hashtables we need to use locking in order to | ||
38 | * synchronize while we update the entries. PTE updates need locking | ||
39 | * as well. | ||
40 | * | ||
41 | * We need to carefully choose a lock bit for the TSB entry. We | ||
42 | * choose to use bit 47 in the tag. Also, since we never map anything | ||
43 | * at page zero in context zero, we use zero as an invalid tag entry. | ||
44 | * When the lock bit is set, this forces a tag comparison failure. | ||
45 | */ | ||
46 | |||
47 | #define TSB_TAG_LOCK_BIT 47 | ||
48 | #define TSB_TAG_LOCK_HIGH (1 << (TSB_TAG_LOCK_BIT - 32)) | ||
49 | |||
50 | #define TSB_TAG_INVALID_BIT 46 | ||
51 | #define TSB_TAG_INVALID_HIGH (1 << (TSB_TAG_INVALID_BIT - 32)) | ||
52 | |||
53 | #define TSB_MEMBAR membar #StoreStore | ||
54 | |||
55 | /* Some cpus support physical address quad loads. We want to use | ||
56 | * those if possible so we don't need to hard-lock the TSB mapping | ||
57 | * into the TLB. We encode some instruction patching in order to | ||
58 | * support this. | ||
59 | * | ||
60 | * The kernel TSB is locked into the TLB by virtue of being in the | ||
61 | * kernel image, so we don't play these games for swapper_tsb access. | ||
62 | */ | ||
63 | #ifndef __ASSEMBLY__ | ||
64 | struct tsb_ldquad_phys_patch_entry { | ||
65 | unsigned int addr; | ||
66 | unsigned int sun4u_insn; | ||
67 | unsigned int sun4v_insn; | ||
68 | }; | ||
69 | extern struct tsb_ldquad_phys_patch_entry __tsb_ldquad_phys_patch, | ||
70 | __tsb_ldquad_phys_patch_end; | ||
71 | |||
72 | struct tsb_phys_patch_entry { | ||
73 | unsigned int addr; | ||
74 | unsigned int insn; | ||
75 | }; | ||
76 | extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; | ||
77 | #endif | ||
78 | #define TSB_LOAD_QUAD(TSB, REG) \ | ||
79 | 661: ldda [TSB] ASI_NUCLEUS_QUAD_LDD, REG; \ | ||
80 | .section .tsb_ldquad_phys_patch, "ax"; \ | ||
81 | .word 661b; \ | ||
82 | ldda [TSB] ASI_QUAD_LDD_PHYS, REG; \ | ||
83 | ldda [TSB] ASI_QUAD_LDD_PHYS_4V, REG; \ | ||
84 | .previous | ||
85 | |||
86 | #define TSB_LOAD_TAG_HIGH(TSB, REG) \ | ||
87 | 661: lduwa [TSB] ASI_N, REG; \ | ||
88 | .section .tsb_phys_patch, "ax"; \ | ||
89 | .word 661b; \ | ||
90 | lduwa [TSB] ASI_PHYS_USE_EC, REG; \ | ||
91 | .previous | ||
92 | |||
93 | #define TSB_LOAD_TAG(TSB, REG) \ | ||
94 | 661: ldxa [TSB] ASI_N, REG; \ | ||
95 | .section .tsb_phys_patch, "ax"; \ | ||
96 | .word 661b; \ | ||
97 | ldxa [TSB] ASI_PHYS_USE_EC, REG; \ | ||
98 | .previous | ||
99 | |||
100 | #define TSB_CAS_TAG_HIGH(TSB, REG1, REG2) \ | ||
101 | 661: casa [TSB] ASI_N, REG1, REG2; \ | ||
102 | .section .tsb_phys_patch, "ax"; \ | ||
103 | .word 661b; \ | ||
104 | casa [TSB] ASI_PHYS_USE_EC, REG1, REG2; \ | ||
105 | .previous | ||
106 | |||
107 | #define TSB_CAS_TAG(TSB, REG1, REG2) \ | ||
108 | 661: casxa [TSB] ASI_N, REG1, REG2; \ | ||
109 | .section .tsb_phys_patch, "ax"; \ | ||
110 | .word 661b; \ | ||
111 | casxa [TSB] ASI_PHYS_USE_EC, REG1, REG2; \ | ||
112 | .previous | ||
113 | |||
114 | #define TSB_STORE(ADDR, VAL) \ | ||
115 | 661: stxa VAL, [ADDR] ASI_N; \ | ||
116 | .section .tsb_phys_patch, "ax"; \ | ||
117 | .word 661b; \ | ||
118 | stxa VAL, [ADDR] ASI_PHYS_USE_EC; \ | ||
119 | .previous | ||
120 | |||
121 | #define TSB_LOCK_TAG(TSB, REG1, REG2) \ | ||
122 | 99: TSB_LOAD_TAG_HIGH(TSB, REG1); \ | ||
123 | sethi %hi(TSB_TAG_LOCK_HIGH), REG2;\ | ||
124 | andcc REG1, REG2, %g0; \ | ||
125 | bne,pn %icc, 99b; \ | ||
126 | nop; \ | ||
127 | TSB_CAS_TAG_HIGH(TSB, REG1, REG2); \ | ||
128 | cmp REG1, REG2; \ | ||
129 | bne,pn %icc, 99b; \ | ||
130 | nop; \ | ||
131 | TSB_MEMBAR | ||
132 | |||
133 | #define TSB_WRITE(TSB, TTE, TAG) \ | ||
134 | add TSB, 0x8, TSB; \ | ||
135 | TSB_STORE(TSB, TTE); \ | ||
136 | sub TSB, 0x8, TSB; \ | ||
137 | TSB_MEMBAR; \ | ||
138 | TSB_STORE(TSB, TAG); | ||
139 | |||
140 | #define KTSB_LOAD_QUAD(TSB, REG) \ | ||
141 | ldda [TSB] ASI_NUCLEUS_QUAD_LDD, REG; | ||
142 | |||
143 | #define KTSB_STORE(ADDR, VAL) \ | ||
144 | stxa VAL, [ADDR] ASI_N; | ||
145 | |||
146 | #define KTSB_LOCK_TAG(TSB, REG1, REG2) \ | ||
147 | 99: lduwa [TSB] ASI_N, REG1; \ | ||
148 | sethi %hi(TSB_TAG_LOCK_HIGH), REG2;\ | ||
149 | andcc REG1, REG2, %g0; \ | ||
150 | bne,pn %icc, 99b; \ | ||
151 | nop; \ | ||
152 | casa [TSB] ASI_N, REG1, REG2;\ | ||
153 | cmp REG1, REG2; \ | ||
154 | bne,pn %icc, 99b; \ | ||
155 | nop; \ | ||
156 | TSB_MEMBAR | ||
157 | |||
158 | #define KTSB_WRITE(TSB, TTE, TAG) \ | ||
159 | add TSB, 0x8, TSB; \ | ||
160 | stxa TTE, [TSB] ASI_N; \ | ||
161 | sub TSB, 0x8, TSB; \ | ||
162 | TSB_MEMBAR; \ | ||
163 | stxa TAG, [TSB] ASI_N; | ||
164 | |||
165 | /* Do a kernel page table walk. Leaves physical PTE pointer in | ||
166 | * REG1. Jumps to FAIL_LABEL on early page table walk termination. | ||
167 | * VADDR will not be clobbered, but REG2 will. | ||
168 | */ | ||
169 | #define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL) \ | ||
170 | sethi %hi(swapper_pg_dir), REG1; \ | ||
171 | or REG1, %lo(swapper_pg_dir), REG1; \ | ||
172 | sllx VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \ | ||
173 | srlx REG2, 64 - PAGE_SHIFT, REG2; \ | ||
174 | andn REG2, 0x3, REG2; \ | ||
175 | lduw [REG1 + REG2], REG1; \ | ||
176 | brz,pn REG1, FAIL_LABEL; \ | ||
177 | sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \ | ||
178 | srlx REG2, 64 - PAGE_SHIFT, REG2; \ | ||
179 | sllx REG1, 11, REG1; \ | ||
180 | andn REG2, 0x3, REG2; \ | ||
181 | lduwa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ | ||
182 | brz,pn REG1, FAIL_LABEL; \ | ||
183 | sllx VADDR, 64 - PMD_SHIFT, REG2; \ | ||
184 | srlx REG2, 64 - PAGE_SHIFT, REG2; \ | ||
185 | sllx REG1, 11, REG1; \ | ||
186 | andn REG2, 0x7, REG2; \ | ||
187 | add REG1, REG2, REG1; | ||
188 | |||
189 | /* Do a user page table walk in MMU globals. Leaves physical PTE | ||
190 | * pointer in REG1. Jumps to FAIL_LABEL on early page table walk | ||
191 | * termination. Physical base of page tables is in PHYS_PGD which | ||
192 | * will not be modified. | ||
193 | * | ||
194 | * VADDR will not be clobbered, but REG1 and REG2 will. | ||
195 | */ | ||
196 | #define USER_PGTABLE_WALK_TL1(VADDR, PHYS_PGD, REG1, REG2, FAIL_LABEL) \ | ||
197 | sllx VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \ | ||
198 | srlx REG2, 64 - PAGE_SHIFT, REG2; \ | ||
199 | andn REG2, 0x3, REG2; \ | ||
200 | lduwa [PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \ | ||
201 | brz,pn REG1, FAIL_LABEL; \ | ||
202 | sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \ | ||
203 | srlx REG2, 64 - PAGE_SHIFT, REG2; \ | ||
204 | sllx REG1, 11, REG1; \ | ||
205 | andn REG2, 0x3, REG2; \ | ||
206 | lduwa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ | ||
207 | brz,pn REG1, FAIL_LABEL; \ | ||
208 | sllx VADDR, 64 - PMD_SHIFT, REG2; \ | ||
209 | srlx REG2, 64 - PAGE_SHIFT, REG2; \ | ||
210 | sllx REG1, 11, REG1; \ | ||
211 | andn REG2, 0x7, REG2; \ | ||
212 | add REG1, REG2, REG1; | ||
213 | |||
214 | /* Look up an OBP mapping on VADDR in the prom_trans[] table at TL>0. | ||
215 | * If no entry is found, FAIL_LABEL will be branched to. On success | ||
216 | * the resulting PTE value will be left in REG1. VADDR is preserved | ||
217 | * by this routine. | ||
218 | */ | ||
219 | #define OBP_TRANS_LOOKUP(VADDR, REG1, REG2, REG3, FAIL_LABEL) \ | ||
220 | sethi %hi(prom_trans), REG1; \ | ||
221 | or REG1, %lo(prom_trans), REG1; \ | ||
222 | 97: ldx [REG1 + 0x00], REG2; \ | ||
223 | brz,pn REG2, FAIL_LABEL; \ | ||
224 | nop; \ | ||
225 | ldx [REG1 + 0x08], REG3; \ | ||
226 | add REG2, REG3, REG3; \ | ||
227 | cmp REG2, VADDR; \ | ||
228 | bgu,pt %xcc, 98f; \ | ||
229 | cmp VADDR, REG3; \ | ||
230 | bgeu,pt %xcc, 98f; \ | ||
231 | ldx [REG1 + 0x10], REG3; \ | ||
232 | sub VADDR, REG2, REG2; \ | ||
233 | ba,pt %xcc, 99f; \ | ||
234 | add REG3, REG2, REG1; \ | ||
235 | 98: ba,pt %xcc, 97b; \ | ||
236 | add REG1, (3 * 8), REG1; \ | ||
237 | 99: | ||
238 | |||
239 | /* We use a 32K TSB for the whole kernel, which allows us to | ||
240 | * handle about 16MB of modules and vmalloc mappings without | ||
241 | * incurring many hash conflicts. | ||
242 | */ | ||
243 | #define KERNEL_TSB_SIZE_BYTES (32 * 1024) | ||
244 | #define KERNEL_TSB_NENTRIES \ | ||
245 | (KERNEL_TSB_SIZE_BYTES / 16) | ||
246 | #define KERNEL_TSB4M_NENTRIES 4096 | ||
247 | |||
248 | /* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL | ||
249 | * on TSB hit. REG1, REG2, REG3, and REG4 are used as temporaries | ||
250 | * and the found TTE will be left in REG1. REG3 and REG4 must | ||
251 | * be an even/odd pair of registers. | ||
252 | * | ||
253 | * VADDR and TAG will be preserved and not clobbered by this macro. | ||
254 | */ | ||
255 | #define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \ | ||
256 | sethi %hi(swapper_tsb), REG1; \ | ||
257 | or REG1, %lo(swapper_tsb), REG1; \ | ||
258 | srlx VADDR, PAGE_SHIFT, REG2; \ | ||
259 | and REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \ | ||
260 | sllx REG2, 4, REG2; \ | ||
261 | add REG1, REG2, REG2; \ | ||
262 | KTSB_LOAD_QUAD(REG2, REG3); \ | ||
263 | cmp REG3, TAG; \ | ||
264 | be,a,pt %xcc, OK_LABEL; \ | ||
265 | mov REG4, REG1; | ||
266 | |||
267 | /* This version uses a trick: the TAG is already (VADDR >> 22), so | ||
268 | * we can make use of that for the index computation. | ||
269 | */ | ||
270 | #define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \ | ||
271 | sethi %hi(swapper_4m_tsb), REG1; \ | ||
272 | or REG1, %lo(swapper_4m_tsb), REG1; \ | ||
273 | and TAG, (KERNEL_TSB_NENTRIES - 1), REG2; \ | ||
274 | sllx REG2, 4, REG2; \ | ||
275 | add REG1, REG2, REG2; \ | ||
276 | KTSB_LOAD_QUAD(REG2, REG3); \ | ||
277 | cmp REG3, TAG; \ | ||
278 | be,a,pt %xcc, OK_LABEL; \ | ||
279 | mov REG4, REG1; | ||
280 | |||
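The trick pays off because kernel TSB tags are the virtual address shifted right by 22, i.e. 4MB granularity (1 << 22 = 4MB). For the 4MB TSB the tag is therefore already a page index, and masking it with the entry count selects the bucket without the extra srlx the base-page lookup needs.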
281 | #endif /* !(_SPARC64_TSB_H) */ | ||
diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h index 2784f80094c3..2d5e3c464df5 100644 --- a/include/asm-sparc64/ttable.h +++ b/include/asm-sparc64/ttable.h | |||
@@ -93,7 +93,7 @@ | |||
93 | 93 | ||
94 | #define SYSCALL_TRAP(routine, systbl) \ | 94 | #define SYSCALL_TRAP(routine, systbl) \ |
95 | sethi %hi(109f), %g7; \ | 95 | sethi %hi(109f), %g7; \ |
96 | ba,pt %xcc, scetrap; \ | 96 | ba,pt %xcc, etrap; \ |
97 | 109: or %g7, %lo(109b), %g7; \ | 97 | 109: or %g7, %lo(109b), %g7; \ |
98 | sethi %hi(systbl), %l7; \ | 98 | sethi %hi(systbl), %l7; \ |
99 | ba,pt %xcc, routine; \ | 99 | ba,pt %xcc, routine; \ |
@@ -109,14 +109,14 @@ | |||
109 | nop;nop;nop; | 109 | nop;nop;nop; |
110 | 110 | ||
111 | #define TRAP_UTRAP(handler,lvl) \ | 111 | #define TRAP_UTRAP(handler,lvl) \ |
112 | ldx [%g6 + TI_UTRAPS], %g1; \ | 112 | mov handler, %g3; \ |
113 | sethi %hi(109f), %g7; \ | 113 | ba,pt %xcc, utrap_trap; \ |
114 | brz,pn %g1, utrap; \ | 114 | mov lvl, %g4; \ |
115 | or %g7, %lo(109f), %g7; \ | 115 | nop; \ |
116 | ba,pt %xcc, utrap; \ | 116 | nop; \ |
117 | 109: ldx [%g1 + handler*8], %g1; \ | 117 | nop; \ |
118 | ba,pt %xcc, utrap_ill; \ | 118 | nop; \ |
119 | mov lvl, %o1; | 119 | nop; |
120 | 120 | ||
121 | #ifdef CONFIG_SUNOS_EMUL | 121 | #ifdef CONFIG_SUNOS_EMUL |
122 | #define SUNOS_SYSCALL_TRAP SYSCALL_TRAP(linux_sparc_syscall32, sunos_sys_table) | 122 | #define SUNOS_SYSCALL_TRAP SYSCALL_TRAP(linux_sparc_syscall32, sunos_sys_table) |
@@ -136,8 +136,6 @@ | |||
136 | #else | 136 | #else |
137 | #define SOLARIS_SYSCALL_TRAP TRAP(solaris_syscall) | 137 | #define SOLARIS_SYSCALL_TRAP TRAP(solaris_syscall) |
138 | #endif | 138 | #endif |
139 | /* FIXME: Write these actually */ | ||
140 | #define NETBSD_SYSCALL_TRAP TRAP(netbsd_syscall) | ||
141 | #define BREAKPOINT_TRAP TRAP(breakpoint_trap) | 139 | #define BREAKPOINT_TRAP TRAP(breakpoint_trap) |
142 | 140 | ||
143 | #define TRAP_IRQ(routine, level) \ | 141 | #define TRAP_IRQ(routine, level) \ |
@@ -182,6 +180,26 @@ | |||
182 | #define KPROBES_TRAP(lvl) TRAP_ARG(bad_trap, lvl) | 180 | #define KPROBES_TRAP(lvl) TRAP_ARG(bad_trap, lvl) |
183 | #endif | 181 | #endif |
184 | 182 | ||
183 | #define SUN4V_ITSB_MISS \ | ||
184 | ldxa [%g0] ASI_SCRATCHPAD, %g2; \ | ||
185 | ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4; \ | ||
186 | ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5; \ | ||
187 | srlx %g4, 22, %g6; \ | ||
188 | ba,pt %xcc, sun4v_itsb_miss; \ | ||
189 | nop; \ | ||
190 | nop; \ | ||
191 | nop; | ||
192 | |||
193 | #define SUN4V_DTSB_MISS \ | ||
194 | ldxa [%g0] ASI_SCRATCHPAD, %g2; \ | ||
195 | ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4; \ | ||
196 | ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5; \ | ||
197 | srlx %g4, 22, %g6; \ | ||
198 | ba,pt %xcc, sun4v_dtsb_miss; \ | ||
199 | nop; \ | ||
200 | nop; \ | ||
201 | nop; | ||
202 | |||
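Both stubs share one prologue and differ only in the I- versus D-side offsets: fetch the per-cpu fault status area (the hypervisor leaves its address in the scratchpad register), pull out the faulting VA and context, precompute the VADDR >> 22 tag that the TSB lookups expect, and branch to the common handler. In rough C (the struct is illustrative; the real layout is whatever the HV_FAULT_*_OFFSET constants describe):

        struct hv_fault_area {          /* illustrative stand-in */
                unsigned long addr;     /* HV_FAULT_{I,D}_ADDR_OFFSET */
                unsigned long ctx;      /* HV_FAULT_{I,D}_CTX_OFFSET */
        };

        /* %g4 = addr, %g5 = ctx, %g6 = tag on entry to sun4v_{i,d}tsb_miss */
        static unsigned long sun4v_miss_tag(const struct hv_fault_area *fa)
        {
                return fa->addr >> 22;
        }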
185 | /* Before touching these macros, you owe it to yourself to go and | 203 | /* Before touching these macros, you owe it to yourself to go and |
186 | * see how arch/sparc64/kernel/winfixup.S works... -DaveM | 204 | * see how arch/sparc64/kernel/winfixup.S works... -DaveM |
187 | * | 205 | * |
@@ -221,6 +239,31 @@ | |||
221 | saved; retry; nop; nop; nop; nop; nop; nop; \ | 239 | saved; retry; nop; nop; nop; nop; nop; nop; \ |
222 | nop; nop; nop; nop; nop; nop; nop; nop; | 240 | nop; nop; nop; nop; nop; nop; nop; nop; |
223 | 241 | ||
242 | #define SPILL_0_NORMAL_ETRAP \ | ||
243 | etrap_kernel_spill: \ | ||
244 | stx %l0, [%sp + STACK_BIAS + 0x00]; \ | ||
245 | stx %l1, [%sp + STACK_BIAS + 0x08]; \ | ||
246 | stx %l2, [%sp + STACK_BIAS + 0x10]; \ | ||
247 | stx %l3, [%sp + STACK_BIAS + 0x18]; \ | ||
248 | stx %l4, [%sp + STACK_BIAS + 0x20]; \ | ||
249 | stx %l5, [%sp + STACK_BIAS + 0x28]; \ | ||
250 | stx %l6, [%sp + STACK_BIAS + 0x30]; \ | ||
251 | stx %l7, [%sp + STACK_BIAS + 0x38]; \ | ||
252 | stx %i0, [%sp + STACK_BIAS + 0x40]; \ | ||
253 | stx %i1, [%sp + STACK_BIAS + 0x48]; \ | ||
254 | stx %i2, [%sp + STACK_BIAS + 0x50]; \ | ||
255 | stx %i3, [%sp + STACK_BIAS + 0x58]; \ | ||
256 | stx %i4, [%sp + STACK_BIAS + 0x60]; \ | ||
257 | stx %i5, [%sp + STACK_BIAS + 0x68]; \ | ||
258 | stx %i6, [%sp + STACK_BIAS + 0x70]; \ | ||
259 | stx %i7, [%sp + STACK_BIAS + 0x78]; \ | ||
260 | saved; \ | ||
261 | sub %g1, 2, %g1; \ | ||
262 | ba,pt %xcc, etrap_save; \ | ||
263 | wrpr %g1, %cwp; \ | ||
264 | nop; nop; nop; nop; nop; nop; nop; nop; \ | ||
265 | nop; nop; nop; nop; | ||
266 | |||
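What the kernel-mode etrap spill writes is the canonical 128-byte register window save area at the bottom of the biased frame:

        struct reg_window {                 /* the layout stored above */
                unsigned long locals[8];    /* %l0-%l7 at %sp + STACK_BIAS + 0x00 */
                unsigned long ins[8];       /* %i0-%i7 at %sp + STACK_BIAS + 0x40 */
        };

After `saved` retires the spilled window, CWP is wound back two windows (%g1 is assumed to already hold the current CWP on entry) so control rejoins etrap_save in the window the trap came from.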
224 | /* Normal 64bit spill */ | 267 | /* Normal 64bit spill */ |
225 | #define SPILL_1_GENERIC(ASI) \ | 268 | #define SPILL_1_GENERIC(ASI) \ |
226 | add %sp, STACK_BIAS + 0x00, %g1; \ | 269 | add %sp, STACK_BIAS + 0x00, %g1; \ |
@@ -254,6 +297,67 @@ | |||
254 | b,a,pt %xcc, spill_fixup_mna; \ | 297 | b,a,pt %xcc, spill_fixup_mna; \ |
255 | b,a,pt %xcc, spill_fixup; | 298 | b,a,pt %xcc, spill_fixup; |
256 | 299 | ||
300 | #define SPILL_1_GENERIC_ETRAP \ | ||
301 | etrap_user_spill_64bit: \ | ||
302 | stxa %l0, [%sp + STACK_BIAS + 0x00] %asi; \ | ||
303 | stxa %l1, [%sp + STACK_BIAS + 0x08] %asi; \ | ||
304 | stxa %l2, [%sp + STACK_BIAS + 0x10] %asi; \ | ||
305 | stxa %l3, [%sp + STACK_BIAS + 0x18] %asi; \ | ||
306 | stxa %l4, [%sp + STACK_BIAS + 0x20] %asi; \ | ||
307 | stxa %l5, [%sp + STACK_BIAS + 0x28] %asi; \ | ||
308 | stxa %l6, [%sp + STACK_BIAS + 0x30] %asi; \ | ||
309 | stxa %l7, [%sp + STACK_BIAS + 0x38] %asi; \ | ||
310 | stxa %i0, [%sp + STACK_BIAS + 0x40] %asi; \ | ||
311 | stxa %i1, [%sp + STACK_BIAS + 0x48] %asi; \ | ||
312 | stxa %i2, [%sp + STACK_BIAS + 0x50] %asi; \ | ||
313 | stxa %i3, [%sp + STACK_BIAS + 0x58] %asi; \ | ||
314 | stxa %i4, [%sp + STACK_BIAS + 0x60] %asi; \ | ||
315 | stxa %i5, [%sp + STACK_BIAS + 0x68] %asi; \ | ||
316 | stxa %i6, [%sp + STACK_BIAS + 0x70] %asi; \ | ||
317 | stxa %i7, [%sp + STACK_BIAS + 0x78] %asi; \ | ||
318 | saved; \ | ||
319 | sub %g1, 2, %g1; \ | ||
320 | ba,pt %xcc, etrap_save; \ | ||
321 | wrpr %g1, %cwp; \ | ||
322 | nop; nop; nop; nop; nop; \ | ||
323 | nop; nop; nop; nop; \ | ||
324 | ba,a,pt %xcc, etrap_spill_fixup_64bit; \ | ||
325 | ba,a,pt %xcc, etrap_spill_fixup_64bit; \ | ||
326 | ba,a,pt %xcc, etrap_spill_fixup_64bit; | ||
327 | |||
328 | #define SPILL_1_GENERIC_ETRAP_FIXUP \ | ||
329 | etrap_spill_fixup_64bit: \ | ||
330 | ldub [%g6 + TI_WSAVED], %g1; \ | ||
331 | sll %g1, 3, %g3; \ | ||
332 | add %g6, %g3, %g3; \ | ||
333 | stx %sp, [%g3 + TI_RWIN_SPTRS]; \ | ||
334 | sll %g1, 7, %g3; \ | ||
335 | add %g6, %g3, %g3; \ | ||
336 | stx %l0, [%g3 + TI_REG_WINDOW + 0x00]; \ | ||
337 | stx %l1, [%g3 + TI_REG_WINDOW + 0x08]; \ | ||
338 | stx %l2, [%g3 + TI_REG_WINDOW + 0x10]; \ | ||
339 | stx %l3, [%g3 + TI_REG_WINDOW + 0x18]; \ | ||
340 | stx %l4, [%g3 + TI_REG_WINDOW + 0x20]; \ | ||
341 | stx %l5, [%g3 + TI_REG_WINDOW + 0x28]; \ | ||
342 | stx %l6, [%g3 + TI_REG_WINDOW + 0x30]; \ | ||
343 | stx %l7, [%g3 + TI_REG_WINDOW + 0x38]; \ | ||
344 | stx %i0, [%g3 + TI_REG_WINDOW + 0x40]; \ | ||
345 | stx %i1, [%g3 + TI_REG_WINDOW + 0x48]; \ | ||
346 | stx %i2, [%g3 + TI_REG_WINDOW + 0x50]; \ | ||
347 | stx %i3, [%g3 + TI_REG_WINDOW + 0x58]; \ | ||
348 | stx %i4, [%g3 + TI_REG_WINDOW + 0x60]; \ | ||
349 | stx %i5, [%g3 + TI_REG_WINDOW + 0x68]; \ | ||
350 | stx %i6, [%g3 + TI_REG_WINDOW + 0x70]; \ | ||
351 | stx %i7, [%g3 + TI_REG_WINDOW + 0x78]; \ | ||
352 | add %g1, 1, %g1; \ | ||
353 | stb %g1, [%g6 + TI_WSAVED]; \ | ||
354 | saved; \ | ||
355 | rdpr %cwp, %g1; \ | ||
356 | sub %g1, 2, %g1; \ | ||
357 | ba,pt %xcc, etrap_save; \ | ||
358 | wrpr %g1, %cwp; \ | ||
359 | nop; nop; nop | ||
360 | |||
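When the user stack itself is unusable, the fixup parks the window in thread_info instead, to be flushed out to userspace later. In C shape, assuming the usual sparc64 thread_info fields behind TI_WSAVED, TI_RWIN_SPTRS and TI_REG_WINDOW, and the reg_window struct sketched earlier:

        static void etrap_spill_fixup(struct thread_info *ti, unsigned long sp,
                                      const struct reg_window *win)
        {
                unsigned int w = ti->wsaved;    /* ldub [%g6 + TI_WSAVED] */

                ti->rwbuf_stkptrs[w] = sp;      /* sll %g1, 3: 8-byte slots */
                ti->reg_window[w] = *win;       /* sll %g1, 7: 128-byte slots */
                ti->wsaved = w + 1;             /* the stb above */
        }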
257 | /* Normal 32bit spill */ | 361 | /* Normal 32bit spill */ |
258 | #define SPILL_2_GENERIC(ASI) \ | 362 | #define SPILL_2_GENERIC(ASI) \ |
259 | srl %sp, 0, %sp; \ | 363 | srl %sp, 0, %sp; \ |
@@ -287,6 +391,68 @@ | |||
287 | b,a,pt %xcc, spill_fixup_mna; \ | 391 | b,a,pt %xcc, spill_fixup_mna; \ |
288 | b,a,pt %xcc, spill_fixup; | 392 | b,a,pt %xcc, spill_fixup; |
289 | 393 | ||
394 | #define SPILL_2_GENERIC_ETRAP \ | ||
395 | etrap_user_spill_32bit: \ | ||
396 | srl %sp, 0, %sp; \ | ||
397 | stwa %l0, [%sp + 0x00] %asi; \ | ||
398 | stwa %l1, [%sp + 0x04] %asi; \ | ||
399 | stwa %l2, [%sp + 0x08] %asi; \ | ||
400 | stwa %l3, [%sp + 0x0c] %asi; \ | ||
401 | stwa %l4, [%sp + 0x10] %asi; \ | ||
402 | stwa %l5, [%sp + 0x14] %asi; \ | ||
403 | stwa %l6, [%sp + 0x18] %asi; \ | ||
404 | stwa %l7, [%sp + 0x1c] %asi; \ | ||
405 | stwa %i0, [%sp + 0x20] %asi; \ | ||
406 | stwa %i1, [%sp + 0x24] %asi; \ | ||
407 | stwa %i2, [%sp + 0x28] %asi; \ | ||
408 | stwa %i3, [%sp + 0x2c] %asi; \ | ||
409 | stwa %i4, [%sp + 0x30] %asi; \ | ||
410 | stwa %i5, [%sp + 0x34] %asi; \ | ||
411 | stwa %i6, [%sp + 0x38] %asi; \ | ||
412 | stwa %i7, [%sp + 0x3c] %asi; \ | ||
413 | saved; \ | ||
414 | sub %g1, 2, %g1; \ | ||
415 | ba,pt %xcc, etrap_save; \ | ||
416 | wrpr %g1, %cwp; \ | ||
417 | nop; nop; nop; nop; \ | ||
418 | nop; nop; nop; nop; \ | ||
419 | ba,a,pt %xcc, etrap_spill_fixup_32bit; \ | ||
420 | ba,a,pt %xcc, etrap_spill_fixup_32bit; \ | ||
421 | ba,a,pt %xcc, etrap_spill_fixup_32bit; | ||
422 | |||
423 | #define SPILL_2_GENERIC_ETRAP_FIXUP \ | ||
424 | etrap_spill_fixup_32bit: \ | ||
425 | ldub [%g6 + TI_WSAVED], %g1; \ | ||
426 | sll %g1, 3, %g3; \ | ||
427 | add %g6, %g3, %g3; \ | ||
428 | stx %sp, [%g3 + TI_RWIN_SPTRS]; \ | ||
429 | sll %g1, 7, %g3; \ | ||
430 | add %g6, %g3, %g3; \ | ||
431 | stw %l0, [%g3 + TI_REG_WINDOW + 0x00]; \ | ||
432 | stw %l1, [%g3 + TI_REG_WINDOW + 0x04]; \ | ||
433 | stw %l2, [%g3 + TI_REG_WINDOW + 0x08]; \ | ||
434 | stw %l3, [%g3 + TI_REG_WINDOW + 0x0c]; \ | ||
435 | stw %l4, [%g3 + TI_REG_WINDOW + 0x10]; \ | ||
436 | stw %l5, [%g3 + TI_REG_WINDOW + 0x14]; \ | ||
437 | stw %l6, [%g3 + TI_REG_WINDOW + 0x18]; \ | ||
438 | stw %l7, [%g3 + TI_REG_WINDOW + 0x1c]; \ | ||
439 | stw %i0, [%g3 + TI_REG_WINDOW + 0x20]; \ | ||
440 | stw %i1, [%g3 + TI_REG_WINDOW + 0x24]; \ | ||
441 | stw %i2, [%g3 + TI_REG_WINDOW + 0x28]; \ | ||
442 | stw %i3, [%g3 + TI_REG_WINDOW + 0x2c]; \ | ||
443 | stw %i4, [%g3 + TI_REG_WINDOW + 0x30]; \ | ||
444 | stw %i5, [%g3 + TI_REG_WINDOW + 0x34]; \ | ||
445 | stw %i6, [%g3 + TI_REG_WINDOW + 0x38]; \ | ||
446 | stw %i7, [%g3 + TI_REG_WINDOW + 0x3c]; \ | ||
447 | add %g1, 1, %g1; \ | ||
448 | stb %g1, [%g6 + TI_WSAVED]; \ | ||
449 | saved; \ | ||
450 | rdpr %cwp, %g1; \ | ||
451 | sub %g1, 2, %g1; \ | ||
452 | ba,pt %xcc, etrap_save; \ | ||
453 | wrpr %g1, %cwp; \ | ||
454 | nop; nop; nop | ||
455 | |||
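The 32-bit variants differ from their 64-bit counterparts in exactly three ways: `srl %sp, 0, %sp` zero-extends the compat task's 32-bit stack pointer, the window is stored as sixteen 4-byte words (offsets 0x00-0x3c) instead of 8-byte ones, and no STACK_BIAS is applied because only 64-bit ABI frames are biased.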
290 | #define SPILL_1_NORMAL SPILL_1_GENERIC(ASI_AIUP) | 456 | #define SPILL_1_NORMAL SPILL_1_GENERIC(ASI_AIUP) |
291 | #define SPILL_2_NORMAL SPILL_2_GENERIC(ASI_AIUP) | 457 | #define SPILL_2_NORMAL SPILL_2_GENERIC(ASI_AIUP) |
292 | #define SPILL_3_NORMAL SPILL_0_NORMAL | 458 | #define SPILL_3_NORMAL SPILL_0_NORMAL |
@@ -325,6 +491,35 @@ | |||
325 | restored; retry; nop; nop; nop; nop; nop; nop; \ | 491 | restored; retry; nop; nop; nop; nop; nop; nop; \ |
326 | nop; nop; nop; nop; nop; nop; nop; nop; | 492 | nop; nop; nop; nop; nop; nop; nop; nop; |
327 | 493 | ||
494 | #define FILL_0_NORMAL_RTRAP \ | ||
495 | kern_rtt_fill: \ | ||
496 | rdpr %cwp, %g1; \ | ||
497 | sub %g1, 1, %g1; \ | ||
498 | wrpr %g1, %cwp; \ | ||
499 | ldx [%sp + STACK_BIAS + 0x00], %l0; \ | ||
500 | ldx [%sp + STACK_BIAS + 0x08], %l1; \ | ||
501 | ldx [%sp + STACK_BIAS + 0x10], %l2; \ | ||
502 | ldx [%sp + STACK_BIAS + 0x18], %l3; \ | ||
503 | ldx [%sp + STACK_BIAS + 0x20], %l4; \ | ||
504 | ldx [%sp + STACK_BIAS + 0x28], %l5; \ | ||
505 | ldx [%sp + STACK_BIAS + 0x30], %l6; \ | ||
506 | ldx [%sp + STACK_BIAS + 0x38], %l7; \ | ||
507 | ldx [%sp + STACK_BIAS + 0x40], %i0; \ | ||
508 | ldx [%sp + STACK_BIAS + 0x48], %i1; \ | ||
509 | ldx [%sp + STACK_BIAS + 0x50], %i2; \ | ||
510 | ldx [%sp + STACK_BIAS + 0x58], %i3; \ | ||
511 | ldx [%sp + STACK_BIAS + 0x60], %i4; \ | ||
512 | ldx [%sp + STACK_BIAS + 0x68], %i5; \ | ||
513 | ldx [%sp + STACK_BIAS + 0x70], %i6; \ | ||
514 | ldx [%sp + STACK_BIAS + 0x78], %i7; \ | ||
515 | restored; \ | ||
516 | add %g1, 1, %g1; \ | ||
517 | ba,pt %xcc, kern_rtt_restore; \ | ||
518 | wrpr %g1, %cwp; \ | ||
519 | nop; nop; nop; nop; nop; \ | ||
520 | nop; nop; nop; nop; | ||
521 | |||
522 | |||
328 | /* Normal 64bit fill */ | 523 | /* Normal 64bit fill */ |
329 | #define FILL_1_GENERIC(ASI) \ | 524 | #define FILL_1_GENERIC(ASI) \ |
330 | add %sp, STACK_BIAS + 0x00, %g1; \ | 525 | add %sp, STACK_BIAS + 0x00, %g1; \ |
@@ -356,6 +551,33 @@ | |||
356 | b,a,pt %xcc, fill_fixup_mna; \ | 551 | b,a,pt %xcc, fill_fixup_mna; \ |
357 | b,a,pt %xcc, fill_fixup; | 552 | b,a,pt %xcc, fill_fixup; |
358 | 553 | ||
554 | #define FILL_1_GENERIC_RTRAP \ | ||
555 | user_rtt_fill_64bit: \ | ||
556 | ldxa [%sp + STACK_BIAS + 0x00] %asi, %l0; \ | ||
557 | ldxa [%sp + STACK_BIAS + 0x08] %asi, %l1; \ | ||
558 | ldxa [%sp + STACK_BIAS + 0x10] %asi, %l2; \ | ||
559 | ldxa [%sp + STACK_BIAS + 0x18] %asi, %l3; \ | ||
560 | ldxa [%sp + STACK_BIAS + 0x20] %asi, %l4; \ | ||
561 | ldxa [%sp + STACK_BIAS + 0x28] %asi, %l5; \ | ||
562 | ldxa [%sp + STACK_BIAS + 0x30] %asi, %l6; \ | ||
563 | ldxa [%sp + STACK_BIAS + 0x38] %asi, %l7; \ | ||
564 | ldxa [%sp + STACK_BIAS + 0x40] %asi, %i0; \ | ||
565 | ldxa [%sp + STACK_BIAS + 0x48] %asi, %i1; \ | ||
566 | ldxa [%sp + STACK_BIAS + 0x50] %asi, %i2; \ | ||
567 | ldxa [%sp + STACK_BIAS + 0x58] %asi, %i3; \ | ||
568 | ldxa [%sp + STACK_BIAS + 0x60] %asi, %i4; \ | ||
569 | ldxa [%sp + STACK_BIAS + 0x68] %asi, %i5; \ | ||
570 | ldxa [%sp + STACK_BIAS + 0x70] %asi, %i6; \ | ||
571 | ldxa [%sp + STACK_BIAS + 0x78] %asi, %i7; \ | ||
572 | ba,pt %xcc, user_rtt_pre_restore; \ | ||
573 | restored; \ | ||
574 | nop; nop; nop; nop; nop; nop; \ | ||
575 | nop; nop; nop; nop; nop; \ | ||
576 | ba,a,pt %xcc, user_rtt_fill_fixup; \ | ||
577 | ba,a,pt %xcc, user_rtt_fill_fixup; \ | ||
578 | ba,a,pt %xcc, user_rtt_fill_fixup; | ||
579 | |||
580 | |||
359 | /* Normal 32bit fill */ | 581 | /* Normal 32bit fill */ |
360 | #define FILL_2_GENERIC(ASI) \ | 582 | #define FILL_2_GENERIC(ASI) \ |
361 | srl %sp, 0, %sp; \ | 583 | srl %sp, 0, %sp; \ |
@@ -387,6 +609,34 @@ | |||
387 | b,a,pt %xcc, fill_fixup_mna; \ | 609 | b,a,pt %xcc, fill_fixup_mna; \ |
388 | b,a,pt %xcc, fill_fixup; | 610 | b,a,pt %xcc, fill_fixup; |
389 | 611 | ||
612 | #define FILL_2_GENERIC_RTRAP \ | ||
613 | user_rtt_fill_32bit: \ | ||
614 | srl %sp, 0, %sp; \ | ||
615 | lduwa [%sp + 0x00] %asi, %l0; \ | ||
616 | lduwa [%sp + 0x04] %asi, %l1; \ | ||
617 | lduwa [%sp + 0x08] %asi, %l2; \ | ||
618 | lduwa [%sp + 0x0c] %asi, %l3; \ | ||
619 | lduwa [%sp + 0x10] %asi, %l4; \ | ||
620 | lduwa [%sp + 0x14] %asi, %l5; \ | ||
621 | lduwa [%sp + 0x18] %asi, %l6; \ | ||
622 | lduwa [%sp + 0x1c] %asi, %l7; \ | ||
623 | lduwa [%sp + 0x20] %asi, %i0; \ | ||
624 | lduwa [%sp + 0x24] %asi, %i1; \ | ||
625 | lduwa [%sp + 0x28] %asi, %i2; \ | ||
626 | lduwa [%sp + 0x2c] %asi, %i3; \ | ||
627 | lduwa [%sp + 0x30] %asi, %i4; \ | ||
628 | lduwa [%sp + 0x34] %asi, %i5; \ | ||
629 | lduwa [%sp + 0x38] %asi, %i6; \ | ||
630 | lduwa [%sp + 0x3c] %asi, %i7; \ | ||
631 | ba,pt %xcc, user_rtt_pre_restore; \ | ||
632 | restored; \ | ||
633 | nop; nop; nop; nop; nop; \ | ||
634 | nop; nop; nop; nop; nop; \ | ||
635 | ba,a,pt %xcc, user_rtt_fill_fixup; \ | ||
636 | ba,a,pt %xcc, user_rtt_fill_fixup; \ | ||
637 | ba,a,pt %xcc, user_rtt_fill_fixup; | ||
638 | |||
639 | |||
390 | #define FILL_1_NORMAL FILL_1_GENERIC(ASI_AIUP) | 640 | #define FILL_1_NORMAL FILL_1_GENERIC(ASI_AIUP) |
391 | #define FILL_2_NORMAL FILL_2_GENERIC(ASI_AIUP) | 641 | #define FILL_2_NORMAL FILL_2_GENERIC(ASI_AIUP) |
392 | #define FILL_3_NORMAL FILL_0_NORMAL | 642 | #define FILL_3_NORMAL FILL_0_NORMAL |
diff --git a/include/asm-sparc64/uaccess.h b/include/asm-sparc64/uaccess.h index c91d1e38eac6..afe236ba555b 100644 --- a/include/asm-sparc64/uaccess.h +++ b/include/asm-sparc64/uaccess.h | |||
@@ -114,16 +114,6 @@ case 8: __put_user_asm(data,x,addr,__pu_ret); break; \ | |||
114 | default: __pu_ret = __put_user_bad(); break; \ | 114 | default: __pu_ret = __put_user_bad(); break; \ |
115 | } __pu_ret; }) | 115 | } __pu_ret; }) |
116 | 116 | ||
117 | #define __put_user_nocheck_ret(data,addr,size,retval) ({ \ | ||
118 | register int __foo __asm__ ("l1"); \ | ||
119 | switch (size) { \ | ||
120 | case 1: __put_user_asm_ret(data,b,addr,retval,__foo); break; \ | ||
121 | case 2: __put_user_asm_ret(data,h,addr,retval,__foo); break; \ | ||
122 | case 4: __put_user_asm_ret(data,w,addr,retval,__foo); break; \ | ||
123 | case 8: __put_user_asm_ret(data,x,addr,retval,__foo); break; \ | ||
124 | default: if (__put_user_bad()) return retval; break; \ | ||
125 | } }) | ||
126 | |||
127 | #define __put_user_asm(x,size,addr,ret) \ | 117 | #define __put_user_asm(x,size,addr,ret) \ |
128 | __asm__ __volatile__( \ | 118 | __asm__ __volatile__( \ |
129 | "/* Put user asm, inline. */\n" \ | 119 | "/* Put user asm, inline. */\n" \ |
@@ -143,33 +133,6 @@ __asm__ __volatile__( \ | |||
143 | : "=r" (ret) : "r" (x), "r" (__m(addr)), \ | 133 | : "=r" (ret) : "r" (x), "r" (__m(addr)), \ |
144 | "i" (-EFAULT)) | 134 | "i" (-EFAULT)) |
145 | 135 | ||
146 | #define __put_user_asm_ret(x,size,addr,ret,foo) \ | ||
147 | if (__builtin_constant_p(ret) && ret == -EFAULT) \ | ||
148 | __asm__ __volatile__( \ | ||
149 | "/* Put user asm ret, inline. */\n" \ | ||
150 | "1:\t" "st"#size "a %1, [%2] %%asi\n\n\t" \ | ||
151 | ".section __ex_table,\"a\"\n\t" \ | ||
152 | ".align 4\n\t" \ | ||
153 | ".word 1b, __ret_efault\n\n\t" \ | ||
154 | ".previous\n\n\t" \ | ||
155 | : "=r" (foo) : "r" (x), "r" (__m(addr))); \ | ||
156 | else \ | ||
157 | __asm__ __volatile__( \ | ||
158 | "/* Put user asm ret, inline. */\n" \ | ||
159 | "1:\t" "st"#size "a %1, [%2] %%asi\n\n\t" \ | ||
160 | ".section .fixup,#alloc,#execinstr\n\t" \ | ||
161 | ".align 4\n" \ | ||
162 | "3:\n\t" \ | ||
163 | "ret\n\t" \ | ||
164 | " restore %%g0, %3, %%o0\n\n\t" \ | ||
165 | ".previous\n\t" \ | ||
166 | ".section __ex_table,\"a\"\n\t" \ | ||
167 | ".align 4\n\t" \ | ||
168 | ".word 1b, 3b\n\n\t" \ | ||
169 | ".previous\n\n\t" \ | ||
170 | : "=r" (foo) : "r" (x), "r" (__m(addr)), \ | ||
171 | "i" (ret)) | ||
172 | |||
173 | extern int __put_user_bad(void); | 136 | extern int __put_user_bad(void); |
174 | 137 | ||
175 | #define __get_user_nocheck(data,addr,size,type) ({ \ | 138 | #define __get_user_nocheck(data,addr,size,type) ({ \ |
@@ -289,14 +252,7 @@ copy_in_user(void __user *to, void __user *from, unsigned long size) | |||
289 | } | 252 | } |
290 | #define __copy_in_user copy_in_user | 253 | #define __copy_in_user copy_in_user |
291 | 254 | ||
292 | extern unsigned long __must_check __bzero_noasi(void __user *, unsigned long); | 255 | extern unsigned long __must_check __clear_user(void __user *, unsigned long); |
293 | |||
294 | static inline unsigned long __must_check | ||
295 | __clear_user(void __user *addr, unsigned long size) | ||
296 | { | ||
297 | |||
298 | return __bzero_noasi(addr, size); | ||
299 | } | ||
300 | 256 | ||
301 | #define clear_user __clear_user | 257 | #define clear_user __clear_user |
302 | 258 | ||
diff --git a/include/asm-sparc64/vdev.h b/include/asm-sparc64/vdev.h new file mode 100644 index 000000000000..996e6be7b976 --- /dev/null +++ b/include/asm-sparc64/vdev.h | |||
@@ -0,0 +1,16 @@ | |||
1 | /* vdev.h: SUN4V virtual device interfaces and defines. | ||
2 | * | ||
3 | * Copyright (C) 2006 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #ifndef _SPARC64_VDEV_H | ||
7 | #define _SPARC64_VDEV_H | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | |||
11 | extern u32 sun4v_vdev_devhandle; | ||
12 | extern int sun4v_vdev_root; | ||
13 | |||
14 | extern unsigned int sun4v_vdev_device_interrupt(unsigned int); | ||
15 | |||
16 | #endif /* !(_SPARC64_VDEV_H) */ | ||
diff --git a/include/asm-sparc64/xor.h b/include/asm-sparc64/xor.h index 8b3a7e4b6062..8ce3f1813e28 100644 --- a/include/asm-sparc64/xor.h +++ b/include/asm-sparc64/xor.h | |||
@@ -2,9 +2,11 @@ | |||
2 | * include/asm-sparc64/xor.h | 2 | * include/asm-sparc64/xor.h |
3 | * | 3 | * |
4 | * High speed xor_block operation for RAID4/5 utilizing the | 4 | * High speed xor_block operation for RAID4/5 utilizing the |
5 | * UltraSparc Visual Instruction Set. | 5 | * UltraSparc Visual Instruction Set and Niagara block-init |
6 | * twin-load instructions. | ||
6 | * | 7 | * |
7 | * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz) | 8 | * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz) |
9 | * Copyright (C) 2006 David S. Miller <davem@davemloft.net> | ||
8 | * | 10 | * |
9 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
@@ -16,8 +18,7 @@ | |||
16 | * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 18 | * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
17 | */ | 19 | */ |
18 | 20 | ||
19 | #include <asm/pstate.h> | 21 | #include <asm/spitfire.h> |
20 | #include <asm/asi.h> | ||
21 | 22 | ||
22 | extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); | 23 | extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); |
23 | extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *, | 24 | extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *, |
@@ -37,4 +38,29 @@ static struct xor_block_template xor_block_VIS = { | |||
37 | .do_5 = xor_vis_5, | 38 | .do_5 = xor_vis_5, |
38 | }; | 39 | }; |
39 | 40 | ||
40 | #define XOR_TRY_TEMPLATES xor_speed(&xor_block_VIS) | 41 | extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *); |
42 | extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *, | ||
43 | unsigned long *); | ||
44 | extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *, | ||
45 | unsigned long *, unsigned long *); | ||
46 | extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *, | ||
47 | unsigned long *, unsigned long *, unsigned long *); | ||
48 | |||
49 | static struct xor_block_template xor_block_niagara = { | ||
50 | .name = "Niagara", | ||
51 | .do_2 = xor_niagara_2, | ||
52 | .do_3 = xor_niagara_3, | ||
53 | .do_4 = xor_niagara_4, | ||
54 | .do_5 = xor_niagara_5, | ||
55 | }; | ||
56 | |||
57 | #undef XOR_TRY_TEMPLATES | ||
58 | #define XOR_TRY_TEMPLATES \ | ||
59 | do { \ | ||
60 | xor_speed(&xor_block_VIS); \ | ||
61 | xor_speed(&xor_block_niagara); \ | ||
62 | } while (0) | ||
63 | |||
64 | /* Use VIS for everything except Niagara. */ | ||
65 | #define XOR_SELECT_TEMPLATE(FASTEST) \ | ||
66 | (tlb_type == hypervisor ? &xor_block_niagara : &xor_block_VIS) | ||
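xor_speed() still benchmarks both templates, but the final pick is pinned by chip type rather than measured throughput: on sun4v (tlb_type == hypervisor, i.e. Niagara) the block-init twin-load routines win outright, everything else keeps VIS. In effect:

        /* What XOR_SELECT_TEMPLATE(FASTEST) evaluates to, ignoring FASTEST: */
        struct xor_block_template *tmpl =
                (tlb_type == hypervisor) ? &xor_block_niagara : &xor_block_VIS;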
diff --git a/include/linux/arcdevice.h b/include/linux/arcdevice.h index 7198f129e135..231ba090ae34 100644 --- a/include/linux/arcdevice.h +++ b/include/linux/arcdevice.h | |||
@@ -206,7 +206,6 @@ struct ArcProto { | |||
206 | 206 | ||
207 | extern struct ArcProto *arc_proto_map[256], *arc_proto_default, | 207 | extern struct ArcProto *arc_proto_map[256], *arc_proto_default, |
208 | *arc_bcast_proto, *arc_raw_proto; | 208 | *arc_bcast_proto, *arc_raw_proto; |
209 | extern struct ArcProto arc_proto_null; | ||
210 | 209 | ||
211 | 210 | ||
212 | /* | 211 | /* |
@@ -334,17 +333,9 @@ void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc); | |||
334 | #define arcnet_dump_skb(dev,skb,desc) ; | 333 | #define arcnet_dump_skb(dev,skb,desc) ; |
335 | #endif | 334 | #endif |
336 | 335 | ||
337 | #if (ARCNET_DEBUG_MAX & D_RX) || (ARCNET_DEBUG_MAX & D_TX) | ||
338 | void arcnet_dump_packet(struct net_device *dev, int bufnum, char *desc, | ||
339 | int take_arcnet_lock); | ||
340 | #else | ||
341 | #define arcnet_dump_packet(dev, bufnum, desc,take_arcnet_lock) ; | ||
342 | #endif | ||
343 | |||
344 | void arcnet_unregister_proto(struct ArcProto *proto); | 336 | void arcnet_unregister_proto(struct ArcProto *proto); |
345 | irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs); | 337 | irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs); |
346 | struct net_device *alloc_arcdev(char *name); | 338 | struct net_device *alloc_arcdev(char *name); |
347 | void arcnet_rx(struct net_device *dev, int bufnum); | ||
348 | 339 | ||
349 | #endif /* __KERNEL__ */ | 340 | #endif /* __KERNEL__ */ |
350 | #endif /* _LINUX_ARCDEVICE_H */ | 341 | #endif /* _LINUX_ARCDEVICE_H */ |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 860e7a485a5f..56bb6a4e15f3 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -58,7 +58,7 @@ struct cfq_io_context { | |||
58 | * circular list of cfq_io_contexts belonging to a process io context | 58 | * circular list of cfq_io_contexts belonging to a process io context |
59 | */ | 59 | */ |
60 | struct list_head list; | 60 | struct list_head list; |
61 | struct cfq_queue *cfqq; | 61 | struct cfq_queue *cfqq[2]; |
62 | void *key; | 62 | void *key; |
63 | 63 | ||
64 | struct io_context *ioc; | 64 | struct io_context *ioc; |
@@ -69,6 +69,8 @@ struct cfq_io_context { | |||
69 | unsigned long ttime_samples; | 69 | unsigned long ttime_samples; |
70 | unsigned long ttime_mean; | 70 | unsigned long ttime_mean; |
71 | 71 | ||
72 | struct list_head queue_list; | ||
73 | |||
72 | void (*dtor)(struct cfq_io_context *); | 74 | void (*dtor)(struct cfq_io_context *); |
73 | void (*exit)(struct cfq_io_context *); | 75 | void (*exit)(struct cfq_io_context *); |
74 | }; | 76 | }; |
@@ -404,8 +406,6 @@ struct request_queue | |||
404 | 406 | ||
405 | struct blk_queue_tag *queue_tags; | 407 | struct blk_queue_tag *queue_tags; |
406 | 408 | ||
407 | atomic_t refcnt; | ||
408 | |||
409 | unsigned int nr_sorted; | 409 | unsigned int nr_sorted; |
410 | unsigned int in_flight; | 410 | unsigned int in_flight; |
411 | 411 | ||
@@ -424,6 +424,8 @@ struct request_queue | |||
424 | struct request pre_flush_rq, bar_rq, post_flush_rq; | 424 | struct request pre_flush_rq, bar_rq, post_flush_rq; |
425 | struct request *orig_bar_rq; | 425 | struct request *orig_bar_rq; |
426 | unsigned int bi_size; | 426 | unsigned int bi_size; |
427 | |||
428 | struct mutex sysfs_lock; | ||
427 | }; | 429 | }; |
428 | 430 | ||
429 | #define RQ_INACTIVE (-1) | 431 | #define RQ_INACTIVE (-1) |
@@ -725,7 +727,7 @@ extern long nr_blockdev_pages(void); | |||
725 | int blk_get_queue(request_queue_t *); | 727 | int blk_get_queue(request_queue_t *); |
726 | request_queue_t *blk_alloc_queue(gfp_t); | 728 | request_queue_t *blk_alloc_queue(gfp_t); |
727 | request_queue_t *blk_alloc_queue_node(gfp_t, int); | 729 | request_queue_t *blk_alloc_queue_node(gfp_t, int); |
728 | #define blk_put_queue(q) blk_cleanup_queue((q)) | 730 | extern void blk_put_queue(request_queue_t *); |
729 | 731 | ||
730 | /* | 732 | /* |
731 | * tag stuff | 733 | * tag stuff |
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 18cf1f3e1184..ad133fcfb239 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
@@ -48,10 +48,17 @@ struct elevator_ops | |||
48 | 48 | ||
49 | elevator_init_fn *elevator_init_fn; | 49 | elevator_init_fn *elevator_init_fn; |
50 | elevator_exit_fn *elevator_exit_fn; | 50 | elevator_exit_fn *elevator_exit_fn; |
51 | void (*trim)(struct io_context *); | ||
51 | }; | 52 | }; |
52 | 53 | ||
53 | #define ELV_NAME_MAX (16) | 54 | #define ELV_NAME_MAX (16) |
54 | 55 | ||
56 | struct elv_fs_entry { | ||
57 | struct attribute attr; | ||
58 | ssize_t (*show)(elevator_t *, char *); | ||
59 | ssize_t (*store)(elevator_t *, const char *, size_t); | ||
60 | }; | ||
61 | |||
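With the per-elevator kobj_type gone, an I/O scheduler now exports its sysfs tunables as a flat elv_fs_entry table hung off elevator_attrs (see the struct elevator_type hunk below). A hypothetical scheduler's table — every `foo_` name here is invented for illustration:

        struct foo_data {
                unsigned int quantum;
        };

        static ssize_t foo_quantum_show(elevator_t *e, char *page)
        {
                struct foo_data *fd = e->elevator_data;
                return sprintf(page, "%u\n", fd->quantum);
        }

        static ssize_t foo_quantum_store(elevator_t *e, const char *page,
                                         size_t len)
        {
                struct foo_data *fd = e->elevator_data;
                fd->quantum = simple_strtoul(page, NULL, 10);
                return len;
        }

        static struct elv_fs_entry foo_attrs[] = {
                __ATTR(quantum, S_IRUGO | S_IWUSR,
                       foo_quantum_show, foo_quantum_store),
                __ATTR_NULL
        };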
55 | /* | 62 | /* |
56 | * identifies an elevator type, such as AS or deadline | 63 | * identifies an elevator type, such as AS or deadline |
57 | */ | 64 | */ |
@@ -60,7 +67,7 @@ struct elevator_type | |||
60 | struct list_head list; | 67 | struct list_head list; |
61 | struct elevator_ops ops; | 68 | struct elevator_ops ops; |
62 | struct elevator_type *elevator_type; | 69 | struct elevator_type *elevator_type; |
63 | struct kobj_type *elevator_ktype; | 70 | struct elv_fs_entry *elevator_attrs; |
64 | char elevator_name[ELV_NAME_MAX]; | 71 | char elevator_name[ELV_NAME_MAX]; |
65 | struct module *elevator_owner; | 72 | struct module *elevator_owner; |
66 | }; | 73 | }; |
@@ -74,6 +81,7 @@ struct elevator_queue | |||
74 | void *elevator_data; | 81 | void *elevator_data; |
75 | struct kobject kobj; | 82 | struct kobject kobj; |
76 | struct elevator_type *elevator_type; | 83 | struct elevator_type *elevator_type; |
84 | struct mutex sysfs_lock; | ||
77 | }; | 85 | }; |
78 | 86 | ||
79 | /* | 87 | /* |
diff --git a/include/linux/if.h b/include/linux/if.h index ce627d9092ef..12c6f6d157c3 100644 --- a/include/linux/if.h +++ b/include/linux/if.h | |||
@@ -52,6 +52,9 @@ | |||
52 | /* Private (from user) interface flags (netdevice->priv_flags). */ | 52 | /* Private (from user) interface flags (netdevice->priv_flags). */ |
53 | #define IFF_802_1Q_VLAN 0x1 /* 802.1Q VLAN device. */ | 53 | #define IFF_802_1Q_VLAN 0x1 /* 802.1Q VLAN device. */ |
54 | #define IFF_EBRIDGE 0x2 /* Ethernet bridging device. */ | 54 | #define IFF_EBRIDGE 0x2 /* Ethernet bridging device. */ |
55 | #define IFF_SLAVE_INACTIVE 0x4 /* bonding slave not the curr. active */ | ||
56 | #define IFF_MASTER_8023AD 0x8 /* bonding master, 802.3ad. */ | ||
57 | #define IFF_MASTER_ALB 0x10 /* bonding master, balance-alb. */ | ||
55 | 58 | ||
56 | #define IF_GET_IFACE 0x0001 /* for querying only */ | 59 | #define IF_GET_IFACE 0x0001 /* for querying only */ |
57 | #define IF_GET_PROTO 0x0002 | 60 | #define IF_GET_PROTO 0x0002 |
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index 7a92c1ce1457..ab08f35cbc35 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h | |||
@@ -61,6 +61,7 @@ | |||
61 | #define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */ | 61 | #define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */ |
62 | #define ETH_P_IPX 0x8137 /* IPX over DIX */ | 62 | #define ETH_P_IPX 0x8137 /* IPX over DIX */ |
63 | #define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */ | 63 | #define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */ |
64 | #define ETH_P_SLOW 0x8809 /* Slow Protocol. See 802.3ad 43B */ | ||
64 | #define ETH_P_WCCP 0x883E /* Web-cache coordination protocol | 65 | #define ETH_P_WCCP 0x883E /* Web-cache coordination protocol |
65 | * defined in draft-wilson-wrec-wccp-v2-00.txt */ | 66 | * defined in draft-wilson-wrec-wccp-v2-00.txt */ |
66 | #define ETH_P_PPP_DISC 0x8863 /* PPPoE discovery messages */ | 67 | #define ETH_P_PPP_DISC 0x8863 /* PPPoE discovery messages */ |
diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h index 0b08cd692201..955d3069d727 100644 --- a/include/linux/mv643xx.h +++ b/include/linux/mv643xx.h | |||
@@ -1214,6 +1214,7 @@ struct mv64xxx_i2c_pdata { | |||
1214 | #define MV643XX_ETH_FORCE_BP_MODE_NO_JAM 0 | 1214 | #define MV643XX_ETH_FORCE_BP_MODE_NO_JAM 0 |
1215 | #define MV643XX_ETH_FORCE_BP_MODE_JAM_TX (1<<7) | 1215 | #define MV643XX_ETH_FORCE_BP_MODE_JAM_TX (1<<7) |
1216 | #define MV643XX_ETH_FORCE_BP_MODE_JAM_TX_ON_RX_ERR (1<<8) | 1216 | #define MV643XX_ETH_FORCE_BP_MODE_JAM_TX_ON_RX_ERR (1<<8) |
1217 | #define MV643XX_ETH_SERIAL_PORT_CONTROL_RESERVED (1<<9) | ||
1217 | #define MV643XX_ETH_FORCE_LINK_FAIL 0 | 1218 | #define MV643XX_ETH_FORCE_LINK_FAIL 0 |
1218 | #define MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL (1<<10) | 1219 | #define MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL (1<<10) |
1219 | #define MV643XX_ETH_RETRANSMIT_16_ATTEMPTS 0 | 1220 | #define MV643XX_ETH_RETRANSMIT_16_ATTEMPTS 0 |
@@ -1243,6 +1244,8 @@ struct mv64xxx_i2c_pdata { | |||
1243 | #define MV643XX_ETH_SET_MII_SPEED_TO_10 0 | 1244 | #define MV643XX_ETH_SET_MII_SPEED_TO_10 0 |
1244 | #define MV643XX_ETH_SET_MII_SPEED_TO_100 (1<<24) | 1245 | #define MV643XX_ETH_SET_MII_SPEED_TO_100 (1<<24) |
1245 | 1246 | ||
1247 | #define MV643XX_ETH_MAX_RX_PACKET_MASK (0x7<<17) | ||
1248 | |||
1246 | #define MV643XX_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE \ | 1249 | #define MV643XX_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE \ |
1247 | MV643XX_ETH_DO_NOT_FORCE_LINK_PASS | \ | 1250 | MV643XX_ETH_DO_NOT_FORCE_LINK_PASS | \ |
1248 | MV643XX_ETH_ENABLE_AUTO_NEG_FOR_DUPLX | \ | 1251 | MV643XX_ETH_ENABLE_AUTO_NEG_FOR_DUPLX | \ |
@@ -1285,23 +1288,15 @@ struct mv64xxx_i2c_pdata { | |||
1285 | #define MV643XX_ETH_NAME "mv643xx_eth" | 1288 | #define MV643XX_ETH_NAME "mv643xx_eth" |
1286 | 1289 | ||
1287 | struct mv643xx_eth_platform_data { | 1290 | struct mv643xx_eth_platform_data { |
1288 | /* | ||
1289 | * Non-values for mac_addr, phy_addr, port_config, etc. | ||
1290 | * override the default value. Setting the corresponding | ||
1291 | * force_* field, causes the default value to be overridden | ||
1292 | * even when zero. | ||
1293 | */ | ||
1294 | unsigned int force_phy_addr:1; | ||
1295 | unsigned int force_port_config:1; | ||
1296 | unsigned int force_port_config_extend:1; | ||
1297 | unsigned int force_port_sdma_config:1; | ||
1298 | unsigned int force_port_serial_control:1; | ||
1299 | int phy_addr; | ||
1300 | char *mac_addr; /* pointer to mac address */ | 1291 | char *mac_addr; /* pointer to mac address */ |
1301 | u32 port_config; | 1292 | u16 force_phy_addr; /* force override if phy_addr == 0 */ |
1302 | u32 port_config_extend; | 1293 | u16 phy_addr; |
1303 | u32 port_sdma_config; | 1294 | |
1304 | u32 port_serial_control; | 1295 | /* If speed is 0, then speed and duplex are autonegotiated. */ |
1296 | int speed; /* 0, SPEED_10, SPEED_100, SPEED_1000 */ | ||
1297 | int duplex; /* DUPLEX_HALF or DUPLEX_FULL */ | ||
1298 | |||
1299 | /* non-zero values of the following fields override defaults */ | ||
1305 | u32 tx_queue_size; | 1300 | u32 tx_queue_size; |
1306 | u32 rx_queue_size; | 1301 | u32 rx_queue_size; |
1307 | u32 tx_sram_addr; | 1302 | u32 tx_sram_addr; |
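A board file now supplies link policy rather than raw port register images; purely illustrative values:

        static char example_mac[6] = { 0x00, 0x0a, 0x1b, 0x2c, 0x3d, 0x4e };

        static struct mv643xx_eth_platform_data example_eth_pd = {
                .mac_addr       = example_mac,
                .phy_addr       = 8,
                .force_phy_addr = 1,    /* honor phy_addr even if it were 0 */
                .speed          = 0,    /* 0: autonegotiate speed and duplex */
                .duplex         = DUPLEX_FULL,  /* ignored while speed == 0 */
                .rx_queue_size  = 400,  /* non-zero values override defaults */
                .tx_queue_size  = 800,
        };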
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 4041122dabfc..57abcea1cb5d 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h | |||
@@ -127,6 +127,9 @@ | |||
127 | /* Hilscher netx */ | 127 | /* Hilscher netx */ |
128 | #define PORT_NETX 71 | 128 | #define PORT_NETX 71 |
129 | 129 | ||
130 | /* SUN4V Hypervisor Console */ | ||
131 | #define PORT_SUNHV 72 | ||
132 | |||
130 | #ifdef __KERNEL__ | 133 | #ifdef __KERNEL__ |
131 | 134 | ||
132 | #include <linux/config.h> | 135 | #include <linux/config.h> |
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h index 9a92aef8b0b2..4725ff861c57 100644 --- a/include/net/ieee80211.h +++ b/include/net/ieee80211.h | |||
@@ -220,6 +220,7 @@ struct ieee80211_snap_hdr { | |||
220 | /* Authentication algorithms */ | 220 | /* Authentication algorithms */ |
221 | #define WLAN_AUTH_OPEN 0 | 221 | #define WLAN_AUTH_OPEN 0 |
222 | #define WLAN_AUTH_SHARED_KEY 1 | 222 | #define WLAN_AUTH_SHARED_KEY 1 |
223 | #define WLAN_AUTH_LEAP 2 | ||
223 | 224 | ||
224 | #define WLAN_AUTH_CHALLENGE_LEN 128 | 225 | #define WLAN_AUTH_CHALLENGE_LEN 128 |
225 | 226 | ||
@@ -299,6 +300,23 @@ enum ieee80211_reasoncode { | |||
299 | WLAN_REASON_CIPHER_SUITE_REJECTED = 24, | 300 | WLAN_REASON_CIPHER_SUITE_REJECTED = 24, |
300 | }; | 301 | }; |
301 | 302 | ||
303 | /* Action categories - 802.11h */ | ||
304 | enum ieee80211_actioncategories { | ||
305 | WLAN_ACTION_SPECTRUM_MGMT = 0, | ||
306 | /* Reserved 1-127 */ | ||
307 | /* Error 128-255 */ | ||
308 | }; | ||
309 | |||
310 | /* Action details - 802.11h */ | ||
311 | enum ieee80211_actiondetails { | ||
312 | WLAN_ACTION_CATEGORY_MEASURE_REQUEST = 0, | ||
313 | WLAN_ACTION_CATEGORY_MEASURE_REPORT = 1, | ||
314 | WLAN_ACTION_CATEGORY_TPC_REQUEST = 2, | ||
315 | WLAN_ACTION_CATEGORY_TPC_REPORT = 3, | ||
316 | WLAN_ACTION_CATEGORY_CHANNEL_SWITCH = 4, | ||
317 | /* 5 - 255 Reserved */ | ||
318 | }; | ||
319 | |||
302 | #define IEEE80211_STATMASK_SIGNAL (1<<0) | 320 | #define IEEE80211_STATMASK_SIGNAL (1<<0) |
303 | #define IEEE80211_STATMASK_RSSI (1<<1) | 321 | #define IEEE80211_STATMASK_RSSI (1<<1) |
304 | #define IEEE80211_STATMASK_NOISE (1<<2) | 322 | #define IEEE80211_STATMASK_NOISE (1<<2) |
@@ -377,6 +395,8 @@ struct ieee80211_rx_stats { | |||
377 | u8 mask; | 395 | u8 mask; |
378 | u8 freq; | 396 | u8 freq; |
379 | u16 len; | 397 | u16 len; |
398 | u64 tsf; | ||
399 | u32 beacon_time; | ||
380 | }; | 400 | }; |
381 | 401 | ||
382 | /* IEEE 802.11 requires that STA supports concurrent reception of at least | 402 | /* IEEE 802.11 requires that STA supports concurrent reception of at least |
@@ -608,6 +628,28 @@ struct ieee80211_auth { | |||
608 | struct ieee80211_info_element info_element[0]; | 628 | struct ieee80211_info_element info_element[0]; |
609 | } __attribute__ ((packed)); | 629 | } __attribute__ ((packed)); |
610 | 630 | ||
631 | struct ieee80211_channel_switch { | ||
632 | u8 id; | ||
633 | u8 len; | ||
634 | u8 mode; | ||
635 | u8 channel; | ||
636 | u8 count; | ||
637 | } __attribute__ ((packed)); | ||
638 | |||
639 | struct ieee80211_action { | ||
640 | struct ieee80211_hdr_3addr header; | ||
641 | u8 category; | ||
642 | u8 action; | ||
643 | union { | ||
644 | struct ieee80211_action_exchange { | ||
645 | u8 token; | ||
646 | struct ieee80211_info_element info_element[0]; | ||
647 | } exchange; | ||
648 | struct ieee80211_channel_switch channel_switch; | ||
649 | |||
650 | } format; | ||
651 | } __attribute__ ((packed)); | ||
652 | |||
611 | struct ieee80211_disassoc { | 653 | struct ieee80211_disassoc { |
612 | struct ieee80211_hdr_3addr header; | 654 | struct ieee80211_hdr_3addr header; |
613 | __le16 reason; | 655 | __le16 reason; |
@@ -692,7 +734,15 @@ struct ieee80211_txb { | |||
692 | /* QoS structure */ | 734 | /* QoS structure */ |
693 | #define NETWORK_HAS_QOS_PARAMETERS (1<<3) | 735 | #define NETWORK_HAS_QOS_PARAMETERS (1<<3) |
694 | #define NETWORK_HAS_QOS_INFORMATION (1<<4) | 736 | #define NETWORK_HAS_QOS_INFORMATION (1<<4) |
695 | #define NETWORK_HAS_QOS_MASK (NETWORK_HAS_QOS_PARAMETERS | NETWORK_HAS_QOS_INFORMATION) | 737 | #define NETWORK_HAS_QOS_MASK (NETWORK_HAS_QOS_PARAMETERS | \ |
738 | NETWORK_HAS_QOS_INFORMATION) | ||
739 | |||
740 | /* 802.11h */ | ||
741 | #define NETWORK_HAS_POWER_CONSTRAINT (1<<5) | ||
742 | #define NETWORK_HAS_CSA (1<<6) | ||
743 | #define NETWORK_HAS_QUIET (1<<7) | ||
744 | #define NETWORK_HAS_IBSS_DFS (1<<8) | ||
745 | #define NETWORK_HAS_TPC_REPORT (1<<9) | ||
696 | 746 | ||
697 | #define QOS_QUEUE_NUM 4 | 747 | #define QOS_QUEUE_NUM 4 |
698 | #define QOS_OUI_LEN 3 | 748 | #define QOS_OUI_LEN 3 |
@@ -748,6 +798,91 @@ struct ieee80211_tim_parameters { | |||
748 | 798 | ||
749 | /*******************************************************/ | 799 | /*******************************************************/ |
750 | 800 | ||
801 | enum { /* ieee80211_basic_report.map */ | ||
802 | IEEE80211_BASIC_MAP_BSS = (1 << 0), | ||
803 | IEEE80211_BASIC_MAP_OFDM = (1 << 1), | ||
804 | IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2), | ||
805 | IEEE80211_BASIC_MAP_RADAR = (1 << 3), | ||
806 | IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4), | ||
807 | /* Bits 5-7 are reserved */ | ||
808 | }; | ||
809 | |||
810 | struct ieee80211_basic_report { | ||
811 | u8 channel; | ||
812 | __le64 start_time; | ||
813 | __le16 duration; | ||
814 | u8 map; | ||
815 | } __attribute__ ((packed)); | ||
816 | |||
817 | enum { /* ieee80211_measurement_request.mode */ | ||
818 | /* Bit 0 is reserved */ | ||
819 | IEEE80211_MEASUREMENT_ENABLE = (1 << 1), | ||
820 | IEEE80211_MEASUREMENT_REQUEST = (1 << 2), | ||
821 | IEEE80211_MEASUREMENT_REPORT = (1 << 3), | ||
822 | /* Bits 4-7 are reserved */ | ||
823 | }; | ||
824 | |||
825 | enum { | ||
826 | IEEE80211_REPORT_BASIC = 0, /* required */ | ||
827 | IEEE80211_REPORT_CCA = 1, /* optional */ | ||
828 | IEEE80211_REPORT_RPI = 2, /* optional */ | ||
829 | /* 3-255 reserved */ | ||
830 | }; | ||
831 | |||
832 | struct ieee80211_measurement_params { | ||
833 | u8 channel; | ||
834 | __le64 start_time; | ||
835 | __le16 duration; | ||
836 | } __attribute__ ((packed)); | ||
837 | |||
838 | struct ieee80211_measurement_request { | ||
839 | struct ieee80211_info_element ie; | ||
840 | u8 token; | ||
841 | u8 mode; | ||
842 | u8 type; | ||
843 | struct ieee80211_measurement_params params[0]; | ||
844 | } __attribute__ ((packed)); | ||
845 | |||
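Because params[] is a zero-length array, a request carrying n measurement windows is variable-sized; the conventional allocation is:

        size_t len = sizeof(struct ieee80211_measurement_request) +
                     n * sizeof(struct ieee80211_measurement_params);
        struct ieee80211_measurement_request *req = kzalloc(len, GFP_KERNEL);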
846 | struct ieee80211_measurement_report { | ||
847 | struct ieee80211_info_element ie; | ||
848 | u8 token; | ||
849 | u8 mode; | ||
850 | u8 type; | ||
851 | union { | ||
852 | struct ieee80211_basic_report basic[0]; | ||
853 | } u; | ||
854 | } __attribute__ ((packed)); | ||
855 | |||
856 | struct ieee80211_tpc_report { | ||
857 | u8 transmit_power; | ||
858 | u8 link_margin; | ||
859 | } __attribute__ ((packed)); | ||
860 | |||
861 | struct ieee80211_channel_map { | ||
862 | u8 channel; | ||
863 | u8 map; | ||
864 | } __attribute__ ((packed)); | ||
865 | |||
866 | struct ieee80211_ibss_dfs { | ||
867 | struct ieee80211_info_element ie; | ||
868 | u8 owner[ETH_ALEN]; | ||
869 | u8 recovery_interval; | ||
870 | struct ieee80211_channel_map channel_map[0]; | ||
871 | }; | ||
872 | |||
873 | struct ieee80211_csa { | ||
874 | u8 mode; | ||
875 | u8 channel; | ||
876 | u8 count; | ||
877 | } __attribute__ ((packed)); | ||
878 | |||
879 | struct ieee80211_quiet { | ||
880 | u8 count; | ||
881 | u8 period; | ||
882 | u8 duration; | ||
883 | u8 offset; | ||
884 | } __attribute__ ((packed)); | ||
885 | |||
751 | struct ieee80211_network { | 886 | struct ieee80211_network { |
752 | /* These entries are used to identify a unique network */ | 887 | /* These entries are used to identify a unique network */ |
753 | u8 bssid[ETH_ALEN]; | 888 | u8 bssid[ETH_ALEN]; |
@@ -767,7 +902,7 @@ struct ieee80211_network { | |||
767 | u8 rates_ex_len; | 902 | u8 rates_ex_len; |
768 | unsigned long last_scanned; | 903 | unsigned long last_scanned; |
769 | u8 mode; | 904 | u8 mode; |
770 | u8 flags; | 905 | u32 flags; |
771 | u32 last_associate; | 906 | u32 last_associate; |
772 | u32 time_stamp[2]; | 907 | u32 time_stamp[2]; |
773 | u16 beacon_interval; | 908 | u16 beacon_interval; |
@@ -779,6 +914,25 @@ struct ieee80211_network { | |||
779 | u8 rsn_ie[MAX_WPA_IE_LEN]; | 914 | u8 rsn_ie[MAX_WPA_IE_LEN]; |
780 | size_t rsn_ie_len; | 915 | size_t rsn_ie_len; |
781 | struct ieee80211_tim_parameters tim; | 916 | struct ieee80211_tim_parameters tim; |
917 | |||
918 | /* 802.11h info */ | ||
919 | |||
920 | /* Power Constraint - mandatory if spctrm mgmt required */ | ||
921 | u8 power_constraint; | ||
922 | |||
923 | /* TPC Report - mandatory if spctrm mgmt required */ | ||
924 | struct ieee80211_tpc_report tpc_report; | ||
925 | |||
926 | /* IBSS DFS - mandatory if spctrm mgmt required and IBSS | ||
927 | * NOTE: This is variable length and so must be allocated dynamically */ | ||
928 | struct ieee80211_ibss_dfs *ibss_dfs; | ||
929 | |||
930 | /* Channel Switch Announcement - optional if spctrm mgmt required */ | ||
931 | struct ieee80211_csa csa; | ||
932 | |||
933 | /* Quiet - optional if spctrm mgmt required */ | ||
934 | struct ieee80211_quiet quiet; | ||
935 | |||
782 | struct list_head list; | 936 | struct list_head list; |
783 | }; | 937 | }; |
784 | 938 | ||
@@ -924,7 +1078,10 @@ struct ieee80211_device { | |||
924 | int (*handle_auth) (struct net_device * dev, | 1078 | int (*handle_auth) (struct net_device * dev, |
925 | struct ieee80211_auth * auth); | 1079 | struct ieee80211_auth * auth); |
926 | int (*handle_deauth) (struct net_device * dev, | 1080 | int (*handle_deauth) (struct net_device * dev, |
927 | struct ieee80211_auth * auth); | 1081 | struct ieee80211_deauth * auth); |
1082 | int (*handle_action) (struct net_device * dev, | ||
1083 | struct ieee80211_action * action, | ||
1084 | struct ieee80211_rx_stats * stats); | ||
928 | int (*handle_disassoc) (struct net_device * dev, | 1085 | int (*handle_disassoc) (struct net_device * dev, |
929 | struct ieee80211_disassoc * assoc); | 1086 | struct ieee80211_disassoc * assoc); |
930 | int (*handle_beacon) (struct net_device * dev, | 1087 | int (*handle_beacon) (struct net_device * dev, |
@@ -1093,6 +1250,7 @@ extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
1093 | extern void ieee80211_rx_mgt(struct ieee80211_device *ieee, | 1250 | extern void ieee80211_rx_mgt(struct ieee80211_device *ieee, |
1094 | struct ieee80211_hdr_4addr *header, | 1251 | struct ieee80211_hdr_4addr *header, |
1095 | struct ieee80211_rx_stats *stats); | 1252 | struct ieee80211_rx_stats *stats); |
1253 | extern void ieee80211_network_reset(struct ieee80211_network *network); | ||
1096 | 1254 | ||
1097 | /* ieee80211_geo.c */ | 1255 | /* ieee80211_geo.c */ |
1098 | extern const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device | 1256 | extern const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device |
@@ -1105,6 +1263,11 @@ extern int ieee80211_is_valid_channel(struct ieee80211_device *ieee, | |||
1105 | extern int ieee80211_channel_to_index(struct ieee80211_device *ieee, | 1263 | extern int ieee80211_channel_to_index(struct ieee80211_device *ieee, |
1106 | u8 channel); | 1264 | u8 channel); |
1107 | extern u8 ieee80211_freq_to_channel(struct ieee80211_device *ieee, u32 freq); | 1265 | extern u8 ieee80211_freq_to_channel(struct ieee80211_device *ieee, u32 freq); |
1266 | extern u8 ieee80211_get_channel_flags(struct ieee80211_device *ieee, | ||
1267 | u8 channel); | ||
1268 | extern const struct ieee80211_channel *ieee80211_get_channel( | ||
1269 | struct ieee80211_device *ieee, | ||
1270 | u8 channel); | ||
1108 | 1271 | ||
1109 | /* ieee80211_wx.c */ | 1272 | /* ieee80211_wx.c */ |
1110 | extern int ieee80211_wx_get_scan(struct ieee80211_device *ieee, | 1273 | extern int ieee80211_wx_get_scan(struct ieee80211_device *ieee, |
@@ -1122,6 +1285,14 @@ extern int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee, | |||
1122 | extern int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee, | 1285 | extern int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee, |
1123 | struct iw_request_info *info, | 1286 | struct iw_request_info *info, |
1124 | union iwreq_data *wrqu, char *extra); | 1287 | union iwreq_data *wrqu, char *extra); |
1288 | extern int ieee80211_wx_set_auth(struct net_device *dev, | ||
1289 | struct iw_request_info *info, | ||
1290 | union iwreq_data *wrqu, | ||
1291 | char *extra); | ||
1292 | extern int ieee80211_wx_get_auth(struct net_device *dev, | ||
1293 | struct iw_request_info *info, | ||
1294 | union iwreq_data *wrqu, | ||
1295 | char *extra); | ||
1125 | 1296 | ||
1126 | static inline void ieee80211_increment_scans(struct ieee80211_device *ieee) | 1297 | static inline void ieee80211_increment_scans(struct ieee80211_device *ieee) |
1127 | { | 1298 | { |
diff --git a/include/net/ieee80211_crypt.h b/include/net/ieee80211_crypt.h index cd82c3e998e4..eb476414fd72 100644 --- a/include/net/ieee80211_crypt.h +++ b/include/net/ieee80211_crypt.h | |||
@@ -47,7 +47,8 @@ struct ieee80211_crypto_ops { | |||
47 | /* deinitialize crypto context and free allocated private data */ | 47 | /* deinitialize crypto context and free allocated private data */ |
48 | void (*deinit) (void *priv); | 48 | void (*deinit) (void *priv); |
49 | 49 | ||
50 | int (*build_iv) (struct sk_buff * skb, int hdr_len, void *priv); | 50 | int (*build_iv) (struct sk_buff * skb, int hdr_len, |
51 | u8 *key, int keylen, void *priv); | ||
51 | 52 | ||
52 | /* encrypt/decrypt return < 0 on error or >= 0 on success. The return | 53 | /* encrypt/decrypt return < 0 on error or >= 0 on success. The return |
53 | * value from decrypt_mpdu is passed as the keyidx value for | 54 | * value from decrypt_mpdu is passed as the keyidx value for |