Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/c-octeon.c        |   6
-rw-r--r--  arch/mips/mm/c-r3k.c           |   8
-rw-r--r--  arch/mips/mm/c-r4k.c           |  34
-rw-r--r--  arch/mips/mm/c-tx39.c          |   2
-rw-r--r--  arch/mips/mm/cache.c           |   2
-rw-r--r--  arch/mips/mm/cex-sb1.S         |   4
-rw-r--r--  arch/mips/mm/page.c            |  40
-rw-r--r--  arch/mips/mm/sc-ip22.c         |   2
-rw-r--r--  arch/mips/mm/sc-mips.c         |   2
-rw-r--r--  arch/mips/mm/sc-r5k.c          |   2
-rw-r--r--  arch/mips/mm/sc-rm7k.c         |  12
-rw-r--r--  arch/mips/mm/tlb-r3k.c         |   2
-rw-r--r--  arch/mips/mm/tlb-r4k.c         |   4
-rw-r--r--  arch/mips/mm/tlb-r8k.c         |   4
-rw-r--r--  arch/mips/mm/tlbex.c           | 144
-rw-r--r--  arch/mips/mm/uasm-micromips.c  |  10
-rw-r--r--  arch/mips/mm/uasm-mips.c       |  10
-rw-r--r--  arch/mips/mm/uasm.c            | 106
18 files changed, 187 insertions, 207 deletions
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 8557fb552863..a0bcdbb81d41 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -180,7 +180,7 @@ static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
 * Probe Octeon's caches
 *
 */
-static void __cpuinit probe_octeon(void)
+static void probe_octeon(void)
 {
 	unsigned long icache_size;
 	unsigned long dcache_size;
@@ -251,7 +251,7 @@ static void __cpuinit probe_octeon(void)
 	}
 }

-static void __cpuinit octeon_cache_error_setup(void)
+static void octeon_cache_error_setup(void)
 {
 	extern char except_vec2_octeon;
 	set_handler(0x100, &except_vec2_octeon, 0x80);
@@ -261,7 +261,7 @@ static void __cpuinit octeon_cache_error_setup(void)
 * Setup the Octeon cache flush routines
 *
 */
-void __cpuinit octeon_cache_init(void)
+void octeon_cache_init(void)
 {
 	probe_octeon();

diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 704dc735a59d..2fcde0c8ea02 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -26,7 +26,7 @@
 static unsigned long icache_size, dcache_size; /* Size in bytes */
 static unsigned long icache_lsize, dcache_lsize; /* Size in bytes */

-unsigned long __cpuinit r3k_cache_size(unsigned long ca_flags)
+unsigned long r3k_cache_size(unsigned long ca_flags)
 {
 	unsigned long flags, status, dummy, size;
 	volatile unsigned long *p;
@@ -61,7 +61,7 @@ unsigned long __cpuinit r3k_cache_size(unsigned long ca_flags)
 	return size * sizeof(*p);
 }

-unsigned long __cpuinit r3k_cache_lsize(unsigned long ca_flags)
+unsigned long r3k_cache_lsize(unsigned long ca_flags)
 {
 	unsigned long flags, status, lsize, i;
 	volatile unsigned long *p;
@@ -90,7 +90,7 @@ unsigned long __cpuinit r3k_cache_lsize(unsigned long ca_flags)
 	return lsize * sizeof(*p);
 }

-static void __cpuinit r3k_probe_cache(void)
+static void r3k_probe_cache(void)
 {
 	dcache_size = r3k_cache_size(ST0_ISC);
 	if (dcache_size)
@@ -312,7 +312,7 @@ static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
 	r3k_flush_dcache_range(start, start + size);
 }

-void __cpuinit r3k_cache_init(void)
+void r3k_cache_init(void)
 {
 	extern void build_clear_page(void);
 	extern void build_copy_page(void);
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 21813beec7a5..f749f687ee87 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -107,7 +107,7 @@ static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
 	blast_dcache64_page(addr);
 }

-static void __cpuinit r4k_blast_dcache_page_setup(void)
+static void r4k_blast_dcache_page_setup(void)
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();

@@ -123,7 +123,7 @@ static void __cpuinit r4k_blast_dcache_page_setup(void)

 static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

-static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
+static void r4k_blast_dcache_page_indexed_setup(void)
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();

@@ -140,7 +140,7 @@ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
 void (* r4k_blast_dcache)(void);
 EXPORT_SYMBOL(r4k_blast_dcache);

-static void __cpuinit r4k_blast_dcache_setup(void)
+static void r4k_blast_dcache_setup(void)
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();

@@ -227,7 +227,7 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)

 static void (* r4k_blast_icache_page)(unsigned long addr);

-static void __cpuinit r4k_blast_icache_page_setup(void)
+static void r4k_blast_icache_page_setup(void)
 {
 	unsigned long ic_lsize = cpu_icache_line_size();

@@ -244,7 +244,7 @@ static void __cpuinit r4k_blast_icache_page_setup(void)

 static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

-static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
+static void r4k_blast_icache_page_indexed_setup(void)
 {
 	unsigned long ic_lsize = cpu_icache_line_size();

@@ -269,7 +269,7 @@ static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
 void (* r4k_blast_icache)(void);
 EXPORT_SYMBOL(r4k_blast_icache);

-static void __cpuinit r4k_blast_icache_setup(void)
+static void r4k_blast_icache_setup(void)
 {
 	unsigned long ic_lsize = cpu_icache_line_size();

@@ -290,7 +290,7 @@ static void __cpuinit r4k_blast_icache_setup(void)

 static void (* r4k_blast_scache_page)(unsigned long addr);

-static void __cpuinit r4k_blast_scache_page_setup(void)
+static void r4k_blast_scache_page_setup(void)
 {
 	unsigned long sc_lsize = cpu_scache_line_size();

@@ -308,7 +308,7 @@ static void __cpuinit r4k_blast_scache_page_setup(void)

 static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

-static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
+static void r4k_blast_scache_page_indexed_setup(void)
 {
 	unsigned long sc_lsize = cpu_scache_line_size();

@@ -326,7 +326,7 @@ static void __cpuinit r4k_blast_scache_page_indexed_setup(void)

 static void (* r4k_blast_scache)(void);

-static void __cpuinit r4k_blast_scache_setup(void)
+static void r4k_blast_scache_setup(void)
 {
 	unsigned long sc_lsize = cpu_scache_line_size();

@@ -797,11 +797,11 @@ static inline void alias_74k_erratum(struct cpuinfo_mips *c)
 	}
 }

-static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
+static char *way_string[] = { NULL, "direct mapped", "2-way",
 	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
 };

-static void __cpuinit probe_pcache(void)
+static void probe_pcache(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 	unsigned int config = read_c0_config();
@@ -1119,7 +1119,7 @@ static void __cpuinit probe_pcache(void)
 * executes in KSEG1 space or else you will crash and burn badly. You have
 * been warned.
 */
-static int __cpuinit probe_scache(void)
+static int probe_scache(void)
 {
 	unsigned long flags, addr, begin, end, pow2;
 	unsigned int config = read_c0_config();
@@ -1196,7 +1196,7 @@ extern int r5k_sc_init(void);
 extern int rm7k_sc_init(void);
 extern int mips_sc_init(void);

-static void __cpuinit setup_scache(void)
+static void setup_scache(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 	unsigned int config = read_c0_config();
@@ -1329,7 +1329,7 @@ static void nxp_pr4450_fixup_config(void)
 	NXP_BARRIER();
 }

-static int __cpuinitdata cca = -1;
+static int cca = -1;

 static int __init cca_setup(char *str)
 {
@@ -1340,7 +1340,7 @@ static int __init cca_setup(char *str)

 early_param("cca", cca_setup);

-static void __cpuinit coherency_setup(void)
+static void coherency_setup(void)
 {
 	if (cca < 0 || cca > 7)
 		cca = read_c0_config() & CONF_CM_CMASK;
@@ -1380,7 +1380,7 @@ static void __cpuinit coherency_setup(void)
 	}
 }

-static void __cpuinit r4k_cache_error_setup(void)
+static void r4k_cache_error_setup(void)
 {
 	extern char __weak except_vec2_generic;
 	extern char __weak except_vec2_sb1;
@@ -1398,7 +1398,7 @@ static void __cpuinit r4k_cache_error_setup(void)
 	}
 }

-void __cpuinit r4k_cache_init(void)
+void r4k_cache_init(void)
 {
 	extern void build_clear_page(void);
 	extern void build_copy_page(void);
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index ba9da270289f..8d909dbbf37f 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -344,7 +344,7 @@ static __init void tx39_probe_cache(void)
 	}
 }

-void __cpuinit tx39_cache_init(void)
+void tx39_cache_init(void)
 {
 	extern void build_clear_page(void);
 	extern void build_copy_page(void);
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 5aeb3eb0b72f..15f813c303b4 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -182,7 +182,7 @@ static inline void setup_protection_map(void)
 	}
 }

-void __cpuinit cpu_cache_init(void)
+void cpu_cache_init(void)
 {
 	if (cpu_has_3k_cache) {
 		extern void __weak r3k_cache_init(void);
diff --git a/arch/mips/mm/cex-sb1.S b/arch/mips/mm/cex-sb1.S
index fe1d887e8d70..191cf6e0c725 100644
--- a/arch/mips/mm/cex-sb1.S
+++ b/arch/mips/mm/cex-sb1.S
@@ -49,8 +49,6 @@
 * (0x170-0x17f) are used to preserve k0, k1, and ra.
 */

-	__CPUINIT
-
 LEAF(except_vec2_sb1)
 	/*
 	 * If this error is recoverable, we need to exit the handler
@@ -142,8 +140,6 @@ unrecoverable:

 END(except_vec2_sb1)

-	__FINIT
-
 LEAF(handle_vec2_sb1)
 	mfc0	k0,CP0_CONFIG
 	li	k1,~CONF_CM_CMASK
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 2c0bd580b9da..218c2109a55d 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -66,29 +66,29 @@ UASM_L_LA(_copy_pref_both)
 UASM_L_LA(_copy_pref_store)

 /* We need one branch and therefore one relocation per target label. */
-static struct uasm_label __cpuinitdata labels[5];
-static struct uasm_reloc __cpuinitdata relocs[5];
+static struct uasm_label labels[5];
+static struct uasm_reloc relocs[5];

 #define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
 #define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

-static int pref_bias_clear_store __cpuinitdata;
-static int pref_bias_copy_load __cpuinitdata;
-static int pref_bias_copy_store __cpuinitdata;
+static int pref_bias_clear_store;
+static int pref_bias_copy_load;
+static int pref_bias_copy_store;

-static u32 pref_src_mode __cpuinitdata;
-static u32 pref_dst_mode __cpuinitdata;
+static u32 pref_src_mode;
+static u32 pref_dst_mode;

-static int clear_word_size __cpuinitdata;
-static int copy_word_size __cpuinitdata;
+static int clear_word_size;
+static int copy_word_size;

-static int half_clear_loop_size __cpuinitdata;
-static int half_copy_loop_size __cpuinitdata;
+static int half_clear_loop_size;
+static int half_copy_loop_size;

-static int cache_line_size __cpuinitdata;
+static int cache_line_size;
 #define cache_line_mask() (cache_line_size - 1)

-static inline void __cpuinit
+static inline void
 pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
 {
 	if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
@@ -108,7 +108,7 @@ pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
 	}
 }

-static void __cpuinit set_prefetch_parameters(void)
+static void set_prefetch_parameters(void)
 {
 	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
 		clear_word_size = 8;
@@ -199,7 +199,7 @@ static void __cpuinit set_prefetch_parameters(void)
 				      4 * copy_word_size));
 }

-static void __cpuinit build_clear_store(u32 **buf, int off)
+static void build_clear_store(u32 **buf, int off)
 {
 	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
 		uasm_i_sd(buf, ZERO, off, A0);
@@ -208,7 +208,7 @@ static void __cpuinit build_clear_store(u32 **buf, int off)
 	}
 }

-static inline void __cpuinit build_clear_pref(u32 **buf, int off)
+static inline void build_clear_pref(u32 **buf, int off)
 {
 	if (off & cache_line_mask())
 		return;
@@ -240,7 +240,7 @@ extern u32 __clear_page_end;
 extern u32 __copy_page_start;
 extern u32 __copy_page_end;

-void __cpuinit build_clear_page(void)
+void build_clear_page(void)
 {
 	int off;
 	u32 *buf = &__clear_page_start;
@@ -333,7 +333,7 @@ void __cpuinit build_clear_page(void)
 	pr_debug("\t.set pop\n");
 }

-static void __cpuinit build_copy_load(u32 **buf, int reg, int off)
+static void build_copy_load(u32 **buf, int reg, int off)
 {
 	if (cpu_has_64bit_gp_regs) {
 		uasm_i_ld(buf, reg, off, A1);
@@ -342,7 +342,7 @@ static void __cpuinit build_copy_load(u32 **buf, int reg, int off)
 	}
 }

-static void __cpuinit build_copy_store(u32 **buf, int reg, int off)
+static void build_copy_store(u32 **buf, int reg, int off)
 {
 	if (cpu_has_64bit_gp_regs) {
 		uasm_i_sd(buf, reg, off, A0);
@@ -387,7 +387,7 @@ static inline void build_copy_store_pref(u32 **buf, int off)
 	}
 }

-void __cpuinit build_copy_page(void)
+void build_copy_page(void)
 {
 	int off;
 	u32 *buf = &__copy_page_start;
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
index c6aaed934d53..dc7c5a5214a9 100644
--- a/arch/mips/mm/sc-ip22.c
+++ b/arch/mips/mm/sc-ip22.c
@@ -167,7 +167,7 @@ static struct bcache_ops indy_sc_ops = {
 	.bc_inv = indy_sc_wback_invalidate
 };

-void __cpuinit indy_sc_init(void)
+void indy_sc_init(void)
 {
 	if (indy_sc_probe()) {
 		indy_sc_enable();
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index df96da7e939b..5d01392e3518 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -132,7 +132,7 @@ static inline int __init mips_sc_probe(void)
 	return 1;
 }

-int __cpuinit mips_sc_init(void)
+int mips_sc_init(void)
 {
 	int found = mips_sc_probe();
 	if (found) {
diff --git a/arch/mips/mm/sc-r5k.c b/arch/mips/mm/sc-r5k.c
index 8bc67720e145..0216ed6eaa2a 100644
--- a/arch/mips/mm/sc-r5k.c
+++ b/arch/mips/mm/sc-r5k.c
@@ -98,7 +98,7 @@ static struct bcache_ops r5k_sc_ops = {
 	.bc_inv = r5k_dma_cache_inv_sc
 };

-void __cpuinit r5k_sc_init(void)
+void r5k_sc_init(void)
 {
 	if (r5k_sc_probe()) {
 		r5k_sc_enable();
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c
index 274af3be1442..aaffbba33706 100644
--- a/arch/mips/mm/sc-rm7k.c
+++ b/arch/mips/mm/sc-rm7k.c
@@ -104,7 +104,7 @@ static void blast_rm7k_tcache(void)
 /*
 * This function is executed in uncached address space.
 */
-static __cpuinit void __rm7k_tc_enable(void)
+static void __rm7k_tc_enable(void)
 {
 	int i;

@@ -117,7 +117,7 @@ static __cpuinit void __rm7k_tc_enable(void)
 		cache_op(Index_Store_Tag_T, CKSEG0ADDR(i));
 }

-static __cpuinit void rm7k_tc_enable(void)
+static void rm7k_tc_enable(void)
 {
 	if (read_c0_config() & RM7K_CONF_TE)
 		return;
@@ -130,7 +130,7 @@ static __cpuinit void rm7k_tc_enable(void)
 /*
 * This function is executed in uncached address space.
 */
-static __cpuinit void __rm7k_sc_enable(void)
+static void __rm7k_sc_enable(void)
 {
 	int i;

@@ -143,7 +143,7 @@ static __cpuinit void __rm7k_sc_enable(void)
 		cache_op(Index_Store_Tag_SD, CKSEG0ADDR(i));
 }

-static __cpuinit void rm7k_sc_enable(void)
+static void rm7k_sc_enable(void)
 {
 	if (read_c0_config() & RM7K_CONF_SE)
 		return;
@@ -184,7 +184,7 @@ static struct bcache_ops rm7k_sc_ops = {
 * This is a probing function like the one found in c-r4k.c, we look for the
 * wrap around point with different addresses.
 */
-static __cpuinit void __probe_tcache(void)
+static void __probe_tcache(void)
 {
 	unsigned long flags, addr, begin, end, pow2;

@@ -226,7 +226,7 @@ static __cpuinit void __probe_tcache(void)
 	local_irq_restore(flags);
 }

-void __cpuinit rm7k_sc_init(void)
+void rm7k_sc_init(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 	unsigned int config = read_c0_config();
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index a63d1ed0827f..9aca10994cd2 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -276,7 +276,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 	}
 }

-void __cpuinit tlb_init(void)
+void tlb_init(void)
 {
 	local_flush_tlb_all();

diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index c643de4c473a..00b26a67a06d 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -389,7 +389,7 @@ int __init has_transparent_hugepage(void)

 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */

-static int __cpuinitdata ntlb;
+static int ntlb;
 static int __init set_ntlb(char *str)
 {
 	get_option(&str, &ntlb);
@@ -398,7 +398,7 @@ static int __init set_ntlb(char *str)

 __setup("ntlb=", set_ntlb);

-void __cpuinit tlb_init(void)
+void tlb_init(void)
 {
 	/*
 	 * You should never change this register:
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 91c2499f806a..6a99733a4440 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -213,14 +213,14 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	local_irq_restore(flags);
 }

-static void __cpuinit probe_tlb(unsigned long config)
+static void probe_tlb(unsigned long config)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;

 	c->tlbsize = 3 * 128;		/* 3 sets each 128 entries */
 }

-void __cpuinit tlb_init(void)
+void tlb_init(void)
 {
 	unsigned int config = read_c0_config();
 	unsigned long status;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 9ab0f907a52c..34fce2b2095b 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -136,7 +136,7 @@ static int scratchpad_offset(int i)
 * why; it's not an issue caused by the core RTL.
 *
 */
-static int __cpuinit m4kc_tlbp_war(void)
+static int m4kc_tlbp_war(void)
 {
 	return (current_cpu_data.processor_id & 0xffff00) ==
 		(PRID_COMP_MIPS | PRID_IMP_4KC);
@@ -181,11 +181,9 @@ UASM_L_LA(_large_segbits_fault)
 UASM_L_LA(_tlb_huge_update)
 #endif

-static int __cpuinitdata hazard_instance;
+static int hazard_instance;

-static void __cpuinit uasm_bgezl_hazard(u32 **p,
-					struct uasm_reloc **r,
-					int instance)
+static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
 {
 	switch (instance) {
 	case 0 ... 7:
@@ -196,9 +194,7 @@ static void __cpuinit uasm_bgezl_hazard(u32 **p,
 	}
 }

-static void __cpuinit uasm_bgezl_label(struct uasm_label **l,
-				       u32 **p,
-				       int instance)
+static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
 {
 	switch (instance) {
 	case 0 ... 7:
@@ -295,15 +291,15 @@ static inline void dump_handler(const char *symbol, const u32 *handler, int coun
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
-static u32 tlb_handler[128] __cpuinitdata;
+static u32 tlb_handler[128];

 /* simply assume worst case size for labels and relocs */
-static struct uasm_label labels[128] __cpuinitdata;
-static struct uasm_reloc relocs[128] __cpuinitdata;
+static struct uasm_label labels[128];
+static struct uasm_reloc relocs[128];

-static int check_for_high_segbits __cpuinitdata;
+static int check_for_high_segbits;

-static unsigned int kscratch_used_mask __cpuinitdata;
+static unsigned int kscratch_used_mask;

 static inline int __maybe_unused c0_kscratch(void)
 {
@@ -316,7 +312,7 @@ static inline int __maybe_unused c0_kscratch(void)
 	}
 }

-static int __cpuinit allocate_kscratch(void)
+static int allocate_kscratch(void)
 {
 	int r;
 	unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
@@ -333,11 +329,11 @@ static int __cpuinit allocate_kscratch(void)
 	return r;
 }

-static int scratch_reg __cpuinitdata;
-static int pgd_reg __cpuinitdata;
+static int scratch_reg;
+static int pgd_reg;
 enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};

-static struct work_registers __cpuinit build_get_work_registers(u32 **p)
+static struct work_registers build_get_work_registers(u32 **p)
 {
 	struct work_registers r;

@@ -393,7 +389,7 @@ static struct work_registers __cpuinit build_get_work_registers(u32 **p)
 	return r;
 }

-static void __cpuinit build_restore_work_registers(u32 **p)
+static void build_restore_work_registers(u32 **p)
 {
 	if (scratch_reg >= 0) {
 		UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
@@ -418,7 +414,7 @@ extern unsigned long pgd_current[];
 /*
 * The R3000 TLB handler is simple.
 */
-static void __cpuinit build_r3000_tlb_refill_handler(void)
+static void build_r3000_tlb_refill_handler(void)
 {
 	long pgdc = (long)pgd_current;
 	u32 *p;
@@ -463,7 +459,7 @@ static void __cpuinit build_r3000_tlb_refill_handler(void)
 * other one.To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
-static u32 final_handler[64] __cpuinitdata;
+static u32 final_handler[64];

 /*
 * Hazards
@@ -487,7 +483,7 @@ static u32 final_handler[64] __cpuinitdata;
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
-static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
+static void __maybe_unused build_tlb_probe_entry(u32 **p)
 {
 	switch (current_cpu_type()) {
 	/* Found by experiment: R4600 v2.0/R4700 needs this, too. */
@@ -511,9 +507,9 @@ static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
 */
 enum tlb_write_entry { tlb_random, tlb_indexed };

-static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
-					    struct uasm_reloc **r,
-					    enum tlb_write_entry wmode)
+static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+				  struct uasm_reloc **r,
+				  enum tlb_write_entry wmode)
 {
 	void(*tlbw)(u32 **) = NULL;

@@ -647,8 +643,8 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	}
 }

-static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
-								  unsigned int reg)
+static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
+							unsigned int reg)
 {
 	if (cpu_has_rixi) {
 		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
@@ -663,11 +659,9 @@ static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,

 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT

-static __cpuinit void build_restore_pagemask(u32 **p,
-					     struct uasm_reloc **r,
-					     unsigned int tmp,
-					     enum label_id lid,
-					     int restore_scratch)
+static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
+				   unsigned int tmp, enum label_id lid,
+				   int restore_scratch)
 {
 	if (restore_scratch) {
 		/* Reset default page size */
@@ -706,12 +700,11 @@ static __cpuinit void build_restore_pagemask(u32 **p,
 	}
 }

-static __cpuinit void build_huge_tlb_write_entry(u32 **p,
-						 struct uasm_label **l,
-						 struct uasm_reloc **r,
-						 unsigned int tmp,
-						 enum tlb_write_entry wmode,
-						 int restore_scratch)
+static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
+				       struct uasm_reloc **r,
+				       unsigned int tmp,
+				       enum tlb_write_entry wmode,
+				       int restore_scratch)
 {
 	/* Set huge page tlb entry size */
 	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
@@ -726,9 +719,9 @@ static __cpuinit void build_huge_tlb_write_entry(u32 **p,
 /*
 * Check if Huge PTE is present, if so then jump to LABEL.
 */
-static void __cpuinit
+static void
 build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
 		  unsigned int pmd, int lid)
 {
 	UASM_i_LW(p, tmp, 0, pmd);
 	if (use_bbit_insns()) {
@@ -739,9 +732,8 @@ build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
 	}
 }

-static __cpuinit void build_huge_update_entries(u32 **p,
-						unsigned int pte,
-						unsigned int tmp)
+static void build_huge_update_entries(u32 **p, unsigned int pte,
+				      unsigned int tmp)
 {
 	int small_sequence;

@@ -771,11 +763,10 @@ static __cpuinit void build_huge_update_entries(u32 **p,
 	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
 }

-static __cpuinit void build_huge_handler_tail(u32 **p,
-					      struct uasm_reloc **r,
-					      struct uasm_label **l,
-					      unsigned int pte,
-					      unsigned int ptr)
+static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
+				    struct uasm_label **l,
+				    unsigned int pte,
+				    unsigned int ptr)
 {
 #ifdef CONFIG_SMP
 	UASM_i_SC(p, pte, 0, ptr);
@@ -794,7 +785,7 @@ static __cpuinit void build_huge_handler_tail(u32 **p,
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
-static void __cpuinit
+static void
 build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 		 unsigned int tmp, unsigned int ptr)
 {
@@ -886,7 +877,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
-static void __cpuinit
+static void
 build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 			unsigned int bvaddr, unsigned int ptr,
 			enum vmalloc64_mode mode)
@@ -956,7 +947,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
-static void __cpuinit __maybe_unused
+static void __maybe_unused
 build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	long pgdc = (long)pgd_current;
@@ -991,7 +982,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)

 #endif /* !CONFIG_64BIT */

-static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
+static void build_adjust_context(u32 **p, unsigned int ctx)
 {
 	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
 	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
@@ -1017,7 +1008,7 @@ static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
 	uasm_i_andi(p, ctx, ctx, mask);
 }

-static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	/*
 	 * Bug workaround for the Nevada. It seems as if under certain
@@ -1042,8 +1033,7 @@ static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr
 	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
 }

-static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
-					   unsigned int ptep)
+static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 {
 	/*
 	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
@@ -1104,7 +1094,7 @@ struct mips_huge_tlb_info {
 	int restore_scratch;
 };

-static struct mips_huge_tlb_info __cpuinit
+static struct mips_huge_tlb_info
 build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 			       struct uasm_reloc **r, unsigned int tmp,
 			       unsigned int ptr, int c0_scratch_reg)
@@ -1282,7 +1272,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 */
 #define MIPS64_REFILL_INSNS 32

-static void __cpuinit build_r4000_tlb_refill_handler(void)
+static void build_r4000_tlb_refill_handler(void)
 {
 	u32 *p = tlb_handler;
 	struct uasm_label *l = labels;
@@ -1462,7 +1452,7 @@ extern u32 handle_tlbm[], handle_tlbm_end[];
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
 extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[];

-static void __cpuinit build_r4000_setup_pgd(void)
+static void build_r4000_setup_pgd(void)
 {
 	const int a0 = 4;
 	const int a1 = 5;
@@ -1513,7 +1503,7 @@ static void __cpuinit build_r4000_setup_pgd(void)
 }
 #endif

-static void __cpuinit
+static void
 iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
 {
 #ifdef CONFIG_SMP
@@ -1533,7 +1523,7 @@ iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
 #endif
 }

-static void __cpuinit
+static void
 iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
 	unsigned int mode)
 {
@@ -1593,7 +1583,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
 * the page table where this PTE is located, PTE will be re-loaded
 * with it's original value.
 */
-static void __cpuinit
+static void
 build_pte_present(u32 **p, struct uasm_reloc **r,
 		  int pte, int ptr, int scratch, enum label_id lid)
 {
@@ -1621,7 +1611,7 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
 }

 /* Make PTE valid, store result in PTR. */
-static void __cpuinit
+static void
 build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
 		 unsigned int ptr)
 {
@@ -1634,7 +1624,7 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
 * Check if PTE can be written to, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
-static void __cpuinit
+static void
 build_pte_writable(u32 **p, struct uasm_reloc **r,
 		   unsigned int pte, unsigned int ptr, int scratch,
 		   enum label_id lid)
@@ -1654,7 +1644,7 @@ build_pte_writable(u32 **p, struct uasm_reloc **r,
 /* Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
-static void __cpuinit
+static void
 build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
 		 unsigned int ptr)
 {
@@ -1668,7 +1658,7 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
 * Check if PTE can be modified, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
-static void __cpuinit
+static void
 build_pte_modifiable(u32 **p, struct uasm_reloc **r,
 		     unsigned int pte, unsigned int ptr, int scratch,
 		     enum label_id lid)
@@ -1697,7 +1687,7 @@ build_pte_modifiable(u32 **p, struct uasm_reloc **r,
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
-static void __cpuinit
+static void
 build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
 {
 	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
@@ -1713,7 +1703,7 @@ build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill. Then it returns.
 */
-static void __cpuinit
+static void
 build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
 			     struct uasm_reloc **r, unsigned int pte,
 			     unsigned int tmp)
@@ -1731,7 +1721,7 @@ build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
 	uasm_i_rfe(p); /* branch delay */
 }

-static void __cpuinit
+static void
 build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
 				   unsigned int ptr)
 {
@@ -1751,7 +1741,7 @@ build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
 	uasm_i_tlbp(p); /* load delay */
 }

-static void __cpuinit build_r3000_tlb_load_handler(void)
+static void build_r3000_tlb_load_handler(void)
 {
 	u32 *p = handle_tlbl;
 	const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
@@ -1782,7 +1772,7 @@ static void __cpuinit build_r3000_tlb_load_handler(void)
 	dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size);
 }

-static void __cpuinit build_r3000_tlb_store_handler(void)
+static void build_r3000_tlb_store_handler(void)
 {
 	u32 *p = handle_tlbs;
 	const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
@@ -1813,7 +1803,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void)
 	dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size);
 }

-static void __cpuinit build_r3000_tlb_modify_handler(void)
+static void build_r3000_tlb_modify_handler(void)
 {
 	u32 *p = handle_tlbm;
 	const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
@@ -1848,7 +1838,7 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
 /*
 * R4000 style TLB load/store/modify handlers.
 */
-static struct work_registers __cpuinit
+static struct work_registers
 build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
 				   struct uasm_reloc **r)
 {
@@ -1884,7 +1874,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
 	return wr;
 }

-static void __cpuinit
+static void
 build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
 				   struct uasm_reloc **r, unsigned int tmp,
 				   unsigned int ptr)
@@ -1902,7 +1892,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
 #endif
 }

-static void __cpuinit build_r4000_tlb_load_handler(void)
+static void build_r4000_tlb_load_handler(void)
 {
 	u32 *p = handle_tlbl;
 	const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
@@ -2085,7 +2075,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size);
 }

-static void __cpuinit build_r4000_tlb_store_handler(void)
+static void build_r4000_tlb_store_handler(void)
 {
 	u32 *p = handle_tlbs;
 	const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
@@ -2140,7 +2130,7 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
 	dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size);
 }

-static void __cpuinit build_r4000_tlb_modify_handler(void)
+static void build_r4000_tlb_modify_handler(void)
 {
 	u32 *p = handle_tlbm;
 	const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
@@ -2196,7 +2186,7 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
 	dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size);
 }

-static void __cpuinit flush_tlb_handlers(void)
+static void flush_tlb_handlers(void)
 {
 	local_flush_icache_range((unsigned long)handle_tlbl,
 				 (unsigned long)handle_tlbl_end);
@@ -2210,7 +2200,7 @@ static void __cpuinit flush_tlb_handlers(void)
 #endif
 }

-void __cpuinit build_tlb_refill_handler(void)
+void build_tlb_refill_handler(void)
 {
 	/*
 	 * The refill handler is generated per-CPU, multi-node systems
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c index 162ee6d62788..060000fa653c 100644 --- a/arch/mips/mm/uasm-micromips.c +++ b/arch/mips/mm/uasm-micromips.c | |||
@@ -49,7 +49,7 @@ | |||
49 | 49 | ||
50 | #include "uasm.c" | 50 | #include "uasm.c" |
51 | 51 | ||
52 | static struct insn insn_table_MM[] __uasminitdata = { | 52 | static struct insn insn_table_MM[] = { |
53 | { insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD }, | 53 | { insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD }, |
54 | { insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, | 54 | { insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, |
55 | { insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD }, | 55 | { insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD }, |
@@ -118,7 +118,7 @@ static struct insn insn_table_MM[] __uasminitdata = { | |||
118 | 118 | ||
119 | #undef M | 119 | #undef M |
120 | 120 | ||
121 | static inline __uasminit u32 build_bimm(s32 arg) | 121 | static inline u32 build_bimm(s32 arg) |
122 | { | 122 | { |
123 | WARN(arg > 0xffff || arg < -0x10000, | 123 | WARN(arg > 0xffff || arg < -0x10000, |
124 | KERN_WARNING "Micro-assembler field overflow\n"); | 124 | KERN_WARNING "Micro-assembler field overflow\n"); |
@@ -128,7 +128,7 @@ static inline __uasminit u32 build_bimm(s32 arg) | |||
128 | return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff); | 128 | return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff); |
129 | } | 129 | } |
130 | 130 | ||
131 | static inline __uasminit u32 build_jimm(u32 arg) | 131 | static inline u32 build_jimm(u32 arg) |
132 | { | 132 | { |
133 | 133 | ||
134 | WARN(arg & ~((JIMM_MASK << 2) | 1), | 134 | WARN(arg & ~((JIMM_MASK << 2) | 1), |
@@ -141,7 +141,7 @@ static inline __uasminit u32 build_jimm(u32 arg) | |||
141 | * The order of opcode arguments is implicitly left to right, | 141 | * The order of opcode arguments is implicitly left to right, |
142 | * starting with RS and ending with FUNC or IMM. | 142 | * starting with RS and ending with FUNC or IMM. |
143 | */ | 143 | */ |
144 | static void __uasminit build_insn(u32 **buf, enum opcode opc, ...) | 144 | static void build_insn(u32 **buf, enum opcode opc, ...) |
145 | { | 145 | { |
146 | struct insn *ip = NULL; | 146 | struct insn *ip = NULL; |
147 | unsigned int i; | 147 | unsigned int i; |
@@ -199,7 +199,7 @@ static void __uasminit build_insn(u32 **buf, enum opcode opc, ...) | |||
199 | (*buf)++; | 199 | (*buf)++; |
200 | } | 200 | } |
201 | 201 | ||
202 | static inline void __uasminit | 202 | static inline void |
203 | __resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) | 203 | __resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) |
204 | { | 204 | { |
205 | long laddr = (long)lab->addr; | 205 | long laddr = (long)lab->addr; |
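
The insn_table_MM entries above pair each opcode with the operand fields it takes (RT | RS | RD, SIMM, and so on), and the comment before build_insn notes that the variadic arguments are consumed left to right, starting with RS. A standalone sketch of that scheme (plain C with classic MIPS32 field positions and hypothetical names; not the kernel's uasm code):

#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

enum fields { RS = 1 << 0, RT = 1 << 1, RD = 1 << 2, SIMM = 1 << 3 };

struct insn_desc {
	uint32_t match;      /* fixed opcode/function bits */
	unsigned int fields; /* which operands the caller passes */
};

/* Consume the variadic operands in a fixed left-to-right order and OR
 * each into its MIPS32 field (rs 25:21, rt 20:16, rd 15:11, imm 15:0). */
static uint32_t encode(const struct insn_desc *ip, ...)
{
	uint32_t op = ip->match;
	va_list ap;

	va_start(ap, ip);
	if (ip->fields & RS)
		op |= (va_arg(ap, unsigned int) & 0x1f) << 21;
	if (ip->fields & RT)
		op |= (va_arg(ap, unsigned int) & 0x1f) << 16;
	if (ip->fields & RD)
		op |= (va_arg(ap, unsigned int) & 0x1f) << 11;
	if (ip->fields & SIMM)
		op |= va_arg(ap, int) & 0xffff;
	va_end(ap);

	return op;
}

int main(void)
{
	/* addiu: major opcode 9, operands rs, rt, simm (cf. the table). */
	struct insn_desc addiu = { 9u << 26, RS | RT | SIMM };

	/* addiu $t0, $t1, 4  ($t1 = reg 9, $t0 = reg 8)  ->  0x25280004 */
	printf("0x%08x\n", encode(&addiu, 9, 8, 4));
	return 0;
}
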
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c index 5fcdd8fe3e83..0c724589854e 100644 --- a/arch/mips/mm/uasm-mips.c +++ b/arch/mips/mm/uasm-mips.c | |||
@@ -49,7 +49,7 @@ | |||
49 | 49 | ||
50 | #include "uasm.c" | 50 | #include "uasm.c" |
51 | 51 | ||
52 | static struct insn insn_table[] __uasminitdata = { | 52 | static struct insn insn_table[] = { |
53 | { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, | 53 | { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, |
54 | { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD }, | 54 | { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD }, |
55 | { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, | 55 | { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, |
@@ -119,7 +119,7 @@ static struct insn insn_table[] __uasminitdata = { | |||
119 | 119 | ||
120 | #undef M | 120 | #undef M |
121 | 121 | ||
122 | static inline __uasminit u32 build_bimm(s32 arg) | 122 | static inline u32 build_bimm(s32 arg) |
123 | { | 123 | { |
124 | WARN(arg > 0x1ffff || arg < -0x20000, | 124 | WARN(arg > 0x1ffff || arg < -0x20000, |
125 | KERN_WARNING "Micro-assembler field overflow\n"); | 125 | KERN_WARNING "Micro-assembler field overflow\n"); |
@@ -129,7 +129,7 @@ static inline __uasminit u32 build_bimm(s32 arg) | |||
129 | return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff); | 129 | return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff); |
130 | } | 130 | } |
131 | 131 | ||
132 | static inline __uasminit u32 build_jimm(u32 arg) | 132 | static inline u32 build_jimm(u32 arg) |
133 | { | 133 | { |
134 | WARN(arg & ~(JIMM_MASK << 2), | 134 | WARN(arg & ~(JIMM_MASK << 2), |
135 | KERN_WARNING "Micro-assembler field overflow\n"); | 135 | KERN_WARNING "Micro-assembler field overflow\n"); |
@@ -141,7 +141,7 @@ static inline __uasminit u32 build_jimm(u32 arg) | |||
141 | * The order of opcode arguments is implicitly left to right, | 141 | * The order of opcode arguments is implicitly left to right, |
142 | * starting with RS and ending with FUNC or IMM. | 142 | * starting with RS and ending with FUNC or IMM. |
143 | */ | 143 | */ |
144 | static void __uasminit build_insn(u32 **buf, enum opcode opc, ...) | 144 | static void build_insn(u32 **buf, enum opcode opc, ...) |
145 | { | 145 | { |
146 | struct insn *ip = NULL; | 146 | struct insn *ip = NULL; |
147 | unsigned int i; | 147 | unsigned int i; |
@@ -187,7 +187,7 @@ static void __uasminit build_insn(u32 **buf, enum opcode opc, ...) | |||
187 | (*buf)++; | 187 | (*buf)++; |
188 | } | 188 | } |
189 | 189 | ||
190 | static inline void __uasminit | 190 | static inline void |
191 | __resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) | 191 | __resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) |
192 | { | 192 | { |
193 | long laddr = (long)lab->addr; | 193 | long laddr = (long)lab->addr; |
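
Each uasm backend defines __resolve_relocs (its opening lines appear in the hunks above), and the shared uasm.c below declares it and drives the label/relocation scheme: branches are emitted with a zero offset plus an R_MIPS_PC16 relocation, labels record target addresses, and resolution patches the 16-bit word offset into the branch. A simplified, standalone sketch of that fix-up arithmetic (hypothetical names, not the kernel's implementation; MIPS branch offsets are counted in words from the instruction after the branch):

#include <assert.h>
#include <stdint.h>

struct label { uint32_t *addr; int lab; };
struct reloc { uint32_t *addr; int lab; };

/* Patch the branch at rel->addr to target lab->addr.  The offset is
 * relative to the delay-slot instruction (rel->addr + 1) and stored,
 * in words, in the low 16 bits of the branch. */
static void resolve(const struct label *lab, const struct reloc *rel)
{
	long off = lab->addr - (rel->addr + 1);

	assert(off >= -0x8000 && off <= 0x7fff);	/* must fit PC16 */
	*rel->addr = (*rel->addr & ~0xffffu) | ((uint32_t)off & 0xffff);
}

int main(void)
{
	uint32_t buf[8] = { 0 };

	/* Pretend buf[1] holds a branch emitted with offset 0 and that
	 * buf[5] is the labelled target further down the handler. */
	struct reloc r = { &buf[1], 1 };
	struct label l = { &buf[5], 1 };

	resolve(&l, &r);
	assert((buf[1] & 0xffff) == 3);		/* 5 - (1 + 1) = 3 words */
	return 0;
}
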
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index 7eb5e4355d25..b9d14b6c7f58 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c | |||
@@ -63,35 +63,35 @@ struct insn { | |||
63 | enum fields fields; | 63 | enum fields fields; |
64 | }; | 64 | }; |
65 | 65 | ||
66 | static inline __uasminit u32 build_rs(u32 arg) | 66 | static inline u32 build_rs(u32 arg) |
67 | { | 67 | { |
68 | WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n"); | 68 | WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n"); |
69 | 69 | ||
70 | return (arg & RS_MASK) << RS_SH; | 70 | return (arg & RS_MASK) << RS_SH; |
71 | } | 71 | } |
72 | 72 | ||
73 | static inline __uasminit u32 build_rt(u32 arg) | 73 | static inline u32 build_rt(u32 arg) |
74 | { | 74 | { |
75 | WARN(arg & ~RT_MASK, KERN_WARNING "Micro-assembler field overflow\n"); | 75 | WARN(arg & ~RT_MASK, KERN_WARNING "Micro-assembler field overflow\n"); |
76 | 76 | ||
77 | return (arg & RT_MASK) << RT_SH; | 77 | return (arg & RT_MASK) << RT_SH; |
78 | } | 78 | } |
79 | 79 | ||
80 | static inline __uasminit u32 build_rd(u32 arg) | 80 | static inline u32 build_rd(u32 arg) |
81 | { | 81 | { |
82 | WARN(arg & ~RD_MASK, KERN_WARNING "Micro-assembler field overflow\n"); | 82 | WARN(arg & ~RD_MASK, KERN_WARNING "Micro-assembler field overflow\n"); |
83 | 83 | ||
84 | return (arg & RD_MASK) << RD_SH; | 84 | return (arg & RD_MASK) << RD_SH; |
85 | } | 85 | } |
86 | 86 | ||
87 | static inline __uasminit u32 build_re(u32 arg) | 87 | static inline u32 build_re(u32 arg) |
88 | { | 88 | { |
89 | WARN(arg & ~RE_MASK, KERN_WARNING "Micro-assembler field overflow\n"); | 89 | WARN(arg & ~RE_MASK, KERN_WARNING "Micro-assembler field overflow\n"); |
90 | 90 | ||
91 | return (arg & RE_MASK) << RE_SH; | 91 | return (arg & RE_MASK) << RE_SH; |
92 | } | 92 | } |
93 | 93 | ||
94 | static inline __uasminit u32 build_simm(s32 arg) | 94 | static inline u32 build_simm(s32 arg) |
95 | { | 95 | { |
96 | WARN(arg > 0x7fff || arg < -0x8000, | 96 | WARN(arg > 0x7fff || arg < -0x8000, |
97 | KERN_WARNING "Micro-assembler field overflow\n"); | 97 | KERN_WARNING "Micro-assembler field overflow\n"); |
@@ -99,14 +99,14 @@ static inline __uasminit u32 build_simm(s32 arg) | |||
99 | return arg & 0xffff; | 99 | return arg & 0xffff; |
100 | } | 100 | } |
101 | 101 | ||
102 | static inline __uasminit u32 build_uimm(u32 arg) | 102 | static inline u32 build_uimm(u32 arg) |
103 | { | 103 | { |
104 | WARN(arg & ~IMM_MASK, KERN_WARNING "Micro-assembler field overflow\n"); | 104 | WARN(arg & ~IMM_MASK, KERN_WARNING "Micro-assembler field overflow\n"); |
105 | 105 | ||
106 | return arg & IMM_MASK; | 106 | return arg & IMM_MASK; |
107 | } | 107 | } |
108 | 108 | ||
109 | static inline __uasminit u32 build_scimm(u32 arg) | 109 | static inline u32 build_scimm(u32 arg) |
110 | { | 110 | { |
111 | WARN(arg & ~SCIMM_MASK, | 111 | WARN(arg & ~SCIMM_MASK, |
112 | KERN_WARNING "Micro-assembler field overflow\n"); | 112 | KERN_WARNING "Micro-assembler field overflow\n"); |
@@ -114,21 +114,21 @@ static inline __uasminit u32 build_scimm(u32 arg) | |||
114 | return (arg & SCIMM_MASK) << SCIMM_SH; | 114 | return (arg & SCIMM_MASK) << SCIMM_SH; |
115 | } | 115 | } |
116 | 116 | ||
117 | static inline __uasminit u32 build_func(u32 arg) | 117 | static inline u32 build_func(u32 arg) |
118 | { | 118 | { |
119 | WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n"); | 119 | WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n"); |
120 | 120 | ||
121 | return arg & FUNC_MASK; | 121 | return arg & FUNC_MASK; |
122 | } | 122 | } |
123 | 123 | ||
124 | static inline __uasminit u32 build_set(u32 arg) | 124 | static inline u32 build_set(u32 arg) |
125 | { | 125 | { |
126 | WARN(arg & ~SET_MASK, KERN_WARNING "Micro-assembler field overflow\n"); | 126 | WARN(arg & ~SET_MASK, KERN_WARNING "Micro-assembler field overflow\n"); |
127 | 127 | ||
128 | return arg & SET_MASK; | 128 | return arg & SET_MASK; |
129 | } | 129 | } |
130 | 130 | ||
131 | static void __uasminit build_insn(u32 **buf, enum opcode opc, ...); | 131 | static void build_insn(u32 **buf, enum opcode opc, ...); |
132 | 132 | ||
133 | #define I_u1u2u3(op) \ | 133 | #define I_u1u2u3(op) \ |
134 | Ip_u1u2u3(op) \ | 134 | Ip_u1u2u3(op) \ |
@@ -286,7 +286,7 @@ I_u3u1u2(_ldx) | |||
286 | 286 | ||
287 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | 287 | #ifdef CONFIG_CPU_CAVIUM_OCTEON |
288 | #include <asm/octeon/octeon.h> | 288 | #include <asm/octeon/octeon.h> |
289 | void __uasminit ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b, | 289 | void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b, |
290 | unsigned int c) | 290 | unsigned int c) |
291 | { | 291 | { |
292 | if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5) | 292 | if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5) |
@@ -304,7 +304,7 @@ I_u2s3u1(_pref) | |||
304 | #endif | 304 | #endif |
305 | 305 | ||
306 | /* Handle labels. */ | 306 | /* Handle labels. */ |
307 | void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid) | 307 | void ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid) |
308 | { | 308 | { |
309 | (*lab)->addr = addr; | 309 | (*lab)->addr = addr; |
310 | (*lab)->lab = lid; | 310 | (*lab)->lab = lid; |
@@ -312,7 +312,7 @@ void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, in | |||
312 | } | 312 | } |
313 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label)); | 313 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label)); |
314 | 314 | ||
315 | int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr) | 315 | int ISAFUNC(uasm_in_compat_space_p)(long addr) |
316 | { | 316 | { |
317 | /* Is this address in 32bit compat space? */ | 317 | /* Is this address in 32bit compat space? */ |
318 | #ifdef CONFIG_64BIT | 318 | #ifdef CONFIG_64BIT |
@@ -323,7 +323,7 @@ int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr) | |||
323 | } | 323 | } |
324 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p)); | 324 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p)); |
325 | 325 | ||
326 | static int __uasminit uasm_rel_highest(long val) | 326 | static int uasm_rel_highest(long val) |
327 | { | 327 | { |
328 | #ifdef CONFIG_64BIT | 328 | #ifdef CONFIG_64BIT |
329 | return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000; | 329 | return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000; |
@@ -332,7 +332,7 @@ static int __uasminit uasm_rel_highest(long val) | |||
332 | #endif | 332 | #endif |
333 | } | 333 | } |
334 | 334 | ||
335 | static int __uasminit uasm_rel_higher(long val) | 335 | static int uasm_rel_higher(long val) |
336 | { | 336 | { |
337 | #ifdef CONFIG_64BIT | 337 | #ifdef CONFIG_64BIT |
338 | return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000; | 338 | return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000; |
@@ -341,19 +341,19 @@ static int __uasminit uasm_rel_higher(long val) | |||
341 | #endif | 341 | #endif |
342 | } | 342 | } |
343 | 343 | ||
344 | int __uasminit ISAFUNC(uasm_rel_hi)(long val) | 344 | int ISAFUNC(uasm_rel_hi)(long val) |
345 | { | 345 | { |
346 | return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000; | 346 | return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000; |
347 | } | 347 | } |
348 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi)); | 348 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi)); |
349 | 349 | ||
350 | int __uasminit ISAFUNC(uasm_rel_lo)(long val) | 350 | int ISAFUNC(uasm_rel_lo)(long val) |
351 | { | 351 | { |
352 | return ((val & 0xffff) ^ 0x8000) - 0x8000; | 352 | return ((val & 0xffff) ^ 0x8000) - 0x8000; |
353 | } | 353 | } |
354 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo)); | 354 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo)); |
355 | 355 | ||
356 | void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr) | 356 | void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr) |
357 | { | 357 | { |
358 | if (!ISAFUNC(uasm_in_compat_space_p)(addr)) { | 358 | if (!ISAFUNC(uasm_in_compat_space_p)(addr)) { |
359 | ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr)); | 359 | ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr)); |
@@ -371,7 +371,7 @@ void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr) | |||
371 | } | 371 | } |
372 | UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly)); | 372 | UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly)); |
373 | 373 | ||
374 | void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr) | 374 | void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr) |
375 | { | 375 | { |
376 | ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr); | 376 | ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr); |
377 | if (ISAFUNC(uasm_rel_lo(addr))) { | 377 | if (ISAFUNC(uasm_rel_lo(addr))) { |
@@ -386,8 +386,7 @@ void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr) | |||
386 | UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA)); | 386 | UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA)); |
387 | 387 | ||
388 | /* Handle relocations. */ | 388 | /* Handle relocations. */ |
389 | void __uasminit | 389 | void ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid) |
390 | ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid) | ||
391 | { | 390 | { |
392 | (*rel)->addr = addr; | 391 | (*rel)->addr = addr; |
393 | (*rel)->type = R_MIPS_PC16; | 392 | (*rel)->type = R_MIPS_PC16; |
@@ -396,11 +395,11 @@ ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid) | |||
396 | } | 395 | } |
397 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16)); | 396 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16)); |
398 | 397 | ||
399 | static inline void __uasminit | 398 | static inline void __resolve_relocs(struct uasm_reloc *rel, |
400 | __resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab); | 399 | struct uasm_label *lab); |
401 | 400 | ||
402 | void __uasminit | 401 | void ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, |
403 | ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab) | 402 | struct uasm_label *lab) |
404 | { | 403 | { |
405 | struct uasm_label *l; | 404 | struct uasm_label *l; |
406 | 405 | ||
@@ -411,8 +410,8 @@ ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab) | |||
411 | } | 410 | } |
412 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs)); | 411 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs)); |
413 | 412 | ||
414 | void __uasminit | 413 | void ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, |
415 | ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off) | 414 | long off) |
416 | { | 415 | { |
417 | for (; rel->lab != UASM_LABEL_INVALID; rel++) | 416 | for (; rel->lab != UASM_LABEL_INVALID; rel++) |
418 | if (rel->addr >= first && rel->addr < end) | 417 | if (rel->addr >= first && rel->addr < end) |
@@ -420,8 +419,8 @@ ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off | |||
420 | } | 419 | } |
421 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs)); | 420 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs)); |
422 | 421 | ||
423 | void __uasminit | 422 | void ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, |
424 | ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off) | 423 | long off) |
425 | { | 424 | { |
426 | for (; lab->lab != UASM_LABEL_INVALID; lab++) | 425 | for (; lab->lab != UASM_LABEL_INVALID; lab++) |
427 | if (lab->addr >= first && lab->addr < end) | 426 | if (lab->addr >= first && lab->addr < end) |
@@ -429,9 +428,8 @@ ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off | |||
429 | } | 428 | } |
430 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels)); | 429 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels)); |
431 | 430 | ||
432 | void __uasminit | 431 | void ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, |
433 | ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first, | 432 | u32 *first, u32 *end, u32 *target) |
434 | u32 *end, u32 *target) | ||
435 | { | 433 | { |
436 | long off = (long)(target - first); | 434 | long off = (long)(target - first); |
437 | 435 | ||
@@ -442,7 +440,7 @@ ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 * | |||
442 | } | 440 | } |
443 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler)); | 441 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler)); |
444 | 442 | ||
445 | int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr) | 443 | int ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr) |
446 | { | 444 | { |
447 | for (; rel->lab != UASM_LABEL_INVALID; rel++) { | 445 | for (; rel->lab != UASM_LABEL_INVALID; rel++) { |
448 | if (rel->addr == addr | 446 | if (rel->addr == addr |
@@ -456,83 +454,79 @@ int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr) | |||
456 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay)); | 454 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay)); |
457 | 455 | ||
458 | /* Convenience functions for labeled branches. */ | 456 | /* Convenience functions for labeled branches. */ |
459 | void __uasminit | 457 | void ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg, |
460 | ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) | 458 | int lid) |
461 | { | 459 | { |
462 | uasm_r_mips_pc16(r, *p, lid); | 460 | uasm_r_mips_pc16(r, *p, lid); |
463 | ISAFUNC(uasm_i_bltz)(p, reg, 0); | 461 | ISAFUNC(uasm_i_bltz)(p, reg, 0); |
464 | } | 462 | } |
465 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz)); | 463 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz)); |
466 | 464 | ||
467 | void __uasminit | 465 | void ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid) |
468 | ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid) | ||
469 | { | 466 | { |
470 | uasm_r_mips_pc16(r, *p, lid); | 467 | uasm_r_mips_pc16(r, *p, lid); |
471 | ISAFUNC(uasm_i_b)(p, 0); | 468 | ISAFUNC(uasm_i_b)(p, 0); |
472 | } | 469 | } |
473 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b)); | 470 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b)); |
474 | 471 | ||
475 | void __uasminit | 472 | void ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, |
476 | ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) | 473 | int lid) |
477 | { | 474 | { |
478 | uasm_r_mips_pc16(r, *p, lid); | 475 | uasm_r_mips_pc16(r, *p, lid); |
479 | ISAFUNC(uasm_i_beqz)(p, reg, 0); | 476 | ISAFUNC(uasm_i_beqz)(p, reg, 0); |
480 | } | 477 | } |
481 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz)); | 478 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz)); |
482 | 479 | ||
483 | void __uasminit | 480 | void ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg, |
484 | ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) | 481 | int lid) |
485 | { | 482 | { |
486 | uasm_r_mips_pc16(r, *p, lid); | 483 | uasm_r_mips_pc16(r, *p, lid); |
487 | ISAFUNC(uasm_i_beqzl)(p, reg, 0); | 484 | ISAFUNC(uasm_i_beqzl)(p, reg, 0); |
488 | } | 485 | } |
489 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl)); | 486 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl)); |
490 | 487 | ||
491 | void __uasminit | 488 | void ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1, |
492 | ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1, | 489 | unsigned int reg2, int lid) |
493 | unsigned int reg2, int lid) | ||
494 | { | 490 | { |
495 | uasm_r_mips_pc16(r, *p, lid); | 491 | uasm_r_mips_pc16(r, *p, lid); |
496 | ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0); | 492 | ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0); |
497 | } | 493 | } |
498 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne)); | 494 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne)); |
499 | 495 | ||
500 | void __uasminit | 496 | void ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg, |
501 | ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) | 497 | int lid) |
502 | { | 498 | { |
503 | uasm_r_mips_pc16(r, *p, lid); | 499 | uasm_r_mips_pc16(r, *p, lid); |
504 | ISAFUNC(uasm_i_bnez)(p, reg, 0); | 500 | ISAFUNC(uasm_i_bnez)(p, reg, 0); |
505 | } | 501 | } |
506 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez)); | 502 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez)); |
507 | 503 | ||
508 | void __uasminit | 504 | void ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg, |
509 | ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) | 505 | int lid) |
510 | { | 506 | { |
511 | uasm_r_mips_pc16(r, *p, lid); | 507 | uasm_r_mips_pc16(r, *p, lid); |
512 | ISAFUNC(uasm_i_bgezl)(p, reg, 0); | 508 | ISAFUNC(uasm_i_bgezl)(p, reg, 0); |
513 | } | 509 | } |
514 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl)); | 510 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl)); |
515 | 511 | ||
516 | void __uasminit | 512 | void ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg, |
517 | ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) | 513 | int lid) |
518 | { | 514 | { |
519 | uasm_r_mips_pc16(r, *p, lid); | 515 | uasm_r_mips_pc16(r, *p, lid); |
520 | ISAFUNC(uasm_i_bgez)(p, reg, 0); | 516 | ISAFUNC(uasm_i_bgez)(p, reg, 0); |
521 | } | 517 | } |
522 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez)); | 518 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez)); |
523 | 519 | ||
524 | void __uasminit | 520 | void ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg, |
525 | ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg, | 521 | unsigned int bit, int lid) |
526 | unsigned int bit, int lid) | ||
527 | { | 522 | { |
528 | uasm_r_mips_pc16(r, *p, lid); | 523 | uasm_r_mips_pc16(r, *p, lid); |
529 | ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0); | 524 | ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0); |
530 | } | 525 | } |
531 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0)); | 526 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0)); |
532 | 527 | ||
533 | void __uasminit | 528 | void ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg, |
534 | ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg, | 529 | unsigned int bit, int lid) |
535 | unsigned int bit, int lid) | ||
536 | { | 530 | { |
537 | uasm_r_mips_pc16(r, *p, lid); | 531 | uasm_r_mips_pc16(r, *p, lid); |
538 | ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0); | 532 | ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0); |