Diffstat (limited to 'arch/mips/mm')
-rw-r--r--	arch/mips/mm/c-r3k.c	2
-rw-r--r--	arch/mips/mm/c-r4k.c	30
-rw-r--r--	arch/mips/mm/c-tx39.c	2
-rw-r--r--	arch/mips/mm/cache.c	5
-rw-r--r--	arch/mips/mm/cex-sb1.S	4
-rw-r--r--	arch/mips/mm/pg-r4k.c	22
-rw-r--r--	arch/mips/mm/pg-sb1.c	4
-rw-r--r--	arch/mips/mm/sc-ip22.c	2
-rw-r--r--	arch/mips/mm/sc-mips.c	3
-rw-r--r--	arch/mips/mm/sc-r5k.c	2
-rw-r--r--	arch/mips/mm/sc-rm7k.c	2
-rw-r--r--	arch/mips/mm/tlb-r3k.c	2
-rw-r--r--	arch/mips/mm/tlb-r4k.c	10
-rw-r--r--	arch/mips/mm/tlb-r8k.c	4
-rw-r--r--	arch/mips/mm/tlbex.c	70
-rw-r--r--	arch/mips/mm/uasm.c	68
-rw-r--r--	arch/mips/mm/uasm.h	76
17 files changed, 149 insertions, 159 deletions
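The changes below convert the MIPS cache and TLB setup paths from __init/__initdata to __cpuinit/__cpuinitdata (and cpu_cache_init to __devinit), so this per-CPU initialization code and data is not discarded after boot and can still be run when a CPU is brought online later. As a rough, illustrative sketch of what the annotation swap means (not the kernel's actual include/linux/init.h, which also handles section placement and differs between versions):

    /* Illustrative sketch only: with CPU hotplug support the annotated code
     * stays resident so it can run again for a newly onlined CPU; without
     * hotplug it falls back to __init and may be discarded after boot. */
    #ifdef CONFIG_HOTPLUG_CPU
    # define __cpuinit
    # define __cpuinitdata
    #else
    # define __cpuinit	__init
    # define __cpuinitdata	__initdata
    #endif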
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 562abb77d1d5..76935e320214 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -307,7 +307,7 @@ static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
 	r3k_flush_dcache_range(start, start + size);
 }
 
-void __init r3k_cache_init(void)
+void __cpuinit r3k_cache_init(void)
 {
 	extern void build_clear_page(void);
 	extern void build_copy_page(void);
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 2c4f7e11f0d5..6496925b5e29 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -93,7 +93,7 @@ static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
 	blast_dcache32_page(addr);
 }
 
-static void __init r4k_blast_dcache_page_setup(void)
+static void __cpuinit r4k_blast_dcache_page_setup(void)
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
@@ -107,7 +107,7 @@ static void __init r4k_blast_dcache_page_setup(void)
 
 static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
 
-static void __init r4k_blast_dcache_page_indexed_setup(void)
+static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
@@ -121,7 +121,7 @@ static void __init r4k_blast_dcache_page_indexed_setup(void)
 
 static void (* r4k_blast_dcache)(void);
 
-static void __init r4k_blast_dcache_setup(void)
+static void __cpuinit r4k_blast_dcache_setup(void)
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
@@ -206,7 +206,7 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
 
 static void (* r4k_blast_icache_page)(unsigned long addr);
 
-static void __init r4k_blast_icache_page_setup(void)
+static void __cpuinit r4k_blast_icache_page_setup(void)
 {
 	unsigned long ic_lsize = cpu_icache_line_size();
 
@@ -223,7 +223,7 @@ static void __init r4k_blast_icache_page_setup(void)
 
 static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
 
-static void __init r4k_blast_icache_page_indexed_setup(void)
+static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
 {
 	unsigned long ic_lsize = cpu_icache_line_size();
 
@@ -247,7 +247,7 @@ static void __init r4k_blast_icache_page_indexed_setup(void)
 
 static void (* r4k_blast_icache)(void);
 
-static void __init r4k_blast_icache_setup(void)
+static void __cpuinit r4k_blast_icache_setup(void)
 {
 	unsigned long ic_lsize = cpu_icache_line_size();
 
@@ -268,7 +268,7 @@ static void __init r4k_blast_icache_setup(void)
 
 static void (* r4k_blast_scache_page)(unsigned long addr);
 
-static void __init r4k_blast_scache_page_setup(void)
+static void __cpuinit r4k_blast_scache_page_setup(void)
 {
 	unsigned long sc_lsize = cpu_scache_line_size();
 
@@ -286,7 +286,7 @@ static void __init r4k_blast_scache_page_setup(void)
 
 static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
 
-static void __init r4k_blast_scache_page_indexed_setup(void)
+static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
 {
 	unsigned long sc_lsize = cpu_scache_line_size();
 
@@ -304,7 +304,7 @@ static void __init r4k_blast_scache_page_indexed_setup(void)
 
 static void (* r4k_blast_scache)(void);
 
-static void __init r4k_blast_scache_setup(void)
+static void __cpuinit r4k_blast_scache_setup(void)
 {
 	unsigned long sc_lsize = cpu_scache_line_size();
 
@@ -691,11 +691,11 @@ static inline void rm7k_erratum31(void)
 	}
 }
 
-static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
+static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
 	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
 };
 
-static void __init probe_pcache(void)
+static void __cpuinit probe_pcache(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 	unsigned int config = read_c0_config();
@@ -1016,7 +1016,7 @@ static void __init probe_pcache(void)
  * executes in KSEG1 space or else you will crash and burn badly. You have
  * been warned.
  */
-static int __init probe_scache(void)
+static int __cpuinit probe_scache(void)
 {
 	unsigned long flags, addr, begin, end, pow2;
 	unsigned int config = read_c0_config();
@@ -1095,7 +1095,7 @@ extern int r5k_sc_init(void);
 extern int rm7k_sc_init(void);
 extern int mips_sc_init(void);
 
-static void __init setup_scache(void)
+static void __cpuinit setup_scache(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 	unsigned int config = read_c0_config();
@@ -1206,7 +1206,7 @@ void au1x00_fixup_config_od(void)
 	}
 }
 
-static void __init coherency_setup(void)
+static void __cpuinit coherency_setup(void)
 {
 	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
 
@@ -1238,7 +1238,7 @@ static void __init coherency_setup(void)
 	}
 }
 
-void __init r4k_cache_init(void)
+void __cpuinit r4k_cache_init(void)
 {
 	extern void build_clear_page(void);
 	extern void build_copy_page(void);
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index 9ea121e8cdce..b09d56981d53 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -329,7 +329,7 @@ static __init void tx39_probe_cache(void)
 	}
 }
 
-void __init tx39_cache_init(void)
+void __cpuinit tx39_cache_init(void)
 {
 	extern void build_clear_page(void);
 	extern void build_copy_page(void);
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 6a24651971df..51ab1faa027d 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -127,9 +127,10 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
 	}
 }
 
-static char cache_panic[] __initdata = "Yeee, unsupported cache architecture.";
+static char cache_panic[] __cpuinitdata =
+	"Yeee, unsupported cache architecture.";
 
-void __init cpu_cache_init(void)
+void __devinit cpu_cache_init(void)
 {
 	if (cpu_has_3k_cache) {
 		extern void __weak r3k_cache_init(void);
diff --git a/arch/mips/mm/cex-sb1.S b/arch/mips/mm/cex-sb1.S
index e54a62f2807c..2d08268bb705 100644
--- a/arch/mips/mm/cex-sb1.S
+++ b/arch/mips/mm/cex-sb1.S
@@ -34,8 +34,6 @@
  * is changed.
  */
 
-	__INIT
-
 	.set	mips64
 	.set	noreorder
 	.set	noat
@@ -51,6 +49,8 @@
  * (0x170-0x17f) are used to preserve k0, k1, and ra.
  */
 
+	__CPUINIT
+
 LEAF(except_vec2_sb1)
 	/*
 	 * If this error is recoverable, we need to exit the handler
diff --git a/arch/mips/mm/pg-r4k.c b/arch/mips/mm/pg-r4k.c
index 9185fbf37c0d..455dedb5b39e 100644
--- a/arch/mips/mm/pg-r4k.c
+++ b/arch/mips/mm/pg-r4k.c
@@ -66,21 +66,21 @@ EXPORT_SYMBOL(copy_page);
  * with 64-bit kernels. The prefetch offsets have been experimentally tuned
  * an Origin 200.
  */
-static int pref_offset_clear __initdata = 512;
-static int pref_offset_copy __initdata = 256;
+static int pref_offset_clear __cpuinitdata = 512;
+static int pref_offset_copy __cpuinitdata = 256;
 
-static unsigned int pref_src_mode __initdata;
-static unsigned int pref_dst_mode __initdata;
+static unsigned int pref_src_mode __cpuinitdata;
+static unsigned int pref_dst_mode __cpuinitdata;
 
-static int load_offset __initdata;
-static int store_offset __initdata;
+static int load_offset __cpuinitdata;
+static int store_offset __cpuinitdata;
 
-static unsigned int __initdata *dest, *epc;
+static unsigned int __cpuinitdata *dest, *epc;
 
 static unsigned int instruction_pending;
 static union mips_instruction delayed_mi;
 
-static void __init emit_instruction(union mips_instruction mi)
+static void __cpuinit emit_instruction(union mips_instruction mi)
 {
 	if (instruction_pending)
 		*epc++ = delayed_mi.word;
@@ -222,7 +222,7 @@ static inline void build_cdex_p(void)
 	emit_instruction(mi);
 }
 
-static void __init __build_store_reg(int reg)
+static void __cpuinit __build_store_reg(int reg)
 {
 	union mips_instruction mi;
 	unsigned int width;
@@ -339,7 +339,7 @@ static inline void build_jr_ra(void)
 	flush_delay_slot_or_nop();
 }
 
-void __init build_clear_page(void)
+void __cpuinit build_clear_page(void)
 {
 	unsigned int loop_start;
 	unsigned long off;
@@ -442,7 +442,7 @@ dest = label();
 	pr_debug("\t.set pop\n");
 }
 
-void __init build_copy_page(void)
+void __cpuinit build_copy_page(void)
 {
 	unsigned int loop_start;
 	unsigned long off;
diff --git a/arch/mips/mm/pg-sb1.c b/arch/mips/mm/pg-sb1.c
index 89925ec57d6a..49e289d05414 100644
--- a/arch/mips/mm/pg-sb1.c
+++ b/arch/mips/mm/pg-sb1.c
@@ -293,10 +293,10 @@ void copy_page(void *to, void *from)
 EXPORT_SYMBOL(clear_page);
 EXPORT_SYMBOL(copy_page);
 
-void __init build_clear_page(void)
+void __cpuinit build_clear_page(void)
 {
 }
 
-void __init build_copy_page(void)
+void __cpuinit build_copy_page(void)
 {
 }
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
index d236cf8b7374..1f602a110e10 100644
--- a/arch/mips/mm/sc-ip22.c
+++ b/arch/mips/mm/sc-ip22.c
@@ -168,7 +168,7 @@ struct bcache_ops indy_sc_ops = {
 	.bc_inv = indy_sc_wback_invalidate
 };
 
-void __init indy_sc_init(void)
+void __cpuinit indy_sc_init(void)
 {
 	if (indy_sc_probe()) {
 		indy_sc_enable();
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index c13170bc675c..b55c2d1b998f 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -100,7 +100,7 @@ static inline int __init mips_sc_probe(void)
 	return 1;
 }
 
-int __init mips_sc_init(void)
+int __cpuinit mips_sc_init(void)
 {
 	int found = mips_sc_probe();
 	if (found) {
@@ -109,4 +109,3 @@ int __init mips_sc_init(void)
 	}
 	return found;
 }
-
diff --git a/arch/mips/mm/sc-r5k.c b/arch/mips/mm/sc-r5k.c
index d35b6c1103a3..f330d38e5575 100644
--- a/arch/mips/mm/sc-r5k.c
+++ b/arch/mips/mm/sc-r5k.c
@@ -99,7 +99,7 @@ static struct bcache_ops r5k_sc_ops = {
 	.bc_inv = r5k_dma_cache_inv_sc
 };
 
-void __init r5k_sc_init(void)
+void __cpuinit r5k_sc_init(void)
 {
 	if (r5k_sc_probe()) {
 		r5k_sc_enable();
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c
index 31ec73052423..fc227f3b1199 100644
--- a/arch/mips/mm/sc-rm7k.c
+++ b/arch/mips/mm/sc-rm7k.c
@@ -128,7 +128,7 @@ struct bcache_ops rm7k_sc_ops = {
 	.bc_inv = rm7k_sc_inv
 };
 
-void __init rm7k_sc_init(void)
+void __cpuinit rm7k_sc_init(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 	unsigned int config = read_c0_config();
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index 7948e9a5e372..a782549ac80e 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -281,7 +281,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 	}
 }
 
-void __init tlb_init(void)
+void __cpuinit tlb_init(void)
 {
 	local_flush_tlb_all();
 
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 74ae0348cc92..63065d6e8063 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -388,7 +388,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
  * lifetime of the system
  */
 
-static int temp_tlb_entry __initdata;
+static int temp_tlb_entry __cpuinitdata;
 
 __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
 	unsigned long entryhi, unsigned long pagemask)
@@ -427,7 +427,7 @@ out:
 	return ret;
 }
 
-static void __init probe_tlb(unsigned long config)
+static void __cpuinit probe_tlb(unsigned long config)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 	unsigned int reg;
@@ -455,7 +455,7 @@ static void __init probe_tlb(unsigned long config)
 	c->tlbsize = ((reg >> 25) & 0x3f) + 1;
 }
 
-static int __initdata ntlb = 0;
+static int __cpuinitdata ntlb = 0;
 static int __init set_ntlb(char *str)
 {
 	get_option(&str, &ntlb);
@@ -464,7 +464,7 @@ static int __init set_ntlb(char *str)
 
 __setup("ntlb=", set_ntlb);
 
-void __init tlb_init(void)
+void __cpuinit tlb_init(void)
 {
 	unsigned int config = read_c0_config();
 
@@ -473,7 +473,7 @@ void __init tlb_init(void)
 	 * - On R4600 1.7 the tlbp never hits for pages smaller than
 	 *   the value in the c0_pagemask register.
 	 * - The entire mm handling assumes the c0_pagemask register to
-	 *   be set for 4kb pages.
+	 *   be set to fixed-size pages.
 	 */
 	probe_tlb(config);
 	write_c0_pagemask(PM_DEFAULT_MASK);
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index bd8409d8ff62..4f01a3be215c 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -214,14 +214,14 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	local_irq_restore(flags);
 }
 
-static void __init probe_tlb(unsigned long config)
+static void __cpuinit probe_tlb(unsigned long config)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 
 	c->tlbsize = 3 * 128;		/* 3 sets each 128 entries */
 }
 
-void __init tlb_init(void)
+void __cpuinit tlb_init(void)
 {
 	unsigned int config = read_c0_config();
 	unsigned long status;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 218a6cc415e8..3a93d4ce2703 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -60,7 +60,7 @@ static inline int __maybe_unused r10000_llsc_war(void)
  * why; it's not an issue caused by the core RTL.
  *
  */
-static int __init m4kc_tlbp_war(void)
+static int __cpuinit m4kc_tlbp_war(void)
 {
 	return (current_cpu_data.processor_id & 0xffff00) ==
 		(PRID_COMP_MIPS | PRID_IMP_4KC);
@@ -144,16 +144,16 @@ static inline void dump_handler(const u32 *handler, int count)
  * We deliberately chose a buffer size of 128, so we won't scribble
  * over anything important on overflow before we panic.
  */
-static u32 tlb_handler[128] __initdata;
+static u32 tlb_handler[128] __cpuinitdata;
 
 /* simply assume worst case size for labels and relocs */
-static struct uasm_label labels[128] __initdata;
-static struct uasm_reloc relocs[128] __initdata;
+static struct uasm_label labels[128] __cpuinitdata;
+static struct uasm_reloc relocs[128] __cpuinitdata;
 
 /*
  * The R3000 TLB handler is simple.
  */
-static void __init build_r3000_tlb_refill_handler(void)
+static void __cpuinit build_r3000_tlb_refill_handler(void)
 {
 	long pgdc = (long)pgd_current;
 	u32 *p;
@@ -197,7 +197,7 @@ static void __init build_r3000_tlb_refill_handler(void)
  * other one.To keep things simple, we first assume linear space,
  * then we relocate it to the final handler layout as needed.
  */
-static u32 final_handler[64] __initdata;
+static u32 final_handler[64] __cpuinitdata;
 
 /*
  * Hazards
@@ -221,7 +221,7 @@ static u32 final_handler[64] __initdata;
  *
  * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
  */
-static void __init __maybe_unused build_tlb_probe_entry(u32 **p)
+static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
 {
 	switch (current_cpu_type()) {
 	/* Found by experiment: R4600 v2.0 needs this, too. */
@@ -245,7 +245,7 @@ static void __init __maybe_unused build_tlb_probe_entry(u32 **p)
  */
 enum tlb_write_entry { tlb_random, tlb_indexed };
 
-static void __init build_tlb_write_entry(u32 **p, struct uasm_label **l,
+static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	struct uasm_reloc **r,
 	enum tlb_write_entry wmode)
 {
@@ -389,7 +389,7 @@ static void __init build_tlb_write_entry(u32 **p, struct uasm_label **l,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pmd entry.
  */
-static void __init
+static void __cpuinit
 build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	unsigned int tmp, unsigned int ptr)
 {
@@ -450,7 +450,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
  * BVADDR is the faulting address, PTR is scratch.
  * PTR will hold the pgd for vmalloc.
  */
-static void __init
+static void __cpuinit
 build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	unsigned int bvaddr, unsigned int ptr)
 {
@@ -522,7 +522,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pgd entry.
  */
-static void __init __maybe_unused
+static void __cpuinit __maybe_unused
 build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	long pgdc = (long)pgd_current;
@@ -557,7 +557,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 
 #endif /* !CONFIG_64BIT */
 
-static void __init build_adjust_context(u32 **p, unsigned int ctx)
+static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
 {
 	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
 	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
@@ -583,7 +583,7 @@ static void __init build_adjust_context(u32 **p, unsigned int ctx)
 	uasm_i_andi(p, ctx, ctx, mask);
 }
 
-static void __init build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	/*
 	 * Bug workaround for the Nevada. It seems as if under certain
@@ -608,7 +608,7 @@ static void __init build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
 }
 
-static void __init build_update_entries(u32 **p, unsigned int tmp,
+static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 	unsigned int ptep)
 {
 	/*
@@ -651,7 +651,7 @@ static void __init build_update_entries(u32 **p, unsigned int tmp,
 #endif
 }
 
-static void __init build_r4000_tlb_refill_handler(void)
+static void __cpuinit build_r4000_tlb_refill_handler(void)
 {
 	u32 *p = tlb_handler;
 	struct uasm_label *l = labels;
@@ -783,7 +783,7 @@ u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
 u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
 u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
 
-static void __init
+static void __cpuinit
 iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr)
 {
 #ifdef CONFIG_SMP
@@ -803,7 +803,7 @@ iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr)
 #endif
 }
 
-static void __init
+static void __cpuinit
 iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
 	unsigned int mode)
 {
@@ -863,7 +863,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
  * the page table where this PTE is located, PTE will be re-loaded
  * with it's original value.
  */
-static void __init
+static void __cpuinit
 build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	unsigned int pte, unsigned int ptr, enum label_id lid)
 {
@@ -874,7 +874,7 @@ build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 }
 
 /* Make PTE valid, store result in PTR. */
-static void __init
+static void __cpuinit
 build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
 	unsigned int ptr)
 {
@@ -887,7 +887,7 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
  * Check if PTE can be written to, if not branch to LABEL. Regardless
  * restore PTE with value from PTR when done.
  */
-static void __init
+static void __cpuinit
 build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	unsigned int pte, unsigned int ptr, enum label_id lid)
 {
@@ -900,7 +900,7 @@ build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 /* Make PTE writable, update software status bits as well, then store
  * at PTR.
  */
-static void __init
+static void __cpuinit
 build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
 	unsigned int ptr)
 {
@@ -914,7 +914,7 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
  * Check if PTE can be modified, if not branch to LABEL. Regardless
  * restore PTE with value from PTR when done.
  */
-static void __init
+static void __cpuinit
 build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	unsigned int pte, unsigned int ptr, enum label_id lid)
 {
@@ -931,7 +931,7 @@ build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
  * This places the pte into ENTRYLO0 and writes it with tlbwi.
  * Then it returns.
  */
-static void __init
+static void __cpuinit
 build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
 {
 	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
@@ -947,7 +947,7 @@ build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
  * may have the probe fail bit set as a result of a trap on a
  * kseg2 access, i.e. without refill. Then it returns.
  */
-static void __init
+static void __cpuinit
 build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
 	struct uasm_reloc **r, unsigned int pte,
 	unsigned int tmp)
@@ -965,7 +965,7 @@ build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
 	uasm_i_rfe(p); /* branch delay */
 }
 
-static void __init
+static void __cpuinit
 build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
 	unsigned int ptr)
 {
@@ -985,7 +985,7 @@ build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
 	uasm_i_tlbp(p); /* load delay */
 }
 
-static void __init build_r3000_tlb_load_handler(void)
+static void __cpuinit build_r3000_tlb_load_handler(void)
 {
 	u32 *p = handle_tlbl;
 	struct uasm_label *l = labels;
@@ -1015,7 +1015,7 @@ static void __init build_r3000_tlb_load_handler(void)
 	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
 }
 
-static void __init build_r3000_tlb_store_handler(void)
+static void __cpuinit build_r3000_tlb_store_handler(void)
 {
 	u32 *p = handle_tlbs;
 	struct uasm_label *l = labels;
@@ -1045,7 +1045,7 @@ static void __init build_r3000_tlb_store_handler(void)
 	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
 }
 
-static void __init build_r3000_tlb_modify_handler(void)
+static void __cpuinit build_r3000_tlb_modify_handler(void)
 {
 	u32 *p = handle_tlbm;
 	struct uasm_label *l = labels;
@@ -1078,7 +1078,7 @@ static void __init build_r3000_tlb_modify_handler(void)
 /*
  * R4000 style TLB load/store/modify handlers.
  */
-static void __init
+static void __cpuinit
 build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
 	struct uasm_reloc **r, unsigned int pte,
 	unsigned int ptr)
@@ -1103,7 +1103,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
 	build_tlb_probe_entry(p);
 }
 
-static void __init
+static void __cpuinit
 build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
 	struct uasm_reloc **r, unsigned int tmp,
 	unsigned int ptr)
@@ -1120,7 +1120,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
 #endif
 }
 
-static void __init build_r4000_tlb_load_handler(void)
+static void __cpuinit build_r4000_tlb_load_handler(void)
 {
 	u32 *p = handle_tlbl;
 	struct uasm_label *l = labels;
@@ -1160,7 +1160,7 @@ static void __init build_r4000_tlb_load_handler(void)
 	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
 }
 
-static void __init build_r4000_tlb_store_handler(void)
+static void __cpuinit build_r4000_tlb_store_handler(void)
 {
 	u32 *p = handle_tlbs;
 	struct uasm_label *l = labels;
@@ -1191,7 +1191,7 @@ static void __init build_r4000_tlb_store_handler(void)
 	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
 }
 
-static void __init build_r4000_tlb_modify_handler(void)
+static void __cpuinit build_r4000_tlb_modify_handler(void)
 {
 	u32 *p = handle_tlbm;
 	struct uasm_label *l = labels;
@@ -1223,7 +1223,7 @@ static void __init build_r4000_tlb_modify_handler(void)
 	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
 }
 
-void __init build_tlb_refill_handler(void)
+void __cpuinit build_tlb_refill_handler(void)
 {
 	/*
 	 * The refill handler is generated per-CPU, multi-node systems
@@ -1269,7 +1269,7 @@ void __init build_tlb_refill_handler(void)
 	}
 }
 
-void __init flush_tlb_handlers(void)
+void __cpuinit flush_tlb_handlers(void)
 {
 	flush_icache_range((unsigned long)handle_tlbl,
 		(unsigned long)handle_tlbl + sizeof(handle_tlbl));
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index e3f74ed5f704..1a6f7704cc89 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -82,7 +82,7 @@ struct insn {
 	| (e) << RE_SH \
 	| (f) << FUNC_SH)
 
-static struct insn insn_table[] __initdata = {
+static struct insn insn_table[] __cpuinitdata = {
 	{ insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
 	{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
 	{ insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
@@ -135,7 +135,7 @@ static struct insn insn_table[] __initdata = {
 
 #undef M
 
-static inline __init u32 build_rs(u32 arg)
+static inline __cpuinit u32 build_rs(u32 arg)
 {
 	if (arg & ~RS_MASK)
 		printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -143,7 +143,7 @@ static inline __init u32 build_rs(u32 arg)
 	return (arg & RS_MASK) << RS_SH;
 }
 
-static inline __init u32 build_rt(u32 arg)
+static inline __cpuinit u32 build_rt(u32 arg)
 {
 	if (arg & ~RT_MASK)
 		printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -151,7 +151,7 @@ static inline __init u32 build_rt(u32 arg)
 	return (arg & RT_MASK) << RT_SH;
 }
 
-static inline __init u32 build_rd(u32 arg)
+static inline __cpuinit u32 build_rd(u32 arg)
 {
 	if (arg & ~RD_MASK)
 		printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -159,7 +159,7 @@ static inline __init u32 build_rd(u32 arg)
 	return (arg & RD_MASK) << RD_SH;
 }
 
-static inline __init u32 build_re(u32 arg)
+static inline __cpuinit u32 build_re(u32 arg)
 {
 	if (arg & ~RE_MASK)
 		printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -167,7 +167,7 @@ static inline __init u32 build_re(u32 arg)
 	return (arg & RE_MASK) << RE_SH;
 }
 
-static inline __init u32 build_simm(s32 arg)
+static inline __cpuinit u32 build_simm(s32 arg)
 {
 	if (arg > 0x7fff || arg < -0x8000)
 		printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -175,7 +175,7 @@ static inline __init u32 build_simm(s32 arg)
 	return arg & 0xffff;
 }
 
-static inline __init u32 build_uimm(u32 arg)
+static inline __cpuinit u32 build_uimm(u32 arg)
 {
 	if (arg & ~IMM_MASK)
 		printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -183,7 +183,7 @@ static inline __init u32 build_uimm(u32 arg)
 	return arg & IMM_MASK;
 }
 
-static inline __init u32 build_bimm(s32 arg)
+static inline __cpuinit u32 build_bimm(s32 arg)
 {
 	if (arg > 0x1ffff || arg < -0x20000)
 		printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -194,7 +194,7 @@ static inline __init u32 build_bimm(s32 arg)
 	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
 }
 
-static inline __init u32 build_jimm(u32 arg)
+static inline __cpuinit u32 build_jimm(u32 arg)
 {
 	if (arg & ~((JIMM_MASK) << 2))
 		printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -202,7 +202,7 @@ static inline __init u32 build_jimm(u32 arg)
 	return (arg >> 2) & JIMM_MASK;
 }
 
-static inline __init u32 build_func(u32 arg)
+static inline __cpuinit u32 build_func(u32 arg)
 {
 	if (arg & ~FUNC_MASK)
 		printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -210,7 +210,7 @@ static inline __init u32 build_func(u32 arg)
 	return arg & FUNC_MASK;
 }
 
-static inline __init u32 build_set(u32 arg)
+static inline __cpuinit u32 build_set(u32 arg)
 {
 	if (arg & ~SET_MASK)
 		printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -222,7 +222,7 @@ static inline __init u32 build_set(u32 arg)
  * The order of opcode arguments is implicitly left to right,
  * starting with RS and ending with FUNC or IMM.
  */
-static void __init build_insn(u32 **buf, enum opcode opc, ...)
+static void __cpuinit build_insn(u32 **buf, enum opcode opc, ...)
 {
 	struct insn *ip = NULL;
 	unsigned int i;
@@ -375,14 +375,14 @@ I_u3u1u2(_xor)
 I_u2u1u3(_xori)
 
 /* Handle labels. */
-void __init uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
+void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
 {
 	(*lab)->addr = addr;
 	(*lab)->lab = lid;
 	(*lab)++;
 }
 
-int __init uasm_in_compat_space_p(long addr)
+int __cpuinit uasm_in_compat_space_p(long addr)
 {
 	/* Is this address in 32bit compat space? */
 #ifdef CONFIG_64BIT
@@ -392,7 +392,7 @@ int __init uasm_in_compat_space_p(long addr)
 #endif
 }
 
-int __init uasm_rel_highest(long val)
+int __cpuinit uasm_rel_highest(long val)
 {
 #ifdef CONFIG_64BIT
 	return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
@@ -401,7 +401,7 @@ int __init uasm_rel_highest(long val)
 #endif
 }
 
-int __init uasm_rel_higher(long val)
+int __cpuinit uasm_rel_higher(long val)
 {
 #ifdef CONFIG_64BIT
 	return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
@@ -410,17 +410,17 @@ int __init uasm_rel_higher(long val)
 #endif
 }
 
-int __init uasm_rel_hi(long val)
+int __cpuinit uasm_rel_hi(long val)
 {
 	return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
 }
 
-int __init uasm_rel_lo(long val)
+int __cpuinit uasm_rel_lo(long val)
 {
 	return ((val & 0xffff) ^ 0x8000) - 0x8000;
 }
 
-void __init UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
+void __cpuinit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
 {
 	if (!uasm_in_compat_space_p(addr)) {
 		uasm_i_lui(buf, rs, uasm_rel_highest(addr));
@@ -436,7 +436,7 @@ void __init UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
 	uasm_i_lui(buf, rs, uasm_rel_hi(addr));
 }
 
-void __init UASM_i_LA(u32 **buf, unsigned int rs, long addr)
+void __cpuinit UASM_i_LA(u32 **buf, unsigned int rs, long addr)
 {
 	UASM_i_LA_mostly(buf, rs, addr);
 	if (uasm_rel_lo(addr)) {
@@ -448,7 +448,7 @@ void __init UASM_i_LA(u32 **buf, unsigned int rs, long addr)
 }
 
 /* Handle relocations. */
-void __init
+void __cpuinit
 uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
 {
 	(*rel)->addr = addr;
@@ -457,7 +457,7 @@ uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
 	(*rel)++;
 }
 
-static inline void __init
+static inline void __cpuinit
 __resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
 {
 	long laddr = (long)lab->addr;
@@ -474,7 +474,7 @@ __resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
 	}
 }
 
-void __init
+void __cpuinit
 uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
 {
 	struct uasm_label *l;
@@ -485,7 +485,7 @@ uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
 		__resolve_relocs(rel, l);
 }
 
-void __init
+void __cpuinit
 uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
 {
 	for (; rel->lab != UASM_LABEL_INVALID; rel++)
@@ -493,7 +493,7 @@ uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
 		rel->addr += off;
 }
 
-void __init
+void __cpuinit
 uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off)
 {
 	for (; lab->lab != UASM_LABEL_INVALID; lab++)
@@ -501,7 +501,7 @@ uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off)
 		lab->addr += off;
 }
 
-void __init
+void __cpuinit
 uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
 	u32 *end, u32 *target)
 {
@@ -513,7 +513,7 @@ uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
 	uasm_move_labels(lab, first, end, off);
 }
 
-int __init uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
+int __cpuinit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
 {
 	for (; rel->lab != UASM_LABEL_INVALID; rel++) {
 		if (rel->addr == addr
@@ -526,49 +526,49 @@ int __init uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
 }
 
 /* Convenience functions for labeled branches. */
-void __init
+void __cpuinit
 uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	uasm_i_bltz(p, reg, 0);
 }
 
-void __init
+void __cpuinit
 uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	uasm_i_b(p, 0);
 }
 
-void __init
+void __cpuinit
 uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	uasm_i_beqz(p, reg, 0);
 }
 
-void __init
+void __cpuinit
 uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	uasm_i_beqzl(p, reg, 0);
 }
 
-void __init
+void __cpuinit
 uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	uasm_i_bnez(p, reg, 0);
 }
 
-void __init
+void __cpuinit
 uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	uasm_i_bgezl(p, reg, 0);
 }
 
-void __init
+void __cpuinit
 uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
diff --git a/arch/mips/mm/uasm.h b/arch/mips/mm/uasm.h
index a10fc1135c76..fe0574f6e77d 100644
--- a/arch/mips/mm/uasm.h
+++ b/arch/mips/mm/uasm.h
@@ -11,38 +11,38 @@
 #include <linux/types.h>
 
 #define Ip_u1u2u3(op) \
-void __init \
+void __cpuinit \
 uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
 
 #define Ip_u2u1u3(op) \
-void __init \
+void __cpuinit \
 uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
 
 #define Ip_u3u1u2(op) \
-void __init \
+void __cpuinit \
 uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
 
 #define Ip_u1u2s3(op) \
-void __init \
+void __cpuinit \
 uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
 
 #define Ip_u2s3u1(op) \
-void __init \
+void __cpuinit \
 uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c)
 
 #define Ip_u2u1s3(op) \
-void __init \
+void __cpuinit \
 uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
 
 #define Ip_u1u2(op) \
-void __init uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
+void __cpuinit uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
 
 #define Ip_u1s2(op) \
-void __init uasm_i##op(u32 **buf, unsigned int a, signed int b)
+void __cpuinit uasm_i##op(u32 **buf, unsigned int a, signed int b)
 
-#define Ip_u1(op) void __init uasm_i##op(u32 **buf, unsigned int a)
+#define Ip_u1(op) void __cpuinit uasm_i##op(u32 **buf, unsigned int a)
 
-#define Ip_0(op) void __init uasm_i##op(u32 **buf)
+#define Ip_0(op) void __cpuinit uasm_i##op(u32 **buf)
 
 Ip_u2u1s3(_addiu);
 Ip_u3u1u2(_addu);
@@ -98,19 +98,19 @@ struct uasm_label {
 	int lab;
 };
 
-void __init uasm_build_label(struct uasm_label **lab, u32 *addr, int lid);
+void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid);
 #ifdef CONFIG_64BIT
-int __init uasm_in_compat_space_p(long addr);
-int __init uasm_rel_highest(long val);
-int __init uasm_rel_higher(long val);
+int uasm_in_compat_space_p(long addr);
+int uasm_rel_highest(long val);
+int uasm_rel_higher(long val);
 #endif
-int __init uasm_rel_hi(long val);
-int __init uasm_rel_lo(long val);
-void __init UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
-void __init UASM_i_LA(u32 **buf, unsigned int rs, long addr);
+int uasm_rel_hi(long val);
+int uasm_rel_lo(long val);
+void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
+void UASM_i_LA(u32 **buf, unsigned int rs, long addr);
 
 #define UASM_L_LA(lb) \
-static inline void __init uasm_l##lb(struct uasm_label **lab, u32 *addr) \
+static inline void __cpuinit uasm_l##lb(struct uasm_label **lab, u32 *addr) \
 { \
 	uasm_build_label(lab, addr, label##lb); \
 }
@@ -164,29 +164,19 @@ struct uasm_reloc {
 /* This is zero so we can use zeroed label arrays. */
 #define UASM_LABEL_INVALID 0
 
-void __init uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid);
-void __init
-uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
-void __init
-uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off);
-void __init
-uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off);
-void __init
-uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
-	u32 *end, u32 *target);
-int __init uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr);
+void uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid);
+void uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
+void uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off);
+void uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off);
+void uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab,
+	u32 *first, u32 *end, u32 *target);
+int uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr);
 
 /* Convenience functions for labeled branches. */
-void __init
-uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void __init uasm_il_b(u32 **p, struct uasm_reloc **r, int lid);
-void __init
-uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void __init
-uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void __init
-uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void __init
-uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void __init
-uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_b(u32 **p, struct uasm_reloc **r, int lid);
+void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);