Diffstat (limited to 'arch/mips/mm/tlbex.c')
-rw-r--r--   arch/mips/mm/tlbex.c   96
1 file changed, 48 insertions, 48 deletions
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 01b0961acfb6..a61246d3533d 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -66,7 +66,7 @@ static inline int __maybe_unused r10000_llsc_war(void)
  * why; it's not an issue caused by the core RTL.
  *
  */
-static int __init m4kc_tlbp_war(void)
+static __init int __attribute__((unused)) m4kc_tlbp_war(void)
 {
         return (current_cpu_data.processor_id & 0xffff00) ==
                 (PRID_COMP_MIPS | PRID_IMP_4KC);
@@ -140,7 +140,7 @@ struct insn {
          | (e) << RE_SH \
          | (f) << FUNC_SH)
 
-static struct insn insn_table[] __initdata = {
+static __initdata struct insn insn_table[] = {
         { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
         { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
         { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
@@ -193,7 +193,7 @@ static struct insn insn_table[] __initdata = {
 
 #undef M
 
-static u32 __init build_rs(u32 arg)
+static __init u32 build_rs(u32 arg)
 {
         if (arg & ~RS_MASK)
                 printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -201,7 +201,7 @@ static u32 __init build_rs(u32 arg)
         return (arg & RS_MASK) << RS_SH;
 }
 
-static u32 __init build_rt(u32 arg)
+static __init u32 build_rt(u32 arg)
 {
         if (arg & ~RT_MASK)
                 printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -209,7 +209,7 @@ static u32 __init build_rt(u32 arg)
         return (arg & RT_MASK) << RT_SH;
 }
 
-static u32 __init build_rd(u32 arg)
+static __init u32 build_rd(u32 arg)
 {
         if (arg & ~RD_MASK)
                 printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -217,7 +217,7 @@ static u32 __init build_rd(u32 arg)
         return (arg & RD_MASK) << RD_SH;
 }
 
-static u32 __init build_re(u32 arg)
+static __init u32 build_re(u32 arg)
 {
         if (arg & ~RE_MASK)
                 printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -225,7 +225,7 @@ static u32 __init build_re(u32 arg)
         return (arg & RE_MASK) << RE_SH;
 }
 
-static u32 __init build_simm(s32 arg)
+static __init u32 build_simm(s32 arg)
 {
         if (arg > 0x7fff || arg < -0x8000)
                 printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -233,7 +233,7 @@ static u32 __init build_simm(s32 arg)
         return arg & 0xffff;
 }
 
-static u32 __init build_uimm(u32 arg)
+static __init u32 build_uimm(u32 arg)
 {
         if (arg & ~IMM_MASK)
                 printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -241,7 +241,7 @@ static u32 __init build_uimm(u32 arg)
         return arg & IMM_MASK;
 }
 
-static u32 __init build_bimm(s32 arg)
+static __init u32 build_bimm(s32 arg)
 {
         if (arg > 0x1ffff || arg < -0x20000)
                 printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -252,7 +252,7 @@ static u32 __init build_bimm(s32 arg)
         return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
 }
 
-static u32 __init build_jimm(u32 arg)
+static __init u32 build_jimm(u32 arg)
 {
         if (arg & ~((JIMM_MASK) << 2))
                 printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -260,7 +260,7 @@ static u32 __init build_jimm(u32 arg)
         return (arg >> 2) & JIMM_MASK;
 }
 
-static u32 __init build_func(u32 arg)
+static __init u32 build_func(u32 arg)
 {
         if (arg & ~FUNC_MASK)
                 printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -268,7 +268,7 @@ static u32 __init build_func(u32 arg)
         return arg & FUNC_MASK;
 }
 
-static u32 __init build_set(u32 arg)
+static __init u32 build_set(u32 arg)
 {
         if (arg & ~SET_MASK)
                 printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -315,69 +315,69 @@ static void __init build_insn(u32 **buf, enum opcode opc, ...)
 }
 
 #define I_u1u2u3(op) \
-        static inline void i##op(u32 **buf, unsigned int a, \
+        static inline void __init i##op(u32 **buf, unsigned int a, \
                 unsigned int b, unsigned int c) \
         { \
                 build_insn(buf, insn##op, a, b, c); \
         }
 
 #define I_u2u1u3(op) \
-        static inline void i##op(u32 **buf, unsigned int a, \
+        static inline void __init i##op(u32 **buf, unsigned int a, \
                 unsigned int b, unsigned int c) \
         { \
                 build_insn(buf, insn##op, b, a, c); \
         }
 
 #define I_u3u1u2(op) \
-        static inline void i##op(u32 **buf, unsigned int a, \
+        static inline void __init i##op(u32 **buf, unsigned int a, \
                 unsigned int b, unsigned int c) \
         { \
                 build_insn(buf, insn##op, b, c, a); \
         }
 
 #define I_u1u2s3(op) \
-        static inline void i##op(u32 **buf, unsigned int a, \
+        static inline void __init i##op(u32 **buf, unsigned int a, \
                 unsigned int b, signed int c) \
         { \
                 build_insn(buf, insn##op, a, b, c); \
         }
 
 #define I_u2s3u1(op) \
-        static inline void i##op(u32 **buf, unsigned int a, \
+        static inline void __init i##op(u32 **buf, unsigned int a, \
                 signed int b, unsigned int c) \
         { \
                 build_insn(buf, insn##op, c, a, b); \
         }
 
 #define I_u2u1s3(op) \
-        static inline void i##op(u32 **buf, unsigned int a, \
+        static inline void __init i##op(u32 **buf, unsigned int a, \
                 unsigned int b, signed int c) \
         { \
                 build_insn(buf, insn##op, b, a, c); \
         }
 
 #define I_u1u2(op) \
-        static inline void i##op(u32 **buf, unsigned int a, \
+        static inline void __init i##op(u32 **buf, unsigned int a, \
                 unsigned int b) \
         { \
                 build_insn(buf, insn##op, a, b); \
         }
 
 #define I_u1s2(op) \
-        static inline void i##op(u32 **buf, unsigned int a, \
+        static inline void __init i##op(u32 **buf, unsigned int a, \
                 signed int b) \
         { \
                 build_insn(buf, insn##op, a, b); \
         }
 
 #define I_u1(op) \
-        static inline void i##op(u32 **buf, unsigned int a) \
+        static inline void __init i##op(u32 **buf, unsigned int a) \
         { \
                 build_insn(buf, insn##op, a); \
         }
 
 #define I_0(op) \
-        static inline void i##op(u32 **buf) \
+        static inline void __init i##op(u32 **buf) \
         { \
                 build_insn(buf, insn##op); \
         }
@@ -457,7 +457,7 @@ struct label {
         enum label_id lab;
 };
 
-static void __init build_label(struct label **lab, u32 *addr,
+static __init void build_label(struct label **lab, u32 *addr,
                                enum label_id l)
 {
         (*lab)->addr = addr;
@@ -526,34 +526,34 @@ L_LA(_r3000_write_probe_fail)
 #define i_ehb(buf) i_sll(buf, 0, 0, 3)
 
 #ifdef CONFIG_64BIT
-static int __init __maybe_unused in_compat_space_p(long addr)
+static __init int __maybe_unused in_compat_space_p(long addr)
 {
         /* Is this address in 32bit compat space? */
         return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L);
 }
 
-static int __init __maybe_unused rel_highest(long val)
+static __init int __maybe_unused rel_highest(long val)
 {
         return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
 }
 
-static int __init __maybe_unused rel_higher(long val)
+static __init int __maybe_unused rel_higher(long val)
 {
         return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
 }
 #endif
 
-static int __init rel_hi(long val)
+static __init int rel_hi(long val)
 {
         return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
 }
 
-static int __init rel_lo(long val)
+static __init int rel_lo(long val)
 {
         return ((val & 0xffff) ^ 0x8000) - 0x8000;
 }
 
-static void __init i_LA_mostly(u32 **buf, unsigned int rs, long addr)
+static __init void i_LA_mostly(u32 **buf, unsigned int rs, long addr)
 {
 #ifdef CONFIG_64BIT
         if (!in_compat_space_p(addr)) {
@@ -571,7 +571,7 @@ static void __init i_LA_mostly(u32 **buf, unsigned int rs, long addr)
         i_lui(buf, rs, rel_hi(addr));
 }
 
-static void __init __maybe_unused i_LA(u32 **buf, unsigned int rs,
+static __init void __maybe_unused i_LA(u32 **buf, unsigned int rs,
                                        long addr)
 {
         i_LA_mostly(buf, rs, addr);
@@ -589,7 +589,7 @@ struct reloc {
         enum label_id lab;
 };
 
-static void __init r_mips_pc16(struct reloc **rel, u32 *addr,
+static __init void r_mips_pc16(struct reloc **rel, u32 *addr,
                                enum label_id l)
 {
         (*rel)->addr = addr;
@@ -614,7 +614,7 @@ static inline void __resolve_relocs(struct reloc *rel, struct label *lab)
         }
 }
 
-static void __init resolve_relocs(struct reloc *rel, struct label *lab)
+static __init void resolve_relocs(struct reloc *rel, struct label *lab)
 {
         struct label *l;
 
@@ -624,7 +624,7 @@ static void __init resolve_relocs(struct reloc *rel, struct label *lab)
                                 __resolve_relocs(rel, l);
 }
 
-static void __init move_relocs(struct reloc *rel, u32 *first, u32 *end,
+static __init void move_relocs(struct reloc *rel, u32 *first, u32 *end,
                                long off)
 {
         for (; rel->lab != label_invalid; rel++)
@@ -632,7 +632,7 @@ static void __init move_relocs(struct reloc *rel, u32 *first, u32 *end,
                         rel->addr += off;
 }
 
-static void __init move_labels(struct label *lab, u32 *first, u32 *end,
+static __init void move_labels(struct label *lab, u32 *first, u32 *end,
                                long off)
 {
         for (; lab->lab != label_invalid; lab++)
@@ -640,7 +640,7 @@ static void __init move_labels(struct label *lab, u32 *first, u32 *end,
                         lab->addr += off;
 }
 
-static void __init copy_handler(struct reloc *rel, struct label *lab,
+static __init void copy_handler(struct reloc *rel, struct label *lab,
                                 u32 *first, u32 *end, u32 *target)
 {
         long off = (long)(target - first);
@@ -651,7 +651,7 @@ static void __init copy_handler(struct reloc *rel, struct label *lab,
         move_labels(lab, first, end, off);
 }
 
-static int __init __maybe_unused insn_has_bdelay(struct reloc *rel,
+static __init int __maybe_unused insn_has_bdelay(struct reloc *rel,
                                                  u32 *addr)
 {
         for (; rel->lab != label_invalid; rel++) {
@@ -743,11 +743,11 @@ il_bgez(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
  * We deliberately chose a buffer size of 128, so we won't scribble
  * over anything important on overflow before we panic.
  */
-static u32 tlb_handler[128] __initdata;
+static __initdata u32 tlb_handler[128];
 
 /* simply assume worst case size for labels and relocs */
-static struct label labels[128] __initdata;
-static struct reloc relocs[128] __initdata;
+static __initdata struct label labels[128];
+static __initdata struct reloc relocs[128];
 
 /*
  * The R3000 TLB handler is simple.
@@ -801,7 +801,7 @@ static void __init build_r3000_tlb_refill_handler(void)
  * other one.To keep things simple, we first assume linear space,
  * then we relocate it to the final handler layout as needed.
  */
-static u32 final_handler[64] __initdata;
+static __initdata u32 final_handler[64];
 
 /*
  * Hazards
@@ -825,7 +825,7 @@ static u32 final_handler[64] __initdata;
  *
  * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
  */
-static void __init __maybe_unused build_tlb_probe_entry(u32 **p)
+static __init void __maybe_unused build_tlb_probe_entry(u32 **p)
 {
         switch (current_cpu_type()) {
         /* Found by experiment: R4600 v2.0 needs this, too. */
@@ -849,7 +849,7 @@ static void __init __maybe_unused build_tlb_probe_entry(u32 **p)
  */
 enum tlb_write_entry { tlb_random, tlb_indexed };
 
-static void __init build_tlb_write_entry(u32 **p, struct label **l,
+static __init void build_tlb_write_entry(u32 **p, struct label **l,
                                          struct reloc **r,
                                          enum tlb_write_entry wmode)
 {
@@ -993,7 +993,7 @@ static void __init build_tlb_write_entry(u32 **p, struct label **l,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pmd entry.
  */
-static void __init
+static __init void
 build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
                  unsigned int tmp, unsigned int ptr)
 {
@@ -1054,7 +1054,7 @@ build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
  * BVADDR is the faulting address, PTR is scratch.
  * PTR will hold the pgd for vmalloc.
  */
-static void __init
+static __init void
 build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r,
                         unsigned int bvaddr, unsigned int ptr)
 {
@@ -1118,7 +1118,7 @@ build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r,
  * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
  */
-static void __init __maybe_unused
+static __init void __maybe_unused
 build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 {
         long pgdc = (long)pgd_current;
@@ -1153,7 +1153,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 
 #endif /* !CONFIG_64BIT */
 
-static void __init build_adjust_context(u32 **p, unsigned int ctx)
+static __init void build_adjust_context(u32 **p, unsigned int ctx)
 {
         unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
         unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
@@ -1179,7 +1179,7 @@ static void __init build_adjust_context(u32 **p, unsigned int ctx)
         i_andi(p, ctx, ctx, mask);
 }
 
-static void __init build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+static __init void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 {
         /*
          * Bug workaround for the Nevada. It seems as if under certain
@@ -1204,7 +1204,7 @@ static void __init build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
         i_ADDU(p, ptr, ptr, tmp); /* add in offset */
 }
 
-static void __init build_update_entries(u32 **p, unsigned int tmp,
+static __init void build_update_entries(u32 **p, unsigned int tmp,
                                         unsigned int ptep)
 {
         /*
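
Note: every hunk in this diff is mechanical — it either moves the __init/__initdata
annotation to the other side of the type (e.g. "static u32 __init build_rs(...)" vs
"static __init u32 build_rs(...)") or spells __maybe_unused out as
__attribute__((unused)), which is what the kernel macro expands to; the generated
code should be identical either way. The following is a minimal sketch (plain C,
outside the kernel, with simplified stand-in macro definitions — the real ones live
in <linux/init.h> and the compiler headers) showing that GCC accepts both placements
of the attribute:

/*
 * Stand-in definitions for illustration only.  In the kernel, __init places
 * the function in the .init.text section (freed after boot) and
 * __maybe_unused expands to __attribute__((unused)).
 */
#define __init          /* kernel: __attribute__((__section__(".init.text"))) */
#define __maybe_unused  __attribute__((unused))

/* GCC accepts the attribute list on either side of the return type. */
static int __init __maybe_unused style_a(void)           /* one side of the diff  */
{
        return 0;
}

static __init int __attribute__((unused)) style_b(void)  /* the other side        */
{
        return 0;
}

int main(void)
{
        return style_a() + style_b();
}

Both declarations compile to the same function; which ordering to use is purely a
style choice, and that consistency is all this patch changes.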