Diffstat (limited to 'arch/mips/mm/tlbex.c')
-rw-r--r--  arch/mips/mm/tlbex.c  148
1 file changed, 69 insertions, 79 deletions
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 9ab0f907a52c..556cb4815770 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -136,7 +136,7 @@ static int scratchpad_offset(int i)
  * why; it's not an issue caused by the core RTL.
  *
  */
-static int __cpuinit m4kc_tlbp_war(void)
+static int m4kc_tlbp_war(void)
 {
 	return (current_cpu_data.processor_id & 0xffff00) ==
 		(PRID_COMP_MIPS | PRID_IMP_4KC);
@@ -181,11 +181,9 @@ UASM_L_LA(_large_segbits_fault)
 UASM_L_LA(_tlb_huge_update)
 #endif
 
-static int __cpuinitdata hazard_instance;
+static int hazard_instance;
 
-static void __cpuinit uasm_bgezl_hazard(u32 **p,
-					struct uasm_reloc **r,
-					int instance)
+static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
 {
 	switch (instance) {
 	case 0 ... 7:
@@ -196,9 +194,7 @@ static void __cpuinit uasm_bgezl_hazard(u32 **p,
 	}
 }
 
-static void __cpuinit uasm_bgezl_label(struct uasm_label **l,
-				       u32 **p,
-				       int instance)
+static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
 {
 	switch (instance) {
 	case 0 ... 7:
@@ -295,15 +291,15 @@ static inline void dump_handler(const char *symbol, const u32 *handler, int coun
  * We deliberately chose a buffer size of 128, so we won't scribble
  * over anything important on overflow before we panic.
  */
-static u32 tlb_handler[128] __cpuinitdata;
+static u32 tlb_handler[128];
 
 /* simply assume worst case size for labels and relocs */
-static struct uasm_label labels[128] __cpuinitdata;
-static struct uasm_reloc relocs[128] __cpuinitdata;
+static struct uasm_label labels[128];
+static struct uasm_reloc relocs[128];
 
-static int check_for_high_segbits __cpuinitdata;
+static int check_for_high_segbits;
 
-static unsigned int kscratch_used_mask __cpuinitdata;
+static unsigned int kscratch_used_mask;
 
 static inline int __maybe_unused c0_kscratch(void)
 {
@@ -316,7 +312,7 @@ static inline int __maybe_unused c0_kscratch(void)
 	}
 }
 
-static int __cpuinit allocate_kscratch(void)
+static int allocate_kscratch(void)
 {
 	int r;
 	unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
@@ -333,11 +329,11 @@ static int __cpuinit allocate_kscratch(void)
 	return r;
 }
 
-static int scratch_reg __cpuinitdata;
-static int pgd_reg __cpuinitdata;
+static int scratch_reg;
+static int pgd_reg;
 enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
 
-static struct work_registers __cpuinit build_get_work_registers(u32 **p)
+static struct work_registers build_get_work_registers(u32 **p)
 {
 	struct work_registers r;
 
@@ -393,7 +389,7 @@ static struct work_registers __cpuinit build_get_work_registers(u32 **p)
 	return r;
 }
 
-static void __cpuinit build_restore_work_registers(u32 **p)
+static void build_restore_work_registers(u32 **p)
 {
 	if (scratch_reg >= 0) {
 		UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
@@ -418,7 +414,7 @@ extern unsigned long pgd_current[];
 /*
  * The R3000 TLB handler is simple.
  */
-static void __cpuinit build_r3000_tlb_refill_handler(void)
+static void build_r3000_tlb_refill_handler(void)
 {
 	long pgdc = (long)pgd_current;
 	u32 *p;
@@ -463,7 +459,7 @@ static void __cpuinit build_r3000_tlb_refill_handler(void)
  * other one.To keep things simple, we first assume linear space,
  * then we relocate it to the final handler layout as needed.
  */
-static u32 final_handler[64] __cpuinitdata;
+static u32 final_handler[64];
 
 /*
  * Hazards
@@ -487,7 +483,7 @@ static u32 final_handler[64] __cpuinitdata;
  *
  * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
  */
-static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
+static void __maybe_unused build_tlb_probe_entry(u32 **p)
 {
 	switch (current_cpu_type()) {
 	/* Found by experiment: R4600 v2.0/R4700 needs this, too. */
@@ -511,9 +507,9 @@ static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
  */
 enum tlb_write_entry { tlb_random, tlb_indexed };
 
-static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
-					    struct uasm_reloc **r,
-					    enum tlb_write_entry wmode)
+static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+				  struct uasm_reloc **r,
+				  enum tlb_write_entry wmode)
 {
 	void(*tlbw)(u32 **) = NULL;
 
@@ -647,8 +643,8 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	}
 }
 
-static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
-								  unsigned int reg)
+static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
+							unsigned int reg)
 {
 	if (cpu_has_rixi) {
 		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
@@ -663,11 +659,9 @@ static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 
-static __cpuinit void build_restore_pagemask(u32 **p,
-					     struct uasm_reloc **r,
-					     unsigned int tmp,
-					     enum label_id lid,
-					     int restore_scratch)
+static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
+				   unsigned int tmp, enum label_id lid,
+				   int restore_scratch)
 {
 	if (restore_scratch) {
 		/* Reset default page size */
@@ -706,12 +700,11 @@ static __cpuinit void build_restore_pagemask(u32 **p,
 	}
 }
 
-static __cpuinit void build_huge_tlb_write_entry(u32 **p,
-						 struct uasm_label **l,
-						 struct uasm_reloc **r,
-						 unsigned int tmp,
-						 enum tlb_write_entry wmode,
-						 int restore_scratch)
+static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
+				       struct uasm_reloc **r,
+				       unsigned int tmp,
+				       enum tlb_write_entry wmode,
+				       int restore_scratch)
 {
 	/* Set huge page tlb entry size */
 	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
@@ -726,9 +719,9 @@ static __cpuinit void build_huge_tlb_write_entry(u32 **p,
 /*
  * Check if Huge PTE is present, if so then jump to LABEL.
  */
-static void __cpuinit
+static void
 build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
 		  unsigned int pmd, int lid)
 {
 	UASM_i_LW(p, tmp, 0, pmd);
 	if (use_bbit_insns()) {
@@ -739,9 +732,8 @@ build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
 	}
 }
 
-static __cpuinit void build_huge_update_entries(u32 **p,
-						unsigned int pte,
-						unsigned int tmp)
+static void build_huge_update_entries(u32 **p, unsigned int pte,
+				      unsigned int tmp)
 {
 	int small_sequence;
 
@@ -771,11 +763,10 @@ static __cpuinit void build_huge_update_entries(u32 **p,
 	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
 }
 
-static __cpuinit void build_huge_handler_tail(u32 **p,
-					      struct uasm_reloc **r,
-					      struct uasm_label **l,
-					      unsigned int pte,
-					      unsigned int ptr)
+static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
+				    struct uasm_label **l,
+				    unsigned int pte,
+				    unsigned int ptr)
 {
 #ifdef CONFIG_SMP
 	UASM_i_SC(p, pte, 0, ptr);
@@ -794,7 +785,7 @@ static __cpuinit void build_huge_handler_tail(u32 **p,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pmd entry.
  */
-static void __cpuinit
+static void
 build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 		 unsigned int tmp, unsigned int ptr)
 {
@@ -886,7 +877,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
  * BVADDR is the faulting address, PTR is scratch.
  * PTR will hold the pgd for vmalloc.
  */
-static void __cpuinit
+static void
 build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 			unsigned int bvaddr, unsigned int ptr,
 			enum vmalloc64_mode mode)
@@ -956,7 +947,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pgd entry.
  */
-static void __cpuinit __maybe_unused
+static void __maybe_unused
 build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	long pgdc = (long)pgd_current;
@@ -991,7 +982,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 
 #endif /* !CONFIG_64BIT */
 
-static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
+static void build_adjust_context(u32 **p, unsigned int ctx)
 {
 	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
 	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
@@ -1017,7 +1008,7 @@ static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
 	uasm_i_andi(p, ctx, ctx, mask);
 }
 
-static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	/*
 	 * Bug workaround for the Nevada. It seems as if under certain
@@ -1042,8 +1033,7 @@ static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr
 	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
 }
 
-static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
-					   unsigned int ptep)
+static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 {
 	/*
 	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
@@ -1104,7 +1094,7 @@ struct mips_huge_tlb_info {
 	int restore_scratch;
 };
 
-static struct mips_huge_tlb_info __cpuinit
+static struct mips_huge_tlb_info
 build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 			       struct uasm_reloc **r, unsigned int tmp,
 			       unsigned int ptr, int c0_scratch_reg)
@@ -1282,7 +1272,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
  */
 #define MIPS64_REFILL_INSNS 32
 
-static void __cpuinit build_r4000_tlb_refill_handler(void)
+static void build_r4000_tlb_refill_handler(void)
 {
 	u32 *p = tlb_handler;
 	struct uasm_label *l = labels;
@@ -1462,11 +1452,11 @@ extern u32 handle_tlbm[], handle_tlbm_end[];
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
 extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[];
 
-static void __cpuinit build_r4000_setup_pgd(void)
+static void build_r4000_setup_pgd(void)
 {
 	const int a0 = 4;
 	const int a1 = 5;
-	u32 *p = tlbmiss_handler_setup_pgd_array;
+	u32 *p = tlbmiss_handler_setup_pgd;
 	const int tlbmiss_handler_setup_pgd_size =
 		tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd;
 	struct uasm_label *l = labels;
@@ -1513,7 +1503,7 @@ static void __cpuinit build_r4000_setup_pgd(void)
 }
 #endif
 
-static void __cpuinit
+static void
 iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
 {
 #ifdef CONFIG_SMP
@@ -1533,7 +1523,7 @@ iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
 #endif
 }
 
-static void __cpuinit
+static void
 iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
 	unsigned int mode)
 {
@@ -1593,7 +1583,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
  * the page table where this PTE is located, PTE will be re-loaded
  * with it's original value.
  */
-static void __cpuinit
+static void
 build_pte_present(u32 **p, struct uasm_reloc **r,
 		  int pte, int ptr, int scratch, enum label_id lid)
 {
@@ -1621,7 +1611,7 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
 }
 
 /* Make PTE valid, store result in PTR. */
-static void __cpuinit
+static void
 build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
 		 unsigned int ptr)
 {
@@ -1634,7 +1624,7 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
  * Check if PTE can be written to, if not branch to LABEL. Regardless
  * restore PTE with value from PTR when done.
  */
-static void __cpuinit
+static void
 build_pte_writable(u32 **p, struct uasm_reloc **r,
 		   unsigned int pte, unsigned int ptr, int scratch,
 		   enum label_id lid)
@@ -1654,7 +1644,7 @@ build_pte_writable(u32 **p, struct uasm_reloc **r,
 /* Make PTE writable, update software status bits as well, then store
  * at PTR.
  */
-static void __cpuinit
+static void
 build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
 		 unsigned int ptr)
 {
@@ -1668,7 +1658,7 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
  * Check if PTE can be modified, if not branch to LABEL. Regardless
  * restore PTE with value from PTR when done.
  */
-static void __cpuinit
+static void
 build_pte_modifiable(u32 **p, struct uasm_reloc **r,
 		     unsigned int pte, unsigned int ptr, int scratch,
 		     enum label_id lid)
@@ -1697,7 +1687,7 @@ build_pte_modifiable(u32 **p, struct uasm_reloc **r,
  * This places the pte into ENTRYLO0 and writes it with tlbwi.
  * Then it returns.
  */
-static void __cpuinit
+static void
 build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
 {
 	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
@@ -1713,7 +1703,7 @@ build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
  * may have the probe fail bit set as a result of a trap on a
  * kseg2 access, i.e. without refill. Then it returns.
  */
-static void __cpuinit
+static void
 build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
 			     struct uasm_reloc **r, unsigned int pte,
 			     unsigned int tmp)
@@ -1731,7 +1721,7 @@ build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
 	uasm_i_rfe(p); /* branch delay */
 }
 
-static void __cpuinit
+static void
 build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
 				   unsigned int ptr)
 {
@@ -1751,7 +1741,7 @@ build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
 	uasm_i_tlbp(p); /* load delay */
 }
 
-static void __cpuinit build_r3000_tlb_load_handler(void)
+static void build_r3000_tlb_load_handler(void)
 {
 	u32 *p = handle_tlbl;
 	const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
@@ -1782,7 +1772,7 @@ static void __cpuinit build_r3000_tlb_load_handler(void)
 	dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size);
 }
 
-static void __cpuinit build_r3000_tlb_store_handler(void)
+static void build_r3000_tlb_store_handler(void)
 {
 	u32 *p = handle_tlbs;
 	const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
@@ -1803,7 +1793,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void)
 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
 	uasm_i_nop(&p);
 
-	if (p >= handle_tlbs)
+	if (p >= handle_tlbs_end)
 		panic("TLB store handler fastpath space exceeded");
 
 	uasm_resolve_relocs(relocs, labels);
@@ -1813,7 +1803,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void)
 	dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size);
 }
 
-static void __cpuinit build_r3000_tlb_modify_handler(void)
+static void build_r3000_tlb_modify_handler(void)
 {
 	u32 *p = handle_tlbm;
 	const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
@@ -1848,7 +1838,7 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
 /*
  * R4000 style TLB load/store/modify handlers.
  */
-static struct work_registers __cpuinit
+static struct work_registers
 build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
 				   struct uasm_reloc **r)
 {
@@ -1884,7 +1874,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
 	return wr;
 }
 
-static void __cpuinit
+static void
 build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
 				   struct uasm_reloc **r, unsigned int tmp,
 				   unsigned int ptr)
@@ -1902,7 +1892,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
 #endif
 }
 
-static void __cpuinit build_r4000_tlb_load_handler(void)
+static void build_r4000_tlb_load_handler(void)
 {
 	u32 *p = handle_tlbl;
 	const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
@@ -2085,7 +2075,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size);
 }
 
-static void __cpuinit build_r4000_tlb_store_handler(void)
+static void build_r4000_tlb_store_handler(void)
 {
 	u32 *p = handle_tlbs;
 	const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
@@ -2140,7 +2130,7 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
 	dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size);
 }
 
-static void __cpuinit build_r4000_tlb_modify_handler(void)
+static void build_r4000_tlb_modify_handler(void)
 {
 	u32 *p = handle_tlbm;
 	const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
@@ -2196,7 +2186,7 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
 	dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size);
 }
 
-static void __cpuinit flush_tlb_handlers(void)
+static void flush_tlb_handlers(void)
 {
 	local_flush_icache_range((unsigned long)handle_tlbl,
 				 (unsigned long)handle_tlbl_end);
@@ -2210,7 +2200,7 @@ static void __cpuinit flush_tlb_handlers(void)
 #endif
 }
 
-void __cpuinit build_tlb_refill_handler(void)
+void build_tlb_refill_handler(void)
 {
 	/*
 	 * The refill handler is generated per-CPU, multi-node systems