Diffstat (limited to 'arch/mips/mm/tlbex.c')
-rw-r--r--  arch/mips/mm/tlbex.c  |  70
1 file changed, 35 insertions(+), 35 deletions(-)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 218a6cc415e8..3a93d4ce2703 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -60,7 +60,7 @@ static inline int __maybe_unused r10000_llsc_war(void)
  * why; it's not an issue caused by the core RTL.
  *
  */
-static int __init m4kc_tlbp_war(void)
+static int __cpuinit m4kc_tlbp_war(void)
 {
         return (current_cpu_data.processor_id & 0xffff00) ==
                 (PRID_COMP_MIPS | PRID_IMP_4KC);
@@ -144,16 +144,16 @@ static inline void dump_handler(const u32 *handler, int count)
  * We deliberately chose a buffer size of 128, so we won't scribble
  * over anything important on overflow before we panic.
  */
-static u32 tlb_handler[128] __initdata;
+static u32 tlb_handler[128] __cpuinitdata;
 
 /* simply assume worst case size for labels and relocs */
-static struct uasm_label labels[128] __initdata;
-static struct uasm_reloc relocs[128] __initdata;
+static struct uasm_label labels[128] __cpuinitdata;
+static struct uasm_reloc relocs[128] __cpuinitdata;
 
 /*
  * The R3000 TLB handler is simple.
  */
-static void __init build_r3000_tlb_refill_handler(void)
+static void __cpuinit build_r3000_tlb_refill_handler(void)
 {
         long pgdc = (long)pgd_current;
         u32 *p;
@@ -197,7 +197,7 @@ static void __init build_r3000_tlb_refill_handler(void)
  * other one.To keep things simple, we first assume linear space,
  * then we relocate it to the final handler layout as needed.
  */
-static u32 final_handler[64] __initdata;
+static u32 final_handler[64] __cpuinitdata;
 
 /*
  * Hazards
@@ -221,7 +221,7 @@ static u32 final_handler[64] __initdata;
  *
  * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
  */
-static void __init __maybe_unused build_tlb_probe_entry(u32 **p)
+static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
 {
         switch (current_cpu_type()) {
         /* Found by experiment: R4600 v2.0 needs this, too. */
@@ -245,7 +245,7 @@ static void __init __maybe_unused build_tlb_probe_entry(u32 **p)
  */
 enum tlb_write_entry { tlb_random, tlb_indexed };
 
-static void __init build_tlb_write_entry(u32 **p, struct uasm_label **l,
+static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
                                           struct uasm_reloc **r,
                                           enum tlb_write_entry wmode)
 {
@@ -389,7 +389,7 @@ static void __init build_tlb_write_entry(u32 **p, struct uasm_label **l,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pmd entry.
  */
-static void __init
+static void __cpuinit
 build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                  unsigned int tmp, unsigned int ptr)
 {
@@ -450,7 +450,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
  * BVADDR is the faulting address, PTR is scratch.
  * PTR will hold the pgd for vmalloc.
  */
-static void __init
+static void __cpuinit
 build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                         unsigned int bvaddr, unsigned int ptr)
 {
@@ -522,7 +522,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pgd entry.
  */
-static void __init __maybe_unused
+static void __cpuinit __maybe_unused
 build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 {
         long pgdc = (long)pgd_current;
@@ -557,7 +557,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 
 #endif /* !CONFIG_64BIT */
 
-static void __init build_adjust_context(u32 **p, unsigned int ctx)
+static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
 {
         unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
         unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
@@ -583,7 +583,7 @@ static void __init build_adjust_context(u32 **p, unsigned int ctx)
         uasm_i_andi(p, ctx, ctx, mask);
 }
 
-static void __init build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 {
         /*
          * Bug workaround for the Nevada. It seems as if under certain
@@ -608,7 +608,7 @@ static void __init build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
         UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
 }
 
-static void __init build_update_entries(u32 **p, unsigned int tmp,
+static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
                                         unsigned int ptep)
 {
         /*
@@ -651,7 +651,7 @@ static void __init build_update_entries(u32 **p, unsigned int tmp,
 #endif
 }
 
-static void __init build_r4000_tlb_refill_handler(void)
+static void __cpuinit build_r4000_tlb_refill_handler(void)
 {
         u32 *p = tlb_handler;
         struct uasm_label *l = labels;
@@ -783,7 +783,7 @@ u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
 u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
 u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
 
-static void __init
+static void __cpuinit
 iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr)
 {
 #ifdef CONFIG_SMP
@@ -803,7 +803,7 @@ iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr)
 #endif
 }
 
-static void __init
+static void __cpuinit
 iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
         unsigned int mode)
 {
@@ -863,7 +863,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
  * the page table where this PTE is located, PTE will be re-loaded
  * with it's original value.
  */
-static void __init
+static void __cpuinit
 build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                   unsigned int pte, unsigned int ptr, enum label_id lid)
 {
@@ -874,7 +874,7 @@ build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 }
 
 /* Make PTE valid, store result in PTR. */
-static void __init
+static void __cpuinit
 build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
                  unsigned int ptr)
 {
@@ -887,7 +887,7 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
  * Check if PTE can be written to, if not branch to LABEL. Regardless
  * restore PTE with value from PTR when done.
  */
-static void __init
+static void __cpuinit
 build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                    unsigned int pte, unsigned int ptr, enum label_id lid)
 {
@@ -900,7 +900,7 @@ build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 /* Make PTE writable, update software status bits as well, then store
  * at PTR.
  */
-static void __init
+static void __cpuinit
 build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
                  unsigned int ptr)
 {
@@ -914,7 +914,7 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
  * Check if PTE can be modified, if not branch to LABEL. Regardless
  * restore PTE with value from PTR when done.
  */
-static void __init
+static void __cpuinit
 build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                      unsigned int pte, unsigned int ptr, enum label_id lid)
 {
@@ -931,7 +931,7 @@ build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
  * This places the pte into ENTRYLO0 and writes it with tlbwi.
  * Then it returns.
  */
-static void __init
+static void __cpuinit
 build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
 {
         uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
@@ -947,7 +947,7 @@ build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
  * may have the probe fail bit set as a result of a trap on a
  * kseg2 access, i.e. without refill. Then it returns.
  */
-static void __init
+static void __cpuinit
 build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
                              struct uasm_reloc **r, unsigned int pte,
                              unsigned int tmp)
@@ -965,7 +965,7 @@ build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
         uasm_i_rfe(p); /* branch delay */
 }
 
-static void __init
+static void __cpuinit
 build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
                                    unsigned int ptr)
 {
@@ -985,7 +985,7 @@ build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
         uasm_i_tlbp(p); /* load delay */
 }
 
-static void __init build_r3000_tlb_load_handler(void)
+static void __cpuinit build_r3000_tlb_load_handler(void)
 {
         u32 *p = handle_tlbl;
         struct uasm_label *l = labels;
@@ -1015,7 +1015,7 @@ static void __init build_r3000_tlb_load_handler(void)
         dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
 }
 
-static void __init build_r3000_tlb_store_handler(void)
+static void __cpuinit build_r3000_tlb_store_handler(void)
 {
         u32 *p = handle_tlbs;
         struct uasm_label *l = labels;
@@ -1045,7 +1045,7 @@ static void __init build_r3000_tlb_store_handler(void)
         dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
 }
 
-static void __init build_r3000_tlb_modify_handler(void)
+static void __cpuinit build_r3000_tlb_modify_handler(void)
 {
         u32 *p = handle_tlbm;
         struct uasm_label *l = labels;
@@ -1078,7 +1078,7 @@ static void __init build_r3000_tlb_modify_handler(void)
 /*
  * R4000 style TLB load/store/modify handlers.
  */
-static void __init
+static void __cpuinit
 build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
                                    struct uasm_reloc **r, unsigned int pte,
                                    unsigned int ptr)
@@ -1103,7 +1103,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
         build_tlb_probe_entry(p);
 }
 
-static void __init
+static void __cpuinit
 build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
                                    struct uasm_reloc **r, unsigned int tmp,
                                    unsigned int ptr)
@@ -1120,7 +1120,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
 #endif
 }
 
-static void __init build_r4000_tlb_load_handler(void)
+static void __cpuinit build_r4000_tlb_load_handler(void)
 {
         u32 *p = handle_tlbl;
         struct uasm_label *l = labels;
@@ -1160,7 +1160,7 @@ static void __init build_r4000_tlb_load_handler(void)
         dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
 }
 
-static void __init build_r4000_tlb_store_handler(void)
+static void __cpuinit build_r4000_tlb_store_handler(void)
 {
         u32 *p = handle_tlbs;
         struct uasm_label *l = labels;
@@ -1191,7 +1191,7 @@ static void __init build_r4000_tlb_store_handler(void)
         dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
 }
 
-static void __init build_r4000_tlb_modify_handler(void)
+static void __cpuinit build_r4000_tlb_modify_handler(void)
 {
         u32 *p = handle_tlbm;
         struct uasm_label *l = labels;
@@ -1223,7 +1223,7 @@ static void __init build_r4000_tlb_modify_handler(void)
         dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
 }
 
-void __init build_tlb_refill_handler(void)
+void __cpuinit build_tlb_refill_handler(void)
 {
         /*
          * The refill handler is generated per-CPU, multi-node systems
@@ -1269,7 +1269,7 @@ void __init build_tlb_refill_handler(void)
         }
 }
 
-void __init flush_tlb_handlers(void)
+void __cpuinit flush_tlb_handlers(void)
 {
         flush_icache_range((unsigned long)handle_tlbl,
                            (unsigned long)handle_tlbl + sizeof(handle_tlbl));
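
The change above is mechanical: every TLB-handler generation routine and its scratch data move from __init/__initdata to __cpuinit/__cpuinitdata, since these handlers are generated per CPU at bring-up time (as the comment in build_tlb_refill_handler notes), not only during early boot. As a rough, hypothetical sketch of how section-placement annotations of this kind are commonly built (illustration only; the demo_* names are invented and this is not the kernel's actual include/linux/init.h), consider:

/*
 * Hypothetical sketch -- not the real kernel macros.
 * Annotations like these place functions and data into dedicated ELF
 * sections via compiler attributes; the linker script then groups those
 * sections so their memory can be discarded once it is no longer needed
 * (after boot for init-only code, or kept around while CPUs may still be
 * brought up).
 */
#define demo_init        __attribute__((__section__(".demo.init.text")))
#define demo_initdata    __attribute__((__section__(".demo.init.data")))
#define demo_cpuinit     __attribute__((__section__(".demo.cpuinit.text")))
#define demo_cpuinitdata __attribute__((__section__(".demo.cpuinit.data")))

static unsigned int demo_scratch[128] demo_cpuinitdata; /* kept for CPU bring-up */

static int demo_cpuinit demo_build_handler(void)        /* runs at CPU bring-up */
{
        demo_scratch[0] = 0;
        return demo_scratch[0];
}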