author		Atsushi Nemoto <anemo@mba.ocn.ne.jp>	2006-10-25 11:08:31 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2006-11-29 20:14:44 -0500
commit		656be92f9ae194ed62bc81310a4589a7cd765f13
tree		5fb14d0d7d9cd2cab2cd83a1eea38c3c964f3054 /arch/mips/mm/tlbex.c
parent		56ae58333031bb0564c141f955d1e42276cade55
[MIPS] Load modules to CKSEG0 if CONFIG_BUILD_ELF64=n
This is a patch to load 64-bit modules to CKSEG0 so that they can be
compiled with the -msym32 option.  This makes each module ~10% smaller.

* introduce MODULE_START and MODULE_END
* custom module_alloc()
* PGD for modules
* change XTLB refill handler synthesizer
* enable -msym32 for modules again
  (revert ca78b1a5c6a6e70e052d3ea253828e49b5d07c8a)

The new XTLB refill handler looks like this:

80000080 dmfc0 k0,C0_BADVADDR
80000084 bltz k0,800000e4		# goto l_module_alloc
80000088 lui k1,0x8046			# %high(pgd_current)
8000008c ld k1,24600(k1)		# %low(pgd_current)
80000090 dsrl k0,k0,0x1b		# l_vmalloc_done:
80000094 andi k0,k0,0x1ff8
80000098 daddu k1,k1,k0
8000009c dmfc0 k0,C0_BADVADDR
800000a0 ld k1,0(k1)
800000a4 dsrl k0,k0,0x12
800000a8 andi k0,k0,0xff8
800000ac daddu k1,k1,k0
800000b0 dmfc0 k0,C0_XCONTEXT
800000b4 ld k1,0(k1)
800000b8 andi k0,k0,0xff0
800000bc daddu k1,k1,k0
800000c0 ld k0,0(k1)
800000c4 ld k1,8(k1)
800000c8 dsrl k0,k0,0x6
800000cc mtc0 k0,C0_ENTRYLO0
800000d0 dsrl k1,k1,0x6
800000d4 mtc0 k1,C0_ENTRYLO1
800000d8 nop
800000dc tlbwr
800000e0 eret
800000e4 dsll k1,k0,0x2			# l_module_alloc:
800000e8 bgez k1,80000008		# goto l_vmalloc
800000ec lui k1,0xc000
800000f0 dsubu k0,k0,k1
800000f4 lui k1,0x8046			# %high(module_pg_dir)
800000f8 beq zero,zero,80000000
800000fc nop
80000000 beq zero,zero,80000090		# goto l_vmalloc_done
80000004 daddiu k1,k1,0x4000
80000008 dsll32 k1,k1,0x0		# l_vmalloc:
8000000c dsubu k0,k0,k1
80000010 beq zero,zero,80000090		# goto l_vmalloc_done
80000014 lui k1,0x8046			# %high(swapper_pg_dir)

Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
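For context, the "custom module_alloc()" item above means the module loader hands out
virtual addresses from the new MODULE_START..MODULE_END window instead of the generic
vmalloc range, so every module symbol fits in a sign-extended 32-bit address. The sketch
below is illustrative only and is not the code from this patch; it assumes the 2.6-era
__get_vm_area()/__vmalloc_area() helpers:

#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>

void *module_alloc(unsigned long size)
{
#ifdef MODULE_START
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size)
		return NULL;

	/* Carve a virtual range out of the MODULE_START..MODULE_END window. */
	area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END);
	if (!area)
		return NULL;

	/* Back the range with pages, as vmalloc() would. */
	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
#else
	/* Without a dedicated module window, fall back to plain vmalloc(). */
	return size ? vmalloc(size) : NULL;
#endif
}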
Diffstat (limited to 'arch/mips/mm/tlbex.c')
-rw-r--r--	arch/mips/mm/tlbex.c	55
1 file changed, 55 insertions(+), 0 deletions(-)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index fec318a1c8c5..492c518e7ba5 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -423,6 +423,9 @@ enum label_id {
 	label_invalid,
 	label_second_part,
 	label_leave,
+#ifdef MODULE_START
+	label_module_alloc,
+#endif
 	label_vmalloc,
 	label_vmalloc_done,
 	label_tlbw_hazard,
@@ -455,6 +458,9 @@ static __init void build_label(struct label **lab, u32 *addr,
 
 L_LA(_second_part)
 L_LA(_leave)
+#ifdef MODULE_START
+L_LA(_module_alloc)
+#endif
 L_LA(_vmalloc)
 L_LA(_vmalloc_done)
 L_LA(_tlbw_hazard)
@@ -686,6 +692,13 @@ static void __init il_bgezl(u32 **p, struct reloc **r, unsigned int reg,
 	i_bgezl(p, reg, 0);
 }
 
+static void __init __attribute__((unused))
+il_bgez(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
+{
+	r_mips_pc16(r, *p, l);
+	i_bgez(p, reg, 0);
+}
+
 /* The only general purpose registers allowed in TLB handlers. */
 #define K0		26
 #define K1		27
@@ -970,7 +983,11 @@ build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
 	 * The vmalloc handling is not in the hotpath.
 	 */
 	i_dmfc0(p, tmp, C0_BADVADDR);
+#ifdef MODULE_START
+	il_bltz(p, r, tmp, label_module_alloc);
+#else
 	il_bltz(p, r, tmp, label_vmalloc);
+#endif
 	/* No i_nop needed here, since the next insn doesn't touch TMP. */
 
 #ifdef CONFIG_SMP
@@ -1023,8 +1040,46 @@ build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r,
 {
 	long swpd = (long)swapper_pg_dir;
 
+#ifdef MODULE_START
+	long modd = (long)module_pg_dir;
+
+	l_module_alloc(l, *p);
+	/*
+	 * Assumption:
+	 *  VMALLOC_START >= 0xc000000000000000UL
+	 *  MODULE_START >= 0xe000000000000000UL
+	 */
+	i_SLL(p, ptr, bvaddr, 2);
+	il_bgez(p, r, ptr, label_vmalloc);
+
+	if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START)) {
+		i_lui(p, ptr, rel_hi(MODULE_START)); /* delay slot */
+	} else {
+		/* unlikely configuration */
+		i_nop(p); /* delay slot */
+		i_LA(p, ptr, MODULE_START);
+	}
+	i_dsubu(p, bvaddr, bvaddr, ptr);
+
+	if (in_compat_space_p(modd) && !rel_lo(modd)) {
+		il_b(p, r, label_vmalloc_done);
+		i_lui(p, ptr, rel_hi(modd));
+	} else {
+		i_LA_mostly(p, ptr, modd);
+		il_b(p, r, label_vmalloc_done);
+		i_daddiu(p, ptr, ptr, rel_lo(modd));
+	}
+
+	l_vmalloc(l, *p);
+	if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START) &&
+	    MODULE_START << 32 == VMALLOC_START)
+		i_dsll32(p, ptr, ptr, 0);	/* typical case */
+	else
+		i_LA(p, ptr, VMALLOC_START);
+#else
 	l_vmalloc(l, *p);
 	i_LA(p, ptr, VMALLOC_START);
+#endif
 	i_dsubu(p, bvaddr, bvaddr, ptr);
 
 	if (in_compat_space_p(swpd) && !rel_lo(swpd)) {