aboutsummaryrefslogtreecommitdiffstats
path: root/arch/mips/kernel/branch.c
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>2014-04-29 09:21:24 -0400
committerRalf Baechle <ralf@linux-mips.org>2014-05-23 09:12:37 -0400
commit76fbfc318de2eb0eb4823095ece020f999a17c63 (patch)
tree376004537e00adf3459c1e32a5ba82b5692a6a9f /arch/mips/kernel/branch.c
parent377cb1b6c16a17e2560a9288f9c8ed5dcdbeba39 (diff)
MIPS: Sort out mm_isBranchInstr.
mm_isBranchInstr() did reside in the math emu code even though it logically is separate and also is used outside the math emu code. In addition GCC 4.9.0 leaves the following unnnecessarily bloated function body for a non-microMIPS configuration: <mm_isBranchInstr>: 105c: afa50004 sw a1,4(sp) 1060: afa60008 sw a2,8(sp) 1064: afa7000c sw a3,12(sp) 1068: 03e00008 jr ra 106c: 00001021 move v0,zero which stores arguments that are never going to be used on the stack frame. Move mm_isBranchInstr() from cp1emu.c to branch.c, then split mm_isBranchInstr() into a __mm_isBranchInstr() core and a mm_isBranchInstr() wrapper inline function which only invokes __mm_isBranchInstr() on microMIPS configurations. This shaves off 112 bytes off the kernel and improves code flow a bit. Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/kernel/branch.c')
-rw-r--r--arch/mips/kernel/branch.c196
1 file changed, 196 insertions, 0 deletions
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 4d78bf445a9c..84888d9332b9 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -48,6 +48,202 @@ int __isa_exception_epc(struct pt_regs *regs)
48 return epc; 48 return epc;
49} 49}
50 50
/*
 * (microMIPS) Translate a 3-bit compressed register field into the
 * corresponding full 32-bit GPR number ($16, $17, $2..$7).
 */
static const unsigned int reg16to32map[8] = {
	16, 17, 2, 3, 4, 5, 6, 7
};
54int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
55 unsigned long *contpc)
56{
57 union mips_instruction insn = (union mips_instruction)dec_insn.insn;
58 int bc_false = 0;
59 unsigned int fcr31;
60 unsigned int bit;
61
62 if (!cpu_has_mmips)
63 return 0;
64
65 switch (insn.mm_i_format.opcode) {
66 case mm_pool32a_op:
67 if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) ==
68 mm_pool32axf_op) {
69 switch (insn.mm_i_format.simmediate >>
70 MM_POOL32A_MINOR_SHIFT) {
71 case mm_jalr_op:
72 case mm_jalrhb_op:
73 case mm_jalrs_op:
74 case mm_jalrshb_op:
75 if (insn.mm_i_format.rt != 0) /* Not mm_jr */
76 regs->regs[insn.mm_i_format.rt] =
77 regs->cp0_epc +
78 dec_insn.pc_inc +
79 dec_insn.next_pc_inc;
80 *contpc = regs->regs[insn.mm_i_format.rs];
81 return 1;
82 }
83 }
84 break;
85 case mm_pool32i_op:
86 switch (insn.mm_i_format.rt) {
87 case mm_bltzals_op:
88 case mm_bltzal_op:
89 regs->regs[31] = regs->cp0_epc +
90 dec_insn.pc_inc +
91 dec_insn.next_pc_inc;
92 /* Fall through */
93 case mm_bltz_op:
94 if ((long)regs->regs[insn.mm_i_format.rs] < 0)
95 *contpc = regs->cp0_epc +
96 dec_insn.pc_inc +
97 (insn.mm_i_format.simmediate << 1);
98 else
99 *contpc = regs->cp0_epc +
100 dec_insn.pc_inc +
101 dec_insn.next_pc_inc;
102 return 1;
103 case mm_bgezals_op:
104 case mm_bgezal_op:
105 regs->regs[31] = regs->cp0_epc +
106 dec_insn.pc_inc +
107 dec_insn.next_pc_inc;
108 /* Fall through */
109 case mm_bgez_op:
110 if ((long)regs->regs[insn.mm_i_format.rs] >= 0)
111 *contpc = regs->cp0_epc +
112 dec_insn.pc_inc +
113 (insn.mm_i_format.simmediate << 1);
114 else
115 *contpc = regs->cp0_epc +
116 dec_insn.pc_inc +
117 dec_insn.next_pc_inc;
118 return 1;
119 case mm_blez_op:
120 if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
121 *contpc = regs->cp0_epc +
122 dec_insn.pc_inc +
123 (insn.mm_i_format.simmediate << 1);
124 else
125 *contpc = regs->cp0_epc +
126 dec_insn.pc_inc +
127 dec_insn.next_pc_inc;
128 return 1;
129 case mm_bgtz_op:
130 if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
131 *contpc = regs->cp0_epc +
132 dec_insn.pc_inc +
133 (insn.mm_i_format.simmediate << 1);
134 else
135 *contpc = regs->cp0_epc +
136 dec_insn.pc_inc +
137 dec_insn.next_pc_inc;
138 return 1;
139 case mm_bc2f_op:
140 case mm_bc1f_op:
141 bc_false = 1;
142 /* Fall through */
143 case mm_bc2t_op:
144 case mm_bc1t_op:
145 preempt_disable();
146 if (is_fpu_owner())
147 asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
148 else
149 fcr31 = current->thread.fpu.fcr31;
150 preempt_enable();
151
152 if (bc_false)
153 fcr31 = ~fcr31;
154
155 bit = (insn.mm_i_format.rs >> 2);
156 bit += (bit != 0);
157 bit += 23;
158 if (fcr31 & (1 << bit))
159 *contpc = regs->cp0_epc +
160 dec_insn.pc_inc +
161 (insn.mm_i_format.simmediate << 1);
162 else
163 *contpc = regs->cp0_epc +
164 dec_insn.pc_inc + dec_insn.next_pc_inc;
165 return 1;
166 }
167 break;
168 case mm_pool16c_op:
169 switch (insn.mm_i_format.rt) {
170 case mm_jalr16_op:
171 case mm_jalrs16_op:
172 regs->regs[31] = regs->cp0_epc +
173 dec_insn.pc_inc + dec_insn.next_pc_inc;
174 /* Fall through */
175 case mm_jr16_op:
176 *contpc = regs->regs[insn.mm_i_format.rs];
177 return 1;
178 }
179 break;
180 case mm_beqz16_op:
181 if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0)
182 *contpc = regs->cp0_epc +
183 dec_insn.pc_inc +
184 (insn.mm_b1_format.simmediate << 1);
185 else
186 *contpc = regs->cp0_epc +
187 dec_insn.pc_inc + dec_insn.next_pc_inc;
188 return 1;
189 case mm_bnez16_op:
190 if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0)
191 *contpc = regs->cp0_epc +
192 dec_insn.pc_inc +
193 (insn.mm_b1_format.simmediate << 1);
194 else
195 *contpc = regs->cp0_epc +
196 dec_insn.pc_inc + dec_insn.next_pc_inc;
197 return 1;
198 case mm_b16_op:
199 *contpc = regs->cp0_epc + dec_insn.pc_inc +
200 (insn.mm_b0_format.simmediate << 1);
201 return 1;
202 case mm_beq32_op:
203 if (regs->regs[insn.mm_i_format.rs] ==
204 regs->regs[insn.mm_i_format.rt])
205 *contpc = regs->cp0_epc +
206 dec_insn.pc_inc +
207 (insn.mm_i_format.simmediate << 1);
208 else
209 *contpc = regs->cp0_epc +
210 dec_insn.pc_inc +
211 dec_insn.next_pc_inc;
212 return 1;
213 case mm_bne32_op:
214 if (regs->regs[insn.mm_i_format.rs] !=
215 regs->regs[insn.mm_i_format.rt])
216 *contpc = regs->cp0_epc +
217 dec_insn.pc_inc +
218 (insn.mm_i_format.simmediate << 1);
219 else
220 *contpc = regs->cp0_epc +
221 dec_insn.pc_inc + dec_insn.next_pc_inc;
222 return 1;
223 case mm_jalx32_op:
224 regs->regs[31] = regs->cp0_epc +
225 dec_insn.pc_inc + dec_insn.next_pc_inc;
226 *contpc = regs->cp0_epc + dec_insn.pc_inc;
227 *contpc >>= 28;
228 *contpc <<= 28;
229 *contpc |= (insn.j_format.target << 2);
230 return 1;
231 case mm_jals32_op:
232 case mm_jal32_op:
233 regs->regs[31] = regs->cp0_epc +
234 dec_insn.pc_inc + dec_insn.next_pc_inc;
235 /* Fall through */
236 case mm_j32_op:
237 *contpc = regs->cp0_epc + dec_insn.pc_inc;
238 *contpc >>= 27;
239 *contpc <<= 27;
240 *contpc |= (insn.j_format.target << 1);
241 set_isa16_mode(*contpc);
242 return 1;
243 }
244 return 0;
245}
246
51/* 247/*
52 * Compute return address and emulate branch in microMIPS mode after an 248 * Compute return address and emulate branch in microMIPS mode after an
53 * exception only. It does not handle compact branches/jumps and cannot 249 * exception only. It does not handle compact branches/jumps and cannot