Diffstat (limited to 'arch/arm/vfp')
-rw-r--r--  arch/arm/vfp/vfp.h        |  8
-rw-r--r--  arch/arm/vfp/vfpdouble.c  | 30
-rw-r--r--  arch/arm/vfp/vfpmodule.c  |  4
-rw-r--r--  arch/arm/vfp/vfpsingle.c  | 35
4 files changed, 55 insertions(+), 22 deletions(-)
diff --git a/arch/arm/vfp/vfp.h b/arch/arm/vfp/vfp.h
index 4b97950984e9..5fbdf81a8aaf 100644
--- a/arch/arm/vfp/vfp.h
+++ b/arch/arm/vfp/vfp.h
@@ -353,3 +353,11 @@ u32 vfp_estimate_sqrt_significand(u32 exponent, u32 significand);
  * A special flag to tell the normalisation code not to normalise.
  */
 #define VFP_NAN_FLAG		0x100
+
+/*
+ * A bit pattern used to indicate the initial (unset) value of the
+ * exception mask, in case nothing handles an instruction.  This
+ * doesn't include the NAN flag, which gets masked out before
+ * we check for an error.
+ */
+#define VFP_EXCEPTION_ERROR	((u32)-1 & ~VFP_NAN_FLAG)
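
Not part of the patch: a small standalone C sketch of what the new sentinel evaluates to and why it must not contain the NaN flag. Only VFP_NAN_FLAG (0x100) and the VFP_EXCEPTION_ERROR definition are taken from the hunk above; the main() scaffolding is purely illustrative. Because the NaN flag is stripped from the accumulated exception word before the error check, a plain (u32)-1 sentinel would stop matching after the mask, whereas the new value survives it.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

#define VFP_NAN_FLAG		0x100
#define VFP_EXCEPTION_ERROR	((u32)-1 & ~VFP_NAN_FLAG)

int main(void)
{
	u32 unhandled = VFP_EXCEPTION_ERROR;

	/* Evaluates to 0xfffffeff: all bits set except the NaN flag bit. */
	printf("VFP_EXCEPTION_ERROR = 0x%08x\n", (unsigned)VFP_EXCEPTION_ERROR);

	/* Stripping the NaN flag leaves the new sentinel intact... */
	printf("new sentinel survives masking: %d\n",
	       (unhandled & ~VFP_NAN_FLAG) == VFP_EXCEPTION_ERROR);

	/* ...whereas the old (u32)-1 sentinel no longer matches itself. */
	printf("old sentinel survives masking: %d\n",
	       (((u32)-1) & ~VFP_NAN_FLAG) == (u32)-1);
	return 0;
}
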
diff --git a/arch/arm/vfp/vfpdouble.c b/arch/arm/vfp/vfpdouble.c
index 009038c8113e..04bd3425b29b 100644
--- a/arch/arm/vfp/vfpdouble.c
+++ b/arch/arm/vfp/vfpdouble.c
@@ -465,7 +465,7 @@ static u32 vfp_double_fcvts(int sd, int unused, int dm, u32 fpscr)
 	 */
 	if (tm & (VFP_INFINITY|VFP_NAN)) {
 		vsd.exponent = 255;
-		if (tm & VFP_NAN)
+		if (tm == VFP_QNAN)
 			vsd.significand |= VFP_SINGLE_SIGNIFICAND_QNAN;
 		goto pack_nan;
 	} else if (tm & VFP_ZERO)
@@ -1127,7 +1127,7 @@ u32 vfp_double_cpdo(u32 inst, u32 fpscr)
1127{ 1127{
1128 u32 op = inst & FOP_MASK; 1128 u32 op = inst & FOP_MASK;
1129 u32 exceptions = 0; 1129 u32 exceptions = 0;
1130 unsigned int dd = vfp_get_dd(inst); 1130 unsigned int dest;
1131 unsigned int dn = vfp_get_dn(inst); 1131 unsigned int dn = vfp_get_dn(inst);
1132 unsigned int dm = vfp_get_dm(inst); 1132 unsigned int dm = vfp_get_dm(inst);
1133 unsigned int vecitr, veclen, vecstride; 1133 unsigned int vecitr, veclen, vecstride;
@@ -1137,10 +1137,20 @@ u32 vfp_double_cpdo(u32 inst, u32 fpscr)
 	vecstride = (1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK)) * 2;
 
 	/*
+	 * fcvtds takes an sN register number as destination, not dN.
+	 * It also always operates on scalars.
+	 */
+	if ((inst & FEXT_MASK) == FEXT_FCVT) {
+		veclen = 0;
+		dest = vfp_get_sd(inst);
+	} else
+		dest = vfp_get_dd(inst);
+
+	/*
 	 * If destination bank is zero, vector length is always '1'.
 	 * ARM DDI0100F C5.1.3, C5.3.2.
 	 */
-	if (FREG_BANK(dd) == 0)
+	if (FREG_BANK(dest) == 0)
 		veclen = 0;
 
 	pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride,
@@ -1153,16 +1163,20 @@ u32 vfp_double_cpdo(u32 inst, u32 fpscr)
 	for (vecitr = 0; vecitr <= veclen; vecitr += 1 << FPSCR_LENGTH_BIT) {
 		u32 except;
 
-		if (op == FOP_EXT)
+		if (op == FOP_EXT && (inst & FEXT_MASK) == FEXT_FCVT)
+			pr_debug("VFP: itr%d (s%u) = op[%u] (d%u)\n",
+				vecitr >> FPSCR_LENGTH_BIT,
+				dest, dn, dm);
+		else if (op == FOP_EXT)
 			pr_debug("VFP: itr%d (d%u) = op[%u] (d%u)\n",
 				vecitr >> FPSCR_LENGTH_BIT,
-				dd, dn, dm);
+				dest, dn, dm);
 		else
 			pr_debug("VFP: itr%d (d%u) = (d%u) op[%u] (d%u)\n",
 				vecitr >> FPSCR_LENGTH_BIT,
-				dd, dn, FOP_TO_IDX(op), dm);
+				dest, dn, FOP_TO_IDX(op), dm);
 
-		except = fop(dd, dn, dm, fpscr);
+		except = fop(dest, dn, dm, fpscr);
 		pr_debug("VFP: itr%d: exceptions=%08x\n",
 			vecitr >> FPSCR_LENGTH_BIT, except);
 
@@ -1180,7 +1194,7 @@ u32 vfp_double_cpdo(u32 inst, u32 fpscr)
 		 * we encounter an exception.  We continue.
 		 */
 
-		dd = FREG_BANK(dd) + ((FREG_IDX(dd) + vecstride) & 6);
+		dest = FREG_BANK(dest) + ((FREG_IDX(dest) + vecstride) & 6);
 		dn = FREG_BANK(dn) + ((FREG_IDX(dn) + vecstride) & 6);
 		if (FREG_BANK(dm) != 0)
 			dm = FREG_BANK(dm) + ((FREG_IDX(dm) + vecstride) & 6);
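
Not part of the patch: a compile-only sketch of the destination selection that the vfpdouble.c hunks above introduce. FEXT_MASK, FEXT_FCVT and the two register accessors are given made-up stand-in values here so the fragment is self-contained (the real definitions live in arch/arm/vfp/vfp.h); only the control flow mirrors vfp_double_cpdo(): fcvt takes a single-precision destination register number and is forced to be a scalar operation, while every other extension op keeps the dN destination and the vector length derived from FPSCR.

#include <stdint.h>

typedef uint32_t u32;

#define FEXT_MASK	0x000f0080u		/* stand-in value, not the real mask */
#define FEXT_FCVT	0x00070080u		/* stand-in value, not the real opcode */

static unsigned int vfp_get_sd_stub(u32 inst) { return (inst >> 12) & 31; }	/* stub */
static unsigned int vfp_get_dd_stub(u32 inst) { return (inst >> 12) & 15; }	/* stub */

/* Mirrors the shape of the new code in vfp_double_cpdo(). */
unsigned int double_cpdo_dest(u32 inst, unsigned int *veclen)
{
	unsigned int dest;

	if ((inst & FEXT_MASK) == FEXT_FCVT) {
		*veclen = 0;			/* fcvt never vectorises */
		dest = vfp_get_sd_stub(inst);	/* sN destination */
	} else {
		dest = vfp_get_dd_stub(inst);	/* dN destination */
	}
	return dest;
}
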
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 9d265d5e748c..4178f6cc3d37 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -131,7 +131,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
 
 	pr_debug("VFP: raising exceptions %08x\n", exceptions);
 
-	if (exceptions == (u32)-1) {
+	if (exceptions == VFP_EXCEPTION_ERROR) {
 		vfp_panic("unhandled bounce");
 		vfp_raise_sigfpe(0, regs);
 		return;
@@ -170,7 +170,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
  */
 static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
 {
-	u32 exceptions = (u32)-1;
+	u32 exceptions = VFP_EXCEPTION_ERROR;
 
 	pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr);
 
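
Not part of the patch: a runnable sketch of the dispatch pattern the two vfpmodule.c hunks rely on. The handler and the 'recognised' flag below are hypothetical stand-ins; only the shape reflects the kernel code: vfp_emulate_instruction() starts from the error sentinel, a handler overwrites it if the instruction is emulated, and vfp_raise_exceptions() treats an untouched sentinel as an unhandled bounce.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

#define VFP_NAN_FLAG		0x100
#define VFP_EXCEPTION_ERROR	((u32)-1 & ~VFP_NAN_FLAG)

/* Hypothetical handler: returns whatever exception flags it raised. */
static u32 handle_instruction(u32 inst, u32 fpscr)
{
	(void)inst; (void)fpscr;
	return 0;			/* emulated cleanly, no exceptions */
}

static u32 emulate(u32 inst, u32 fpscr, int recognised)
{
	u32 exceptions = VFP_EXCEPTION_ERROR;	/* "nothing handled it yet" */

	if (recognised)
		exceptions = handle_instruction(inst, fpscr);

	return exceptions;
}

int main(void)
{
	if (emulate(0, 0, 0) == VFP_EXCEPTION_ERROR)
		printf("unhandled bounce\n");	/* mirrors the vfp_panic() path */
	if (emulate(0, 0, 1) != VFP_EXCEPTION_ERROR)
		printf("instruction emulated, exceptions=0\n");
	return 0;
}
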
diff --git a/arch/arm/vfp/vfpsingle.c b/arch/arm/vfp/vfpsingle.c
index dae2c2f46052..78d7cac5f36b 100644
--- a/arch/arm/vfp/vfpsingle.c
+++ b/arch/arm/vfp/vfpsingle.c
@@ -506,7 +506,7 @@ static u32 vfp_single_fcvtd(int dd, int unused, s32 m, u32 fpscr)
 	 */
 	if (tm & (VFP_INFINITY|VFP_NAN)) {
 		vdd.exponent = 2047;
-		if (tm & VFP_NAN)
+		if (tm == VFP_QNAN)
 			vdd.significand |= VFP_DOUBLE_SIGNIFICAND_QNAN;
 		goto pack_nan;
 	} else if (tm & VFP_ZERO)
@@ -514,10 +514,6 @@ static u32 vfp_single_fcvtd(int dd, int unused, s32 m, u32 fpscr)
 	else
 		vdd.exponent = vsm.exponent + (1023 - 127);
 
-	/*
-	 * Technically, if bit 0 of dd is set, this is an invalid
-	 * instruction.  However, we ignore this for efficiency.
-	 */
 	return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fcvtd");
 
 pack_nan:
@@ -1174,7 +1170,7 @@ u32 vfp_single_cpdo(u32 inst, u32 fpscr)
1174{ 1170{
1175 u32 op = inst & FOP_MASK; 1171 u32 op = inst & FOP_MASK;
1176 u32 exceptions = 0; 1172 u32 exceptions = 0;
1177 unsigned int sd = vfp_get_sd(inst); 1173 unsigned int dest;
1178 unsigned int sn = vfp_get_sn(inst); 1174 unsigned int sn = vfp_get_sn(inst);
1179 unsigned int sm = vfp_get_sm(inst); 1175 unsigned int sm = vfp_get_sm(inst);
1180 unsigned int vecitr, veclen, vecstride; 1176 unsigned int vecitr, veclen, vecstride;
@@ -1184,10 +1180,22 @@ u32 vfp_single_cpdo(u32 inst, u32 fpscr)
 	vecstride = 1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK);
 
 	/*
+	 * fcvtsd takes a dN register number as destination, not sN.
+	 * Technically, if bit 0 of dd is set, this is an invalid
+	 * instruction.  However, we ignore this for efficiency.
+	 * It also only operates on scalars.
+	 */
+	if ((inst & FEXT_MASK) == FEXT_FCVT) {
+		veclen = 0;
+		dest = vfp_get_dd(inst);
+	} else
+		dest = vfp_get_sd(inst);
+
+	/*
 	 * If destination bank is zero, vector length is always '1'.
 	 * ARM DDI0100F C5.1.3, C5.3.2.
 	 */
-	if (FREG_BANK(sd) == 0)
+	if (FREG_BANK(dest) == 0)
 		veclen = 0;
 
 	pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride,
@@ -1201,15 +1209,18 @@ u32 vfp_single_cpdo(u32 inst, u32 fpscr)
 		s32 m = vfp_get_float(sm);
 		u32 except;
 
-		if (op == FOP_EXT)
+		if (op == FOP_EXT && (inst & FEXT_MASK) == FEXT_FCVT)
+			pr_debug("VFP: itr%d (d%u) = op[%u] (s%u=%08x)\n",
+				vecitr >> FPSCR_LENGTH_BIT, dest, sn, sm, m);
+		else if (op == FOP_EXT)
 			pr_debug("VFP: itr%d (s%u) = op[%u] (s%u=%08x)\n",
-				vecitr >> FPSCR_LENGTH_BIT, sd, sn, sm, m);
+				vecitr >> FPSCR_LENGTH_BIT, dest, sn, sm, m);
 		else
 			pr_debug("VFP: itr%d (s%u) = (s%u) op[%u] (s%u=%08x)\n",
-				vecitr >> FPSCR_LENGTH_BIT, sd, sn,
+				vecitr >> FPSCR_LENGTH_BIT, dest, sn,
 				FOP_TO_IDX(op), sm, m);
 
-		except = fop(sd, sn, m, fpscr);
+		except = fop(dest, sn, m, fpscr);
 		pr_debug("VFP: itr%d: exceptions=%08x\n",
 			vecitr >> FPSCR_LENGTH_BIT, except);
 
@@ -1227,7 +1238,7 @@ u32 vfp_single_cpdo(u32 inst, u32 fpscr)
 		 * we encounter an exception.  We continue.
 		 */
 
-		sd = FREG_BANK(sd) + ((FREG_IDX(sd) + vecstride) & 7);
+		dest = FREG_BANK(dest) + ((FREG_IDX(dest) + vecstride) & 7);
 		sn = FREG_BANK(sn) + ((FREG_IDX(sn) + vecstride) & 7);
 		if (FREG_BANK(sm) != 0)
 			sm = FREG_BANK(sm) + ((FREG_IDX(sm) + vecstride) & 7);
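
Not part of the patch: a runnable sketch of the per-iteration operand stepping at the bottom of the loops above. The FREG_BANK()/FREG_IDX() definitions are assumed for the example (the real ones live in arch/arm/vfp/vfp.h); the point is that only the index within the register bank advances and wraps, with '& 7' in the single-precision loop and '& 6' in the double-precision one, while the bank bits stay fixed.

#include <stdio.h>

#define FREG_BANK(x)	((x) & ~7u)	/* assumed: bank-select bits */
#define FREG_IDX(x)	((x) & 7u)	/* assumed: index within the bank */

int main(void)
{
	unsigned int dest = 13;		/* e.g. s13: bank 8, index 5 */
	unsigned int vecstride = 2;	/* FPSCR stride of two registers */
	int i;

	for (i = 0; i < 4; i++) {
		printf("iteration %d: dest=s%u\n", i, dest);
		/* Index wraps inside the bank; the bank itself never changes. */
		dest = FREG_BANK(dest) + ((FREG_IDX(dest) + vecstride) & 7);
	}
	return 0;
}
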