author     Atsushi Nemoto <anemo@mba.ocn.ne.jp>    2008-09-20 11:20:04 -0400
committer  Ralf Baechle <ralf@linux-mips.org>      2008-09-21 08:52:56 -0400
commit     b80a1b80818cd83c6c2109081b43551e7af72c84
tree       c643aa44d3d85835e469499c578a3f36e36135d4
parent     9824b8f11373b0df806c135a342da9319ef1d893
[MIPS] Fix 64-bit IP checksum code
Use unsigned loads to avoid possible miscalculation of IP checksums.  This
bug was introduced in f761106cd728bcf65b7fe161b10221ee00cf7132 (lmo) /
ed99e2bc1dc5dc54eb5a019f4975562dbef20103 (kernel.org).
[Original fix by Atsushi. Improved instruction scheduling and fix for
unaligned unsigned load by me -- Ralf]
Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
 arch/mips/lib/csum_partial.S | 21 +++++++++++++++++----
 1 file changed, 17 insertions(+), 4 deletions(-)
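Background for readers of the diff below: with USE_DOUBLE the running sum is kept in a 64-bit register, and MIPS lw sign-extends its result while lwu zero-extends it, so a word with bit 31 set loaded via lw contributes an unintended 0xffffffff00000000 to the accumulator. The snippet below is only a minimal illustration of that load difference (plain C, not kernel code; the names are invented for the example).

	#include <inttypes.h>
	#include <stdio.h>

	/*
	 * Minimal illustration (not kernel code) of why the 64-bit csum
	 * path wants a zero-extending 32-bit load: a sign-extending load
	 * of a word with bit 31 set adds an extra 0xffffffff00000000 to
	 * a 64-bit accumulator.
	 */
	int main(void)
	{
		uint32_t word = 0x80000001;	/* a header word with bit 31 set */

		uint64_t via_lw  = (uint64_t)(int64_t)(int32_t)word;	/* lw: sign-extends */
		uint64_t via_lwu = (uint64_t)word;			/* lwu: zero-extends */

		printf("lw  would add 0x%016" PRIx64 "\n", via_lw);	/* 0xffffffff80000001 */
		printf("lwu would add 0x%016" PRIx64 "\n", via_lwu);	/* 0x0000000080000001 */
		return 0;
	}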
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 8d7784122c14..edac9892c51a 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -39,12 +39,14 @@
 #ifdef USE_DOUBLE
 
 #define LOAD   ld
+#define LOAD32 lwu
 #define ADD    daddu
 #define NBYTES 8
 
 #else
 
 #define LOAD   lw
+#define LOAD32 lw
 #define ADD    addu
 #define NBYTES 4
 
@@ -60,6 +62,14 @@
 	ADD	sum, v1;					\
 	.set	pop
 
+#define ADDC32(sum,reg)						\
+	.set	push;						\
+	.set	noat;						\
+	addu	sum, reg;					\
+	sltu	v1, sum, reg;					\
+	addu	sum, v1;					\
+	.set	pop
+
 #define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)	\
 	LOAD	_t0, (offset + UNIT(0))(src);			\
 	LOAD	_t1, (offset + UNIT(1))(src);			\
@@ -132,7 +142,7 @@ LEAF(csum_partial)
 	beqz	t8, .Lqword_align
 	 andi	t8, src, 0x8
 
-	lw	t0, 0x00(src)
+	LOAD32	t0, 0x00(src)
 	LONG_SUBU	a1, a1, 0x4
 	ADDC(sum, t0)
 	PTR_ADDU	src, src, 0x4
@@ -211,7 +221,7 @@ LEAF(csum_partial)
 	LONG_SRL	t8, t8, 0x2
 
 .Lend_words:
-	lw	t0, (src)
+	LOAD32	t0, (src)
 	LONG_SUBU	t8, t8, 0x1
 	ADDC(sum, t0)
 	.set	reorder				/* DADDI_WAR */
@@ -230,6 +240,9 @@ LEAF(csum_partial)
 	/* Still a full word to go  */
 	ulw	t1, (src)
 	PTR_ADDIU	src, 4
+#ifdef USE_DOUBLE
+	dsll	t1, t1, 32			/* clear lower 32bit */
+#endif
 	ADDC(sum, t1)
 
 1:	move	t1, zero
@@ -280,7 +293,7 @@ LEAF(csum_partial)
 1:
 	.set	reorder
 	/* Add the passed partial csum.  */
-	ADDC(sum, a2)
+	ADDC32(sum, a2)
 	jr	ra
 	.set	noreorder
 	END(csum_partial)
@@ -681,7 +694,7 @@ EXC(	sb	t0, NBYTES-2(dst), .Ls_exc)
 	.set	pop
 1:
 	.set	reorder
-	ADDC(sum, psum)
+	ADDC32(sum, psum)
 	jr	ra
 	.set	noreorder
 
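For reference, the new ADDC32 macro above is a 32-bit end-around-carry add (add, then fold the carry-out back in), used where the caller-supplied 32-bit partial checksum (a2 / psum) is merged into the sum. A rough C equivalent, sketched here for illustration only (the function name is invented):

	#include <stdint.h>

	/*
	 * Sketch of the ADDC32 semantics: a plain 32-bit add whose
	 * carry-out is folded back in, mirroring
	 *	addu sum, reg; sltu v1, sum, reg; addu sum, v1
	 * It stays in 32 bits regardless of USE_DOUBLE.
	 */
	static inline uint32_t addc32(uint32_t sum, uint32_t reg)
	{
		sum += reg;
		sum += (sum < reg);	/* fold carry (the sltu/addu pair) */
		return sum;
	}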