author     Stuart Menefy <stuart.menefy@st.com>   2008-12-12 13:34:38 -0500
committer  Paul Mundt <lethal@linux-sh.org>       2009-01-28 21:56:02 -0500
commit     cadc4e1a2b4d20d0cc0e81f2c6ba0588775e54e5 (patch)
tree       2341d7a1426ee687ff828b3a6d2885abbbef261b
parent     3d22fca7ab720818cca19a1ee6820e9dc4485195 (diff)
sh: Handle calling csum_partial with misaligned data
In rare circumstances csum_partial() can be called with data which is
not 16- or 32-bit aligned. This has been observed with RPC calls for NFS
file systems, for example. Add support for handling this without resorting
to the misaligned fixup code (which is why this hasn't been seen as a
problem). This mimics the i386 version, which has had this support for
some time.
Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
-rw-r--r--   arch/sh/lib/checksum.S   |   69
1 file changed, 50 insertions, 19 deletions
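
The trick borrowed from the i386 version is easier to follow in C. Below is a rough sketch of the control flow the patch adds, not the kernel code: the helper names (rotl8, add_c, csum_sketch) are made up, little-endian byte order is assumed, and the real routine uses the unrolled 32-bit loop shown in the diff. When the buffer starts on an odd address, one byte is peeled off, the 32-bit accumulator is rotated left by 8 bits so the remainder can be summed as if it were aligned, and the accumulator is rotated once more before returning (labels 9/10 in the patch).

#include <stdint.h>

/* Rotate the 32-bit running sum left by 8 bits -- the effect of the
 * "mov r6,r0 / shll8 r6 / shlr16 r0 / shlr8 r0 / or r0,r6" sequence. */
static uint32_t rotl8(uint32_t sum)
{
	return (sum << 8) | (sum >> 24);
}

/* Accumulate with end-around carry, as addc plus the trailing
 * "add carry to r6" instructions do in the assembly. */
static uint32_t add_c(uint32_t sum, uint32_t val)
{
	uint64_t t = (uint64_t)sum + val;
	return (uint32_t)t + (uint32_t)(t >> 32);
}

/* Sketch of the new control flow: peel off one byte when buf starts at
 * an odd address, rotate so the rest can be summed as if aligned, and
 * rotate once more at the end to put the byte lanes back. */
static uint32_t csum_sketch(const unsigned char *buf, int len, uint32_t sum)
{
	int was_odd = (uintptr_t)buf & 1;

	if (was_odd && len > 0) {
		sum = add_c(sum, *buf++);	/* "mov.b @r4+ ... addc" */
		sum = rotl8(sum);		/* realign the byte lanes */
		len--;
	}

	/* Main body: sum 16-bit words (the assembly unrolls this into
	 * 32-bit loads). Little-endian word assembly assumed. */
	while (len >= 2) {
		sum = add_c(sum, (uint32_t)buf[0] | ((uint32_t)buf[1] << 8));
		buf += 2;
		len -= 2;
	}
	if (len)
		sum = add_c(sum, *buf);		/* trailing byte */

	if (was_odd)
		sum = rotl8(sum);		/* labels 9/10 in the patch */

	return sum;
}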
diff --git a/arch/sh/lib/checksum.S b/arch/sh/lib/checksum.S
index cbdd0d40e545..356c8ec92893 100644
--- a/arch/sh/lib/checksum.S
+++ b/arch/sh/lib/checksum.S
@@ -36,8 +36,7 @@
  */
 
 /*
- * unsigned int csum_partial(const unsigned char *buf, int len,
- *                           unsigned int sum);
+ * asmlinkage __wsum csum_partial(const void *buf, int len, __wsum sum);
  */
 
 	.text
@@ -49,11 +48,31 @@ ENTRY(csum_partial)
 	   * Fortunately, it is easy to convert 2-byte alignment to 4-byte
 	   * alignment for the unrolled loop.
 	   */
-	mov	r5, r1
 	mov	r4, r0
-	tst	#2, r0		! Check alignment.
-	bt	2f		! Jump if alignment is ok.
+	tst	#3, r0		! Check alignment.
+	bt/s	2f		! Jump if alignment is ok.
+	 mov	r4, r7		! Keep a copy to check for alignment
 	!
+	tst	#1, r0		! Check alignment.
+	bt	21f		! Jump if alignment is boundary of 2bytes.
+
+	! buf is odd
+	tst	r5, r5
+	add	#-1, r5
+	bt	9f
+	mov.b	@r4+, r0
+	extu.b	r0, r0
+	addc	r0, r6		! t=0 from previous tst
+	mov	r6, r0
+	shll8	r6
+	shlr16	r0
+	shlr8	r0
+	or	r0, r6
+	mov	r4, r0
+	tst	#2, r0
+	bt	2f
+21:
+	! buf is 2 byte aligned (len could be 0)
 	add	#-2, r5		! Alignment uses up two bytes.
 	cmp/pz	r5		!
 	bt/s	1f		! Jump if we had at least two bytes.
@@ -61,16 +80,17 @@ ENTRY(csum_partial)
 	bra	6f
 	 add	#2, r5		! r5 was < 2.  Deal with it.
 1:
-	mov	r5, r1		! Save new len for later use.
 	mov.w	@r4+, r0
 	extu.w	r0, r0
 	addc	r0, r6
 	bf	2f
 	add	#1, r6
 2:
+	! buf is 4 byte aligned (len could be 0)
+	mov	r5, r1
 	mov	#-5, r0
-	shld	r0, r5
-	tst	r5, r5
+	shld	r0, r1
+	tst	r1, r1
 	bt/s	4f		! if it's =0, go to 4f
 	 clrt
 	.align	2
@@ -92,30 +112,31 @@ ENTRY(csum_partial)
 	addc	r0, r6
 	addc	r2, r6
 	movt	r0
-	dt	r5
+	dt	r1
 	bf/s	3b
 	 cmp/eq	#1, r0
-	! here, we know r5==0
-	addc	r5, r6		! add carry to r6
+	! here, we know r1==0
+	addc	r1, r6		! add carry to r6
 4:
-	mov	r1, r0
+	mov	r5, r0
 	and	#0x1c, r0
 	tst	r0, r0
-	bt/s	6f
-	 mov	r0, r5
-	shlr2	r5
+	bt	6f
+	! 4 bytes or more remaining
+	mov	r0, r1
+	shlr2	r1
 	mov	#0, r2
 5:
 	addc	r2, r6
 	mov.l	@r4+, r2
 	movt	r0
-	dt	r5
+	dt	r1
 	bf/s	5b
 	 cmp/eq	#1, r0
 	addc	r2, r6
-	addc	r5, r6		! r5==0 here, so it means add carry-bit
+	addc	r1, r6		! r1==0 here, so it means add carry-bit
 6:
-	mov	r1, r5
+	! 3 bytes or less remaining
 	mov	#3, r0
 	and	r0, r5
 	tst	r5, r5
@@ -139,8 +160,18 @@ ENTRY(csum_partial)
 8:
 	addc	r0, r6
 	mov	#0, r0
 	addc	r0, r6
 9:
+	! Check if the buffer was misaligned, if so realign sum
+	mov	r7, r0
+	tst	#1, r0
+	bt	10f
+	mov	r6, r0
+	shll8	r6
+	shlr16	r0
+	shlr8	r0
+	or	r0, r6
+10:
 	rts
 	 mov	r6, r0
 
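
As a follow-up usage note (hypothetical test code reusing the csum_sketch helper from the sketch above, not the kernel API): the property this change buys is that the same bytes fold to the same 16-bit checksum whether the source pointer is even or odd, which is what the NFS/RPC callers mentioned in the commit message rely on. The 32-bit partial sums may differ; only the value modulo 0xffff has to agree.

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	/* Back both buffers with uint32_t so their base addresses are
	 * 4-byte aligned; pad + 1 is then guaranteed to be odd. */
	uint32_t raw_backing[16], odd_backing[17];
	unsigned char *raw = (unsigned char *)raw_backing;
	unsigned char *pad = (unsigned char *)odd_backing;

	for (int i = 0; i < 64; i++)
		raw[i] = (unsigned char)(i * 7 + 3);
	memcpy(pad + 1, raw, 64);	/* same payload at an odd address */

	uint32_t a = csum_sketch(raw, 64, 0);
	uint32_t b = csum_sketch(pad + 1, 64, 0);

	/* The 32-bit accumulators need not be identical, but they must
	 * agree modulo 0xffff, i.e. fold to the same 16-bit checksum. */
	assert(a % 0xffffu == b % 0xffffu);
	return 0;
}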