-rw-r--r--  arch/cris/arch-v10/lib/old_checksum.c  62
-rw-r--r--  include/asm-cris/arch-v10/checksum.h   10
-rw-r--r--  include/asm-cris/arch-v32/checksum.h   10
-rw-r--r--  include/asm-cris/checksum.h            34
4 files changed, 57 insertions(+), 59 deletions(-)
diff --git a/arch/cris/arch-v10/lib/old_checksum.c b/arch/cris/arch-v10/lib/old_checksum.c
index 22a6f0aa9cef..497634a64829 100644
--- a/arch/cris/arch-v10/lib/old_checksum.c
+++ b/arch/cris/arch-v10/lib/old_checksum.c
@@ -47,39 +47,41 @@
 
 #include <asm/delay.h>
 
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
+__wsum csum_partial(const void *p, int len, __wsum __sum)
 {
+	u32 sum = (__force u32)__sum;
+	const u16 *buff = p;
 	/*
 	 * Experiments with ethernet and slip connections show that buff
 	 * is aligned on either a 2-byte or 4-byte boundary.
 	 */
-	const unsigned char *endMarker = buff + len;
-	const unsigned char *marker = endMarker - (len % 16);
+	const void *endMarker = p + len;
+	const void *marker = endMarker - (len % 16);
 #if 0
 	if((int)buff & 0x3)
 		printk("unaligned buff %p\n", buff);
 	__delay(900); /* extra delay of 90 us to test performance hit */
 #endif
 	BITON;
 	while (buff < marker) {
-		sum += *((unsigned short *)buff)++;
-		sum += *((unsigned short *)buff)++;
-		sum += *((unsigned short *)buff)++;
-		sum += *((unsigned short *)buff)++;
-		sum += *((unsigned short *)buff)++;
-		sum += *((unsigned short *)buff)++;
-		sum += *((unsigned short *)buff)++;
-		sum += *((unsigned short *)buff)++;
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
 	}
 	marker = endMarker - (len % 2);
-	while(buff < marker) {
-		sum += *((unsigned short *)buff)++;
-	}
-	if(endMarker - buff > 0) {
-		sum += *buff;	/* add extra byte seperately */
-	}
+	while (buff < marker)
+		sum += *buff++;
+
+	if (endMarker > buff)
+		sum += *(const u8 *)buff;	/* add extra byte seperately */
+
 	BITOFF;
-	return(sum);
+	return (__force __wsum)sum;
 }
 
 EXPORT_SYMBOL(csum_partial);
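
The rewritten csum_partial above folds the buffer into a 32-bit accumulator as
little-endian 16-bit halfwords, sixteen bytes per unrolled pass, then a halfword
at a time, then one trailing odd byte. A minimal userspace sketch of the same
algorithm, without the unrolling or the CRIS BITON/BITOFF profiling markers; the
names csum_partial_model and csum_fold_model are hypothetical, not kernel API:

#include <stdint.h>
#include <stddef.h>

/* Sum 'len' bytes as 16-bit halfwords (buffer assumed 2-byte aligned,
 * little-endian host, as on CRIS); carries pile up in the high bits. */
static uint32_t csum_partial_model(const void *p, size_t len, uint32_t sum)
{
	const uint16_t *w = p;

	while (len >= 2) {
		sum += *w++;
		len -= 2;
	}
	if (len)	/* odd trailing byte lands in the low byte */
		sum += *(const uint8_t *)w;
	return sum;
}

/* Reduce the 32-bit accumulator to a 16-bit ones'-complement checksum. */
static uint16_t csum_fold_model(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}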
diff --git a/include/asm-cris/arch-v10/checksum.h b/include/asm-cris/arch-v10/checksum.h
index 633f234f336b..b8000c5d7fe1 100644
--- a/include/asm-cris/arch-v10/checksum.h
+++ b/include/asm-cris/arch-v10/checksum.h
@@ -8,11 +8,11 @@
  * to split all of those into 16-bit components, then add.
  */
 
-static inline unsigned int
-csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
-		   unsigned short proto, unsigned int sum)
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+		   unsigned short proto, __wsum sum)
 {
-	int res;
+	__wsum res;
 	__asm__ ("add.d %2, %0\n\t"
 		 "ax\n\t"
 		 "add.d %3, %0\n\t"
@@ -21,7 +21,7 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
 		 "ax\n\t"
 		 "addq 0, %0\n"
 		 : "=r" (res)
-		 : "0" (sum), "r" (daddr), "r" (saddr), "r" ((ntohs(len) << 16) + (proto << 8)));
+		 : "0" (sum), "r" (daddr), "r" (saddr), "r" ((len + proto) << 8));
 
 	return res;
 }
diff --git a/include/asm-cris/arch-v32/checksum.h b/include/asm-cris/arch-v32/checksum.h
index 97ef89efea62..e5dcfce6e0dc 100644
--- a/include/asm-cris/arch-v32/checksum.h
+++ b/include/asm-cris/arch-v32/checksum.h
@@ -9,11 +9,11 @@
  * checksum. Which means it would be necessary to split all those into
  * 16-bit components and then add.
  */
-static inline unsigned int
-csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
-		   unsigned short len, unsigned short proto, unsigned int sum)
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+		   unsigned short len, unsigned short proto, __wsum sum)
 {
-	int res;
+	__wsum res;
 
 	__asm__ __volatile__ ("add.d %2, %0\n\t"
 			      "addc %3, %0\n\t"
@@ -21,7 +21,7 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
 			      "addc 0, %0\n\t"
 			      : "=r" (res)
 			      : "0" (sum), "r" (daddr), "r" (saddr), \
-			      "r" ((ntohs(len) << 16) + (proto << 8)));
+			      "r" ((len + proto) << 8));
 
 	return res;
 }
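
Both csum_tcpudp_nofold hunks (arch-v10 and arch-v32) replace the asm input
operand (ntohs(len) << 16) + (proto << 8) with the cheaper (len + proto) << 8.
The two are interchangeable because a ones'-complement sum is arithmetic modulo
0xffff, where x << 16 is congruent to x: that makes len << 8 congruent to the
byte-swapped length, so the explicit ntohs and the << 16 both drop out. A
throwaway check of that congruence (hypothetical helper names, little-endian
view as on CRIS):

#include <stdint.h>
#include <stdio.h>

static uint32_t fold16(uint32_t x)	/* reduce mod 0xffff; map 0xffff to 0 */
{
	x = (x & 0xffff) + (x >> 16);
	x = (x & 0xffff) + (x >> 16);
	return x % 0xffff;
}

static uint16_t swab16(uint16_t x)	/* what ntohs does on little-endian */
{
	return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
	unsigned len, proto;

	for (len = 0; len <= 0xffff; len++)
		for (proto = 0; proto <= 0xff; proto++) {
			uint32_t old_op = ((uint32_t)swab16((uint16_t)len) << 16)
					+ ((uint32_t)proto << 8);
			uint32_t new_op = ((uint32_t)len + proto) << 8;

			if (fold16(old_op) != fold16(new_op)) {
				printf("mismatch at len=%u proto=%u\n", len, proto);
				return 1;
			}
		}
	puts("operands agree mod 0xffff for every len/proto");
	return 0;
}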
diff --git a/include/asm-cris/checksum.h b/include/asm-cris/checksum.h
index 26a7719bbb84..180dbf2757b0 100644
--- a/include/asm-cris/checksum.h
+++ b/include/asm-cris/checksum.h
@@ -17,7 +17,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -27,26 +27,23 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
  * better 64-bit) boundary
  */
 
-unsigned int csum_partial_copy_nocheck(const char *src, char *dst,
-				       int len, unsigned int sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+				 int len, __wsum sum);
 
 /*
  * Fold a partial checksum into a word
  */
 
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum csum)
 {
-	/* the while loop is unnecessary really, it's always enough with two
-	   iterations */
-
-	while(sum >> 16)
-		sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */
-
-	return ~sum;
+	u32 sum = (__force u32)csum;
+	sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */
+	sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */
+	return (__force __sum16)~sum;
 }
 
-extern unsigned int csum_partial_copy_from_user(const char *src, char *dst,
-						int len, unsigned int sum,
-						int *errptr);
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+					int len, __wsum sum,
+					int *errptr);
 
 /*
@@ -55,8 +52,7 @@ extern unsigned int csum_partial_copy_from_user(const char *src, char *dst,
  *
  */
 
-static inline unsigned short ip_fast_csum(unsigned char * iph,
-					  unsigned int ihl)
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	return csum_fold(csum_partial(iph, ihl * 4, 0));
 }
@@ -66,11 +62,10 @@ static inline unsigned short ip_fast_csum(unsigned char * iph,
  * returns a 16-bit checksum, already complemented
  */
 
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						   unsigned long daddr,
-						   unsigned short len,
-						   unsigned short proto,
-						   unsigned int sum)
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+					unsigned short len,
+					unsigned short proto,
+					__wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -80,7 +75,8 @@ static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
  * in icmp.c
  */
 
-static inline unsigned short ip_compute_csum(unsigned char * buff, int len) {
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
 	return csum_fold (csum_partial(buff, len, 0));
 }
 
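
The csum_fold hunk above replaces the old "while (sum >> 16)" loop with exactly
two unconditional fold rounds, which is always enough for a 32-bit accumulator:
round one reduces any value to at most 0xffff + 0xffff = 0x1fffe, and round two
reduces that to at most 0xffff. A small standalone comparison of the two
variants on the edge cases (hypothetical names, not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

static uint16_t fold_two_rounds(uint32_t sum)	/* models the new csum_fold */
{
	sum = (sum & 0xffff) + (sum >> 16);	/* <= 0x1fffe afterwards */
	sum = (sum & 0xffff) + (sum >> 16);	/* <= 0xffff afterwards */
	return (uint16_t)~sum;
}

static uint16_t fold_loop(uint32_t sum)		/* models the old csum_fold */
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	const uint32_t probes[] = {
		0, 1, 0xffff, 0x10000, 0x1fffe, 0x1ffff,
		0xffff0000u, 0xfffffffeu, 0xffffffffu
	};
	unsigned i;

	for (i = 0; i < sizeof(probes) / sizeof(probes[0]); i++)
		assert(fold_two_rounds(probes[i]) == fold_loop(probes[i]));
	puts("two folds match the loop on all probes");
	return 0;
}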