path: root/include/asm-frv
Diffstat (limited to 'include/asm-frv')
-rw-r--r--  include/asm-frv/bitops.h        44
-rw-r--r--  include/asm-frv/checksum.h      41
-rw-r--r--  include/asm-frv/device.h         7
-rw-r--r--  include/asm-frv/dma-mapping.h    4
-rw-r--r--  include/asm-frv/highmem.h       26
-rw-r--r--  include/asm-frv/param.h          1
-rw-r--r--  include/asm-frv/setup.h          6
-rw-r--r--  include/asm-frv/termbits.h      11
-rw-r--r--  include/asm-frv/unistd.h       119
9 files changed, 102 insertions, 157 deletions
diff --git a/include/asm-frv/bitops.h b/include/asm-frv/bitops.h
index 1f70d47148bd..f8560edf59ff 100644
--- a/include/asm-frv/bitops.h
+++ b/include/asm-frv/bitops.h
@@ -256,6 +256,50 @@ int __ffs(unsigned long x)
 	return 31 - bit;
 }
 
+/*
+ * special slimline version of fls() for calculating ilog2_u32()
+ * - note: no protection against n == 0
+ */
+#define ARCH_HAS_ILOG2_U32
+static inline __attribute__((const))
+int __ilog2_u32(u32 n)
+{
+	int bit;
+	asm("scan %1,gr0,%0" : "=r"(bit) : "r"(n));
+	return 31 - bit;
+}
+
+/*
+ * special slimline version of fls64() for calculating ilog2_u64()
+ * - note: no protection against n == 0
+ */
+#define ARCH_HAS_ILOG2_U64
+static inline __attribute__((const))
+int __ilog2_u64(u64 n)
+{
+	union {
+		u64 ll;
+		struct { u32 h, l; };
+	} _;
+	int bit, x, y;
+
+	_.ll = n;
+
+	asm("	subcc	%3,gr0,gr0,icc0		\n"
+	    "	ckeq	icc0,cc4		\n"
+	    "	cscan.p	%3,gr0,%0	,cc4,0	\n"
+	    "	setlos	#63,%1			\n"
+	    "	cscan.p	%4,gr0,%0	,cc4,1	\n"
+	    "	setlos	#31,%2			\n"
+	    "	csub.p	%1,%0,%0	,cc4,0	\n"
+	    "	csub	%2,%0,%0	,cc4,1	\n"
+	    : "=&r"(bit), "=r"(x), "=r"(y)
+	    : "0r"(_.h), "r"(_.l)
+	    : "icc0", "cc4"
+	    );
+	return bit;
+}
+
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/hweight.h>
 
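Note: the new __ilog2_u32() above leans on the FRV scan instruction against gr0 and, as its comment says, gives undefined results for n == 0. A minimal portable sketch of the same semantics, assuming the scan result behaves as a count of leading zero bits and using GCC's __builtin_clz() purely for illustration:

    /* Illustrative only: portable equivalent of the scan-based __ilog2_u32(). */
    static inline int ilog2_u32_sketch(unsigned int n)
    {
            /* undefined for n == 0, matching the caveat on the asm version */
            return 31 - __builtin_clz(n);   /* index of the most significant set bit */
    }
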
diff --git a/include/asm-frv/checksum.h b/include/asm-frv/checksum.h
index 42bf0db2287a..9b1689850187 100644
--- a/include/asm-frv/checksum.h
+++ b/include/asm-frv/checksum.h
@@ -26,7 +26,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -35,7 +35,7 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-unsigned int csum_partial_copy(const char *src, char *dst, int len, int sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
 
 /*
  * the same as csum_partial_copy, but copies from user space.
@@ -43,11 +43,8 @@ unsigned int csum_partial_copy(const char *src, char *dst, int len, int sum);
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-extern unsigned int csum_partial_copy_from_user(const char __user *src, char *dst,
-						int len, int sum, int *csum_err);
-
-#define csum_partial_copy_nocheck(src, dst, len, sum)	\
-	csum_partial_copy((src), (dst), (len), (sum))
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+					  int len, __wsum sum, int *csum_err);
 
 /*
  * This is a version of ip_compute_csum() optimized for IP headers,
@@ -55,7 +52,7 @@ extern unsigned int csum_partial_copy_from_user(const char __user *src, char *ds
  *
  */
 static inline
-unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl)
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	unsigned int tmp, inc, sum = 0;
 
@@ -81,13 +78,13 @@ unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl)
81 : "icc0", "icc1" 78 : "icc0", "icc1"
82 ); 79 );
83 80
84 return ~sum; 81 return (__force __sum16)~sum;
85} 82}
86 83
87/* 84/*
88 * Fold a partial checksum 85 * Fold a partial checksum
89 */ 86 */
90static inline unsigned int csum_fold(unsigned int sum) 87static inline __sum16 csum_fold(__wsum sum)
91{ 88{
92 unsigned int tmp; 89 unsigned int tmp;
93 90
@@ -100,16 +97,16 @@ static inline unsigned int csum_fold(unsigned int sum)
100 : "0"(sum) 97 : "0"(sum)
101 ); 98 );
102 99
103 return ~sum; 100 return (__force __sum16)~sum;
104} 101}
105 102
106/* 103/*
107 * computes the checksum of the TCP/UDP pseudo-header 104 * computes the checksum of the TCP/UDP pseudo-header
108 * returns a 16-bit checksum, already complemented 105 * returns a 16-bit checksum, already complemented
109 */ 106 */
110static inline unsigned int 107static inline __wsum
111csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len, 108csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
112 unsigned short proto, unsigned int sum) 109 unsigned short proto, __wsum sum)
113{ 110{
114 asm(" addcc %1,%0,%0,icc0 \n" 111 asm(" addcc %1,%0,%0,icc0 \n"
115 " addxcc %2,%0,%0,icc0 \n" 112 " addxcc %2,%0,%0,icc0 \n"
@@ -122,9 +119,9 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
 	return sum;
 }
 
-static inline unsigned short int
-csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len,
-		  unsigned short proto, unsigned int sum)
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
+		  unsigned short proto, __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -133,12 +130,12 @@ csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len,
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-extern unsigned short ip_compute_csum(const unsigned char * buff, int len);
+extern __sum16 ip_compute_csum(const void *buff, int len);
 
 #define _HAVE_ARCH_IPV6_CSUM
-static inline unsigned short int
-csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
-		__u32 len, unsigned short proto, unsigned int sum)
+static inline __sum16
+csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
+		__u32 len, unsigned short proto, __wsum sum)
 {
 	unsigned long tmp, tmp2;
 
@@ -177,7 +174,7 @@ csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
177 : "icc0" 174 : "icc0"
178 ); 175 );
179 176
180 return ~sum; 177 return (__force __sum16)~sum;
181} 178}
182 179
183#endif /* _ASM_CHECKSUM_H */ 180#endif /* _ASM_CHECKSUM_H */
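The conversions above to __wsum/__sum16 only tighten the declared types; the arithmetic is still the usual ones'-complement fold that the asm performs. A minimal portable sketch of that fold, shown only to make the (__force __sum16)~sum casts concrete and not taken from this file:

    /* Illustrative sketch: fold a 32-bit partial checksum to 16 bits, then invert. */
    static inline unsigned short csum_fold_sketch(unsigned int sum)
    {
            sum = (sum & 0xffff) + (sum >> 16);   /* add high and low halves */
            sum = (sum & 0xffff) + (sum >> 16);   /* absorb any remaining carry */
            return (unsigned short)~sum;          /* final complement */
    }
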
diff --git a/include/asm-frv/device.h b/include/asm-frv/device.h
new file mode 100644
index 000000000000..d8f9872b0e2d
--- /dev/null
+++ b/include/asm-frv/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/include/asm-frv/dma-mapping.h b/include/asm-frv/dma-mapping.h
index e9fc1d47797e..bcb2df68496e 100644
--- a/include/asm-frv/dma-mapping.h
+++ b/include/asm-frv/dma-mapping.h
@@ -172,10 +172,10 @@ int dma_get_cache_alignment(void)
172 return 1 << L1_CACHE_SHIFT; 172 return 1 << L1_CACHE_SHIFT;
173} 173}
174 174
175#define dma_is_consistent(d) (1) 175#define dma_is_consistent(d, h) (1)
176 176
177static inline 177static inline
178void dma_cache_sync(void *vaddr, size_t size, 178void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
179 enum dma_data_direction direction) 179 enum dma_data_direction direction)
180{ 180{
181 flush_write_buffers(); 181 flush_write_buffers();
diff --git a/include/asm-frv/highmem.h b/include/asm-frv/highmem.h
index e2247c22a638..ff4d6cdeb152 100644
--- a/include/asm-frv/highmem.h
+++ b/include/asm-frv/highmem.h
@@ -82,11 +82,11 @@ extern struct page *kmap_atomic_to_page(void *ptr);
 	dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V;	\
 										\
 	if (type != __KM_CACHE)							\
-		asm volatile("movgs %0,dampr"#ampr :: "r"(dampr));		\
+		asm volatile("movgs %0,dampr"#ampr :: "r"(dampr) : "memory");	\
 	else									\
 		asm volatile("movgs %0,iampr"#ampr"\n"				\
 			     "movgs %0,dampr"#ampr"\n"				\
-			     :: "r"(dampr)					\
+			     :: "r"(dampr) : "memory"				\
 			     );							\
 										\
 	asm("movsg damlr"#ampr",%0" : "=r"(damlr));				\
@@ -104,7 +104,7 @@ extern struct page *kmap_atomic_to_page(void *ptr);
 	asm volatile("movgs %0,tplr \n"						\
 		     "movgs %1,tppr \n"						\
 		     "tlbpr %0,gr0,#2,#1"					\
-		     : : "r"(damlr), "r"(dampr));				\
+		     : : "r"(damlr), "r"(dampr) : "memory");			\
 										\
 	/*printk("TLB: SECN sl=%d L=%08lx P=%08lx\n", slot, damlr, dampr);*/	\
 										\
@@ -115,7 +115,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 {
 	unsigned long paddr;
 
-	preempt_disable();
+	pagefault_disable();
 	paddr = page_to_phys(page);
 
 	switch (type) {
@@ -138,16 +138,16 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 	}
 }
 
 #define __kunmap_atomic_primary(type, ampr)				\
 do {									\
-	asm volatile("movgs gr0,dampr"#ampr"\n");			\
+	asm volatile("movgs gr0,dampr"#ampr"\n" ::: "memory");		\
 	if (type == __KM_CACHE)						\
-		asm volatile("movgs gr0,iampr"#ampr"\n");		\
+		asm volatile("movgs gr0,iampr"#ampr"\n" ::: "memory");	\
 } while(0)
 
 #define __kunmap_atomic_secondary(slot, vaddr)				\
 do {									\
-	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr));		\
+	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory");	\
 } while(0)
 
 static inline void kunmap_atomic(void *kvaddr, enum km_type type)
@@ -170,7 +170,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	default:
 		BUG();
 	}
-	preempt_enable();
+	pagefault_enable();
 }
 
 #endif /* !__ASSEMBLY__ */
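Swapping preempt_disable()/preempt_enable() for pagefault_disable()/pagefault_enable() does not change how callers pair these functions. A hypothetical in-kernel caller of the km_type-era API, sketched only to show the pairing:

    /* Hypothetical caller: copy one highmem page through an atomic mapping. */
    #include <linux/highmem.h>
    #include <linux/string.h>

    static void copy_from_page_sketch(struct page *page, void *dst)
    {
            void *vaddr = kmap_atomic(page, KM_USER0);  /* pagefaults disabled here */
            memcpy(dst, vaddr, PAGE_SIZE);
            kunmap_atomic(vaddr, KM_USER0);             /* pagefaults re-enabled */
    }
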
diff --git a/include/asm-frv/param.h b/include/asm-frv/param.h
index 168381ebb41a..365653b1726c 100644
--- a/include/asm-frv/param.h
+++ b/include/asm-frv/param.h
@@ -18,6 +18,5 @@
 #endif
 
 #define MAXHOSTNAMELEN	64	/* max length of hostname */
-#define COMMAND_LINE_SIZE 512
 
 #endif /* _ASM_PARAM_H */
diff --git a/include/asm-frv/setup.h b/include/asm-frv/setup.h
index 0d293b9a5857..afd787ceede6 100644
--- a/include/asm-frv/setup.h
+++ b/include/asm-frv/setup.h
@@ -12,6 +12,10 @@
 #ifndef _ASM_SETUP_H
 #define _ASM_SETUP_H
 
+#define COMMAND_LINE_SIZE 512
+
+#ifdef __KERNEL__
+
 #include <linux/init.h>
 
 #ifndef __ASSEMBLY__
@@ -22,4 +26,6 @@ extern unsigned long __initdata num_mappedpages;
 
 #endif /* !__ASSEMBLY__ */
 
+#endif /* __KERNEL__ */
+
 #endif /* _ASM_SETUP_H */
diff --git a/include/asm-frv/termbits.h b/include/asm-frv/termbits.h
index 74f20d6e292f..2d6d389cff49 100644
--- a/include/asm-frv/termbits.h
+++ b/include/asm-frv/termbits.h
@@ -17,6 +17,17 @@ struct termios {
 	cc_t c_cc[NCCS];		/* control characters */
 };
 
+struct ktermios {
+	tcflag_t c_iflag;		/* input mode flags */
+	tcflag_t c_oflag;		/* output mode flags */
+	tcflag_t c_cflag;		/* control mode flags */
+	tcflag_t c_lflag;		/* local mode flags */
+	cc_t c_line;			/* line discipline */
+	cc_t c_cc[NCCS];		/* control characters */
+	speed_t c_ispeed;		/* input speed */
+	speed_t c_ospeed;		/* output speed */
+};
+
 /* c_cc characters */
 #define VINTR 0
 #define VQUIT 1
diff --git a/include/asm-frv/unistd.h b/include/asm-frv/unistd.h
index 725e854928cf..584c0417ae4d 100644
--- a/include/asm-frv/unistd.h
+++ b/include/asm-frv/unistd.h
@@ -320,125 +320,6 @@
 #ifdef __KERNEL__
 
 #define NR_syscalls 310
-#include <linux/err.h>
-
-/*
- * process the return value of a syscall, consigning it to one of two possible fates
- * - user-visible error numbers are in the range -1 - -4095: see <asm-frv/errno.h>
- */
-#undef __syscall_return
-#define __syscall_return(type, res)					\
-do {									\
-	unsigned long __sr2 = (res);					\
-	if (__builtin_expect(__sr2 >= (unsigned long)(-MAX_ERRNO), 0)) { \
-		errno = (-__sr2);					\
-		__sr2 = ~0UL;						\
-	}								\
-	return (type) __sr2;						\
-} while (0)
-
-/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
-
-#undef _syscall0
-#define _syscall0(type,name)						\
-type name(void)								\
-{									\
-	register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \
-	register unsigned long __sc0 __asm__ ("gr8");			\
-	__asm__ __volatile__ ("tira gr0,#0"				\
-			      : "=r" (__sc0)				\
-			      : "r" (__scnum));				\
-	__syscall_return(type, __sc0);					\
-}
-
-#undef _syscall1
-#define _syscall1(type,name,type1,arg1)					\
-type name(type1 arg1)							\
-{									\
-	register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \
-	register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \
-	__asm__ __volatile__ ("tira gr0,#0"				\
-			      : "+r" (__sc0)				\
-			      : "r" (__scnum));				\
-	__syscall_return(type, __sc0);					\
-}
-
-#undef _syscall2
-#define _syscall2(type,name,type1,arg1,type2,arg2)			\
-type name(type1 arg1,type2 arg2)					\
-{									\
-	register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \
-	register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \
-	register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2; \
-	__asm__ __volatile__ ("tira gr0,#0"				\
-			      : "+r" (__sc0)				\
-			      : "r" (__scnum), "r" (__sc1));		\
-	__syscall_return(type, __sc0);					\
-}
-
-#undef _syscall3
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)		\
-type name(type1 arg1,type2 arg2,type3 arg3)				\
-{									\
-	register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \
-	register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \
-	register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2; \
-	register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3; \
-	__asm__ __volatile__ ("tira gr0,#0"				\
-			      : "+r" (__sc0)				\
-			      : "r" (__scnum), "r" (__sc1), "r" (__sc2)); \
-	__syscall_return(type, __sc0);					\
-}
-
-#undef _syscall4
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4)		\
-{									\
-	register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \
-	register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \
-	register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2; \
-	register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3; \
-	register unsigned long __sc3 __asm__ ("gr11") = (unsigned long) arg4; \
-	__asm__ __volatile__ ("tira gr0,#0"				\
-			      : "+r" (__sc0)				\
-			      : "r" (__scnum), "r" (__sc1), "r" (__sc2), "r" (__sc3)); \
-	__syscall_return(type, __sc0);					\
-}
-
-#undef _syscall5
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5)	\
-{									\
-	register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \
-	register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \
-	register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2; \
-	register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3; \
-	register unsigned long __sc3 __asm__ ("gr11") = (unsigned long) arg4; \
-	register unsigned long __sc4 __asm__ ("gr12") = (unsigned long) arg5; \
-	__asm__ __volatile__ ("tira gr0,#0"				\
-			      : "+r" (__sc0)				\
-			      : "r" (__scnum), "r" (__sc1), "r" (__sc2), \
-				"r" (__sc3), "r" (__sc4));		\
-	__syscall_return(type, __sc0);					\
-}
-
-#undef _syscall6
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5, type6, arg6) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
-{									\
-	register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \
-	register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \
-	register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2; \
-	register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3; \
-	register unsigned long __sc3 __asm__ ("gr11") = (unsigned long) arg4; \
-	register unsigned long __sc4 __asm__ ("gr12") = (unsigned long) arg5; \
-	register unsigned long __sc5 __asm__ ("gr13") = (unsigned long) arg6; \
-	__asm__ __volatile__ ("tira gr0,#0"				\
-			      : "+r" (__sc0)				\
-			      : "r" (__scnum), "r" (__sc1), "r" (__sc2), \
-				"r" (__sc3), "r" (__sc4), "r" (__sc5)); \
-	__syscall_return(type, __sc0);					\
-}
-
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 /* #define __ARCH_WANT_OLD_READDIR */
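All of the _syscallN macros deleted above funnelled the raw gr8 return value through __syscall_return(), which treats the top 4095 values of the unsigned range as negated errno codes. A standalone sketch of that decoding step (names hypothetical; MAX_ERRNO assumed to be 4095 as in <linux/err.h>):

    /* Illustrative sketch of the error decoding done by the removed __syscall_return(). */
    #define SKETCH_MAX_ERRNO 4095

    static long decode_syscall_return(unsigned long raw, int *err)
    {
            if (raw >= (unsigned long)-SKETCH_MAX_ERRNO) {
                    *err = (int)-raw;  /* user-visible errno, e.g. 22 for EINVAL */
                    return -1;         /* the macro returned ~0UL cast to the call's type */
            }
            *err = 0;
            return (long)raw;
    }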